/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32  kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* only read; nobody should be modifying it at this time,
	 * so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before it is used */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}
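
/*
 * kvm_set_shared_msr() below loads a guest value into one of the MSRs
 * declared via kvm_define_shared_msr().  The host value is not restored
 * immediately on guest exit; it is restored lazily from the user-return
 * notifier (kvm_on_user_return) when the thread goes back to userspace,
 * so the guest value can stay loaded across many guest entries/exits.
 */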
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* an exception is already pending; check how the two combine */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest.  The difference to kvm_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	kvm_put_guest_xcr0(vcpu);
	vcpu->arch.xcr0 = xcr0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;
	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put at the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	10
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static const u32 emulated_msrs[] = {
	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->set_msr(vcpu, msr);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
		cycle_t	cycle_last;
		cycle_t	mask;
		u32	mult;
		u32	shift;
	} clock;

	/* open coded 'struct timespec' */
	u64		monotonic_time_snsec;
	time_t		monotonic_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->clock->cycle_last;
	vdata->clock.mask		= tk->clock->mask;
	vdata->clock.mult		= tk->mult;
	vdata->clock.shift		= tk->shift;

	vdata->monotonic_time_sec	= tk->xtime_sec
					+ tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec	= tk->xtime_nsec
					+ (tk->wall_to_monotonic.tv_nsec
						<< tk->shift);
	while (vdata->monotonic_time_snsec >=
					(((u64)NSEC_PER_SEC) << tk->shift)) {
		vdata->monotonic_time_snsec -=
					((u64)NSEC_PER_SEC) << tk->shift;
		vdata->monotonic_time_sec++;
	}

	write_seqcount_end(&vdata->seq);
}
#endif


static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  Guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
		boot = timespec_sub(boot, ts);
	}
	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_khz * 1000LL;
	scaled64 = scaled_khz * 1000LL;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
}

static inline u64 get_kernel_ns(void)
{
	struct timespec ts;

	WARN_ON(preemptible());
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	return timespec_to_ns(&ts);
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (this_tsc_khz == 0)
		return;

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = this_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide whether the
	 * rate being applied is within those bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	bool do_request = false;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
		if (!ka->use_master_clock)
			do_request = 1;

	if (!vcpus_matched && ka->use_master_clock)
		do_request = 1;

	if (do_request)
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
			    ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
	u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	s64 usdiff;
	bool matched;
	u64 data = msr->data;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
	ns = get_kernel_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		/* n.b - signed multiplication and division required */
		usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
		/* do_div() only does unsigned */
		asm("idivl %2; xor %%edx, %%edx"
		    : "=A"(usdiff)
		    : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
#endif
		do_div(elapsed, 1000);
		usdiff -= elapsed;
		if (usdiff < 0)
			usdiff = -usdiff;
	} else
		usdiff = USEC_PER_SEC; /* disable TSC match window below */

	/*
	 * Special case: TSC write with a small delta (1 second) of virtual
	 * cycle time against real time is interpreted as an attempt to
	 * synchronize the CPU.
	 *
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
	 */
	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %u, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	/* Reset of TSC must disable overshoot protection below */
	vcpu->arch.hv_clock.tsc_timestamp = 0;
	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (matched)
		kvm->arch.nr_vcpus_matched_tsc++;
	else
		kvm->arch.nr_vcpus_matched_tsc = 0;

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}

EXPORT_SYMBOL_GPL(kvm_write_tsc);

#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
{
	unsigned long seq;
	u64 ns;
	int mode;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	struct timespec ts;

	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
		return false;

	monotonic_to_bootbased(&ts);
	*kernel_ns = timespec_to_ns(&ts);

	return true;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1. read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 *					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.					| ret1 = timespec1 + (rdtsc - tsc1)
 *					| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	host_tsc_clocksource = kvm_get_time_and_clockread(
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

	ka->use_master_clock = host_tsc_clocksource & vcpus_matched;

	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
					vcpus_matched);
#endif
}

static int kvm_guest_time_update(struct kvm_vcpu *v)
{
	unsigned long flags, this_tsc_khz;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
	s64 kernel_ns, max_kernel_ns;
	u64 tsc_timestamp, host_tsc;
	struct pvclock_vcpu_time_info guest_hv_clock;
	u8 pvclock_flags;
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
	if (unlikely(this_tsc_khz == 0)) {
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
	if (!use_master_clock) {
		host_tsc = native_read_tsc();
		kernel_ns = get_kernel_ns();
	}

	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);

	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *      very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
			tsc_timestamp = tsc;
		}
	}

	local_irq_restore(flags);

	if (!vcpu->pv_time_enabled)
		return 0;

	/*
	 * Time as measured by the TSC may go backwards when resetting the base
	 * tsc_timestamp.  The reason for this is that the TSC resolution is
	 * higher than the resolution of the other clock scales.  Thus, many
	 * possible measurements of the TSC correspond to one measurement of any
	 * other clock, and so a spread of values is possible.  This is not a
	 * problem for the computation of the nanosecond clock; with TSC rates
	 * around 1GHZ, there can only be a few cycles which correspond to one
	 * nanosecond value, and any path through this code will inevitably
	 * take longer than that.  However, with the kernel_ns value itself,
	 * the precision may be much lower, down to HZ granularity.  If the
	 * first sampling of TSC against kernel_ns ends in the low part of the
	 * range, and the second in the high end of the range, we can get:
	 *
	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
	 *
	 * As the sampling errors potentially range in the thousands of cycles,
	 * it is possible such a time value has already been observed by the
	 * guest.  To protect against this, we must compute the system time as
	 * observed by the guest and ensure the new system time is greater.
	 */
	max_kernel_ns = 0;
	if (vcpu->hv_clock.tsc_timestamp) {
		max_kernel_ns = vcpu->last_guest_tsc -
				vcpu->hv_clock.tsc_timestamp;
		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
				    vcpu->hv_clock.tsc_to_system_mul,
				    vcpu->hv_clock.tsc_shift);
		max_kernel_ns += vcpu->last_kernel_ns;
	}

	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
		vcpu->hw_tsc_khz = this_tsc_khz;
	}

	/* with a master <monotonic time, tsc value> tuple,
	 * pvclock clock reads always increase at the (scaled) rate
	 * of guest TSC - no need to deal with sampling errors.
	 */
	if (!use_master_clock) {
		if (max_kernel_ns > kernel_ns)
			kernel_ns = max_kernel_ns;
	}
	/* With all the info we got, fill in the values */
	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
	vcpu->last_kernel_ns = kernel_ns;
	vcpu->last_guest_tsc = tsc_timestamp;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return 0;

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	/* If the host uses TSC clocksource, then it is stable */
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

	vcpu->hv_clock.flags = pvclock_flags;

	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));
	return 0;
}
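
/*
 * MTRR and PAT MSR handling.  Variable-range MTRRs are laid out as
 * PHYSBASEn/PHYSMASKn pairs starting at MSR 0x200 (0x200 + 2n is a base
 * register, 0x200 + 2n + 1 the matching mask), which is why set_msr_mtrr()
 * below indexes them in pairs; the fixed-range MTRRs, MTRRdefType and
 * IA32_CR_PAT are validated individually.
 */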
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncaught #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
		goto out;
	}
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			    "data 0x%llx\n", msr, data);
		return 1;
	}
	return 0;
}

static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			    "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}
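
/*
 * MSR_KVM_ASYNC_PF_EN, as handled below: bit 0 enables async page faults,
 * bit 1 (KVM_ASYNC_PF_SEND_ALWAYS) allows delivery while the guest is in
 * kernel mode, bits 2:5 are reserved and must be zero, and the remaining
 * bits hold the 64-byte aligned guest physical address of the per-vCPU
 * async PF reason word.
 */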
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

	/* Bits 2:5 are reserved, should be zero */
	if (data & 0x3c)
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
					sizeof(u32)))
		return 1;

	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pv_time_enabled = false;
}

static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
	u64 delta;

	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	vcpu->arch.st.accum_steal = delta;
}

static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
	vcpu->arch.st.steal.version += 2;
	vcpu->arch.st.accum_steal = 0;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	bool pr = false;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
		break;

	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		data &= ~(u64)0x8;	/* ignore TLB cache disable */
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				    data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				    "0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			    __func__, data);
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_TSCDEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
	case MSR_IA32_TSC_ADJUST:
		if (guest_cpuid_has_tsc_adjust(vcpu)) {
			if (!msr_info->host_initiated) {
				u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		u64 gpa_offset;
		kvmclock_reset(vcpu);

		vcpu->arch.time = data;
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		gpa_offset = data & ~(PAGE_MASK | 1);

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
		     &vcpu->arch.pv_time, data & ~1ULL,
		     sizeof(struct pvclock_vcpu_time_info)))
			vcpu->arch.pv_time_enabled = false;
		else
			vcpu->arch.pv_time_enabled = true;

		break;
	}
	case MSR_KVM_ASYNC_PF_EN:
		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
	case MSR_KVM_STEAL_TIME:

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
						data & KVM_STEAL_VALID_BITS,
						sizeof(struct kvm_steal_time)))
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;

		preempt_disable();
		accumulate_steal_time(vcpu);
		preempt_enable();

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
	case MSR_KVM_PV_EOI_EN:
		if (kvm_lapic_enable_pv_eoi(vcpu, data))
			return 1;
		break;

	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);

	/* Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-). Any other value should be at least
	 * reported, some guests depend on them.
	 */
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		if (data != 0)
			vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
		break;
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
			    "0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
		pr = true;
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		if (kvm_pmu_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

		if (pr || data != 0)
			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no longer documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
		 * AMD for these chips. It is possible to specify the
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = set_msr_hyperv_pw(vcpu, msr, data);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return set_msr_hyperv(vcpu, msr, data);
		break;
	case MSR_IA32_BBL_CR_CTL3:
		/* Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.status = data;
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (kvm_pmu_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);
		if (!ignore_msrs) {
			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				    msr, data);
			return 1;
		} else {
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
				    msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
2142 */ 2143 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) 2144 { 2145 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); 2146 } 2147 2148 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2149 { 2150 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; 2151 2152 if (!msr_mtrr_valid(msr)) 2153 return 1; 2154 2155 if (msr == MSR_MTRRdefType) 2156 *pdata = vcpu->arch.mtrr_state.def_type + 2157 (vcpu->arch.mtrr_state.enabled << 10); 2158 else if (msr == MSR_MTRRfix64K_00000) 2159 *pdata = p[0]; 2160 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) 2161 *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; 2162 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) 2163 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; 2164 else if (msr == MSR_IA32_CR_PAT) 2165 *pdata = vcpu->arch.pat; 2166 else { /* Variable MTRRs */ 2167 int idx, is_mtrr_mask; 2168 u64 *pt; 2169 2170 idx = (msr - 0x200) / 2; 2171 is_mtrr_mask = msr - 0x200 - 2 * idx; 2172 if (!is_mtrr_mask) 2173 pt = 2174 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; 2175 else 2176 pt = 2177 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; 2178 *pdata = *pt; 2179 } 2180 2181 return 0; 2182 } 2183 2184 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2185 { 2186 u64 data; 2187 u64 mcg_cap = vcpu->arch.mcg_cap; 2188 unsigned bank_num = mcg_cap & 0xff; 2189 2190 switch (msr) { 2191 case MSR_IA32_P5_MC_ADDR: 2192 case MSR_IA32_P5_MC_TYPE: 2193 data = 0; 2194 break; 2195 case MSR_IA32_MCG_CAP: 2196 data = vcpu->arch.mcg_cap; 2197 break; 2198 case MSR_IA32_MCG_CTL: 2199 if (!(mcg_cap & MCG_CTL_P)) 2200 return 1; 2201 data = vcpu->arch.mcg_ctl; 2202 break; 2203 case MSR_IA32_MCG_STATUS: 2204 data = vcpu->arch.mcg_status; 2205 break; 2206 default: 2207 if (msr >= MSR_IA32_MC0_CTL && 2208 msr < MSR_IA32_MC0_CTL + 4 * bank_num) { 2209 u32 offset = msr - MSR_IA32_MC0_CTL; 2210 data = vcpu->arch.mce_banks[offset]; 2211 break; 2212 } 2213 return 1; 2214 } 2215 *pdata = data; 2216 return 0; 2217 } 2218 2219 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2220 { 2221 u64 data = 0; 2222 struct kvm *kvm = vcpu->kvm; 2223 2224 switch (msr) { 2225 case HV_X64_MSR_GUEST_OS_ID: 2226 data = kvm->arch.hv_guest_os_id; 2227 break; 2228 case HV_X64_MSR_HYPERCALL: 2229 data = kvm->arch.hv_hypercall; 2230 break; 2231 default: 2232 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2233 return 1; 2234 } 2235 2236 *pdata = data; 2237 return 0; 2238 } 2239 2240 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2241 { 2242 u64 data = 0; 2243 2244 switch (msr) { 2245 case HV_X64_MSR_VP_INDEX: { 2246 int r; 2247 struct kvm_vcpu *v; 2248 kvm_for_each_vcpu(r, v, vcpu->kvm) 2249 if (v == vcpu) 2250 data = r; 2251 break; 2252 } 2253 case HV_X64_MSR_EOI: 2254 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); 2255 case HV_X64_MSR_ICR: 2256 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); 2257 case HV_X64_MSR_TPR: 2258 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); 2259 case HV_X64_MSR_APIC_ASSIST_PAGE: 2260 data = vcpu->arch.hv_vapic; 2261 break; 2262 default: 2263 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2264 return 1; 2265 } 2266 *pdata = data; 2267 return 0; 2268 } 2269 2270 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2271 { 2272 u64 data; 2273 2274 switch (msr) { 2275 case MSR_IA32_PLATFORM_ID: 2276 case MSR_IA32_EBL_CR_POWERON: 2277 case MSR_IA32_DEBUGCTLMSR: 2278 case 
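/*
 * The variable-range branch of get_msr_mtrr() above relies on the
 * architectural layout of the MTRR MSRs: 0x200 + 2n is IA32_MTRR_PHYSBASEn
 * and 0x200 + 2n + 1 is IA32_MTRR_PHYSMASKn.  The same decode on a sample
 * value (chosen only for illustration):
 *
 *	u32 msr = 0x205;			// IA32_MTRR_PHYSMASK2
 *	int idx = (msr - 0x200) / 2;		// idx == 2
 *	int is_mtrr_mask = msr - 0x200 - 2 * idx;	// 1 -> the mask register
 */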
MSR_IA32_LASTBRANCHFROMIP: 2279 case MSR_IA32_LASTBRANCHTOIP: 2280 case MSR_IA32_LASTINTFROMIP: 2281 case MSR_IA32_LASTINTTOIP: 2282 case MSR_K8_SYSCFG: 2283 case MSR_K7_HWCR: 2284 case MSR_VM_HSAVE_PA: 2285 case MSR_K7_EVNTSEL0: 2286 case MSR_K7_PERFCTR0: 2287 case MSR_K8_INT_PENDING_MSG: 2288 case MSR_AMD64_NB_CFG: 2289 case MSR_FAM10H_MMIO_CONF_BASE: 2290 case MSR_AMD64_BU_CFG2: 2291 data = 0; 2292 break; 2293 case MSR_P6_PERFCTR0: 2294 case MSR_P6_PERFCTR1: 2295 case MSR_P6_EVNTSEL0: 2296 case MSR_P6_EVNTSEL1: 2297 if (kvm_pmu_msr(vcpu, msr)) 2298 return kvm_pmu_get_msr(vcpu, msr, pdata); 2299 data = 0; 2300 break; 2301 case MSR_IA32_UCODE_REV: 2302 data = 0x100000000ULL; 2303 break; 2304 case MSR_MTRRcap: 2305 data = 0x500 | KVM_NR_VAR_MTRR; 2306 break; 2307 case 0x200 ... 0x2ff: 2308 return get_msr_mtrr(vcpu, msr, pdata); 2309 case 0xcd: /* fsb frequency */ 2310 data = 3; 2311 break; 2312 /* 2313 * MSR_EBC_FREQUENCY_ID 2314 * Conservative value valid for even the basic CPU models. 2315 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 2316 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 2317 * and 266MHz for model 3, or 4. Set Core Clock 2318 * Frequency to System Bus Frequency Ratio to 1 (bits 2319 * 31:24) even though these are only valid for CPU 2320 * models > 2, however guests may end up dividing or 2321 * multiplying by zero otherwise. 2322 */ 2323 case MSR_EBC_FREQUENCY_ID: 2324 data = 1 << 24; 2325 break; 2326 case MSR_IA32_APICBASE: 2327 data = kvm_get_apic_base(vcpu); 2328 break; 2329 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2330 return kvm_x2apic_msr_read(vcpu, msr, pdata); 2331 break; 2332 case MSR_IA32_TSCDEADLINE: 2333 data = kvm_get_lapic_tscdeadline_msr(vcpu); 2334 break; 2335 case MSR_IA32_TSC_ADJUST: 2336 data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 2337 break; 2338 case MSR_IA32_MISC_ENABLE: 2339 data = vcpu->arch.ia32_misc_enable_msr; 2340 break; 2341 case MSR_IA32_PERF_STATUS: 2342 /* TSC increment by tick */ 2343 data = 1000ULL; 2344 /* CPU multiplier */ 2345 data |= (((uint64_t)4ULL) << 40); 2346 break; 2347 case MSR_EFER: 2348 data = vcpu->arch.efer; 2349 break; 2350 case MSR_KVM_WALL_CLOCK: 2351 case MSR_KVM_WALL_CLOCK_NEW: 2352 data = vcpu->kvm->arch.wall_clock; 2353 break; 2354 case MSR_KVM_SYSTEM_TIME: 2355 case MSR_KVM_SYSTEM_TIME_NEW: 2356 data = vcpu->arch.time; 2357 break; 2358 case MSR_KVM_ASYNC_PF_EN: 2359 data = vcpu->arch.apf.msr_val; 2360 break; 2361 case MSR_KVM_STEAL_TIME: 2362 data = vcpu->arch.st.msr_val; 2363 break; 2364 case MSR_KVM_PV_EOI_EN: 2365 data = vcpu->arch.pv_eoi.msr_val; 2366 break; 2367 case MSR_IA32_P5_MC_ADDR: 2368 case MSR_IA32_P5_MC_TYPE: 2369 case MSR_IA32_MCG_CAP: 2370 case MSR_IA32_MCG_CTL: 2371 case MSR_IA32_MCG_STATUS: 2372 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: 2373 return get_msr_mce(vcpu, msr, pdata); 2374 case MSR_K7_CLK_CTL: 2375 /* 2376 * Provide expected ramp-up count for K7. All other 2377 * are set to zero, indicating minimum divisors for 2378 * every field. 2379 * 2380 * This prevents guest kernels on AMD host with CPU 2381 * type 6, model 8 and higher from exploding due to 2382 * the rdmsr failing. 2383 */ 2384 data = 0x20000000; 2385 break; 2386 case HV_X64_MSR_GUEST_OS_ID ... 
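/*
 * MSR_IA32_PERF_STATUS above is synthesized rather than read from the
 * host: the low bits report a fixed TSC increment of 1000 per tick and a
 * CPU multiplier of 4 is placed at bit 40, so a guest that decodes the
 * multiplier field sees:
 *
 *	u64 perf_status = 1000ULL | (4ULL << 40);
 *	u64 mult = (perf_status >> 40) & 0x1f;	// == 4
 */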
HV_X64_MSR_SINT15: 2387 if (kvm_hv_msr_partition_wide(msr)) { 2388 int r; 2389 mutex_lock(&vcpu->kvm->lock); 2390 r = get_msr_hyperv_pw(vcpu, msr, pdata); 2391 mutex_unlock(&vcpu->kvm->lock); 2392 return r; 2393 } else 2394 return get_msr_hyperv(vcpu, msr, pdata); 2395 break; 2396 case MSR_IA32_BBL_CR_CTL3: 2397 /* This legacy MSR exists but isn't fully documented in current 2398 * silicon. It is however accessed by winxp in very narrow 2399 * scenarios where it sets bit #19, itself documented as 2400 * a "reserved" bit. Best effort attempt to source coherent 2401 * read data here should the balance of the register be 2402 * interpreted by the guest: 2403 * 2404 * L2 cache control register 3: 64GB range, 256KB size, 2405 * enabled, latency 0x1, configured 2406 */ 2407 data = 0xbe702111; 2408 break; 2409 case MSR_AMD64_OSVW_ID_LENGTH: 2410 if (!guest_cpuid_has_osvw(vcpu)) 2411 return 1; 2412 data = vcpu->arch.osvw.length; 2413 break; 2414 case MSR_AMD64_OSVW_STATUS: 2415 if (!guest_cpuid_has_osvw(vcpu)) 2416 return 1; 2417 data = vcpu->arch.osvw.status; 2418 break; 2419 default: 2420 if (kvm_pmu_msr(vcpu, msr)) 2421 return kvm_pmu_get_msr(vcpu, msr, pdata); 2422 if (!ignore_msrs) { 2423 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); 2424 return 1; 2425 } else { 2426 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); 2427 data = 0; 2428 } 2429 break; 2430 } 2431 *pdata = data; 2432 return 0; 2433 } 2434 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 2435 2436 /* 2437 * Read or write a bunch of msrs. All parameters are kernel addresses. 2438 * 2439 * @return number of msrs set successfully. 2440 */ 2441 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 2442 struct kvm_msr_entry *entries, 2443 int (*do_msr)(struct kvm_vcpu *vcpu, 2444 unsigned index, u64 *data)) 2445 { 2446 int i, idx; 2447 2448 idx = srcu_read_lock(&vcpu->kvm->srcu); 2449 for (i = 0; i < msrs->nmsrs; ++i) 2450 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 2451 break; 2452 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2453 2454 return i; 2455 } 2456 2457 /* 2458 * Read or write a bunch of msrs. Parameters are user addresses. 2459 * 2460 * @return number of msrs set successfully. 
2461 */ 2462 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 2463 int (*do_msr)(struct kvm_vcpu *vcpu, 2464 unsigned index, u64 *data), 2465 int writeback) 2466 { 2467 struct kvm_msrs msrs; 2468 struct kvm_msr_entry *entries; 2469 int r, n; 2470 unsigned size; 2471 2472 r = -EFAULT; 2473 if (copy_from_user(&msrs, user_msrs, sizeof msrs)) 2474 goto out; 2475 2476 r = -E2BIG; 2477 if (msrs.nmsrs >= MAX_IO_MSRS) 2478 goto out; 2479 2480 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 2481 entries = memdup_user(user_msrs->entries, size); 2482 if (IS_ERR(entries)) { 2483 r = PTR_ERR(entries); 2484 goto out; 2485 } 2486 2487 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 2488 if (r < 0) 2489 goto out_free; 2490 2491 r = -EFAULT; 2492 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 2493 goto out_free; 2494 2495 r = n; 2496 2497 out_free: 2498 kfree(entries); 2499 out: 2500 return r; 2501 } 2502 2503 int kvm_dev_ioctl_check_extension(long ext) 2504 { 2505 int r; 2506 2507 switch (ext) { 2508 case KVM_CAP_IRQCHIP: 2509 case KVM_CAP_HLT: 2510 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 2511 case KVM_CAP_SET_TSS_ADDR: 2512 case KVM_CAP_EXT_CPUID: 2513 case KVM_CAP_CLOCKSOURCE: 2514 case KVM_CAP_PIT: 2515 case KVM_CAP_NOP_IO_DELAY: 2516 case KVM_CAP_MP_STATE: 2517 case KVM_CAP_SYNC_MMU: 2518 case KVM_CAP_USER_NMI: 2519 case KVM_CAP_REINJECT_CONTROL: 2520 case KVM_CAP_IRQ_INJECT_STATUS: 2521 case KVM_CAP_IRQFD: 2522 case KVM_CAP_IOEVENTFD: 2523 case KVM_CAP_PIT2: 2524 case KVM_CAP_PIT_STATE2: 2525 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2526 case KVM_CAP_XEN_HVM: 2527 case KVM_CAP_ADJUST_CLOCK: 2528 case KVM_CAP_VCPU_EVENTS: 2529 case KVM_CAP_HYPERV: 2530 case KVM_CAP_HYPERV_VAPIC: 2531 case KVM_CAP_HYPERV_SPIN: 2532 case KVM_CAP_PCI_SEGMENT: 2533 case KVM_CAP_DEBUGREGS: 2534 case KVM_CAP_X86_ROBUST_SINGLESTEP: 2535 case KVM_CAP_XSAVE: 2536 case KVM_CAP_ASYNC_PF: 2537 case KVM_CAP_GET_TSC_KHZ: 2538 case KVM_CAP_KVMCLOCK_CTRL: 2539 case KVM_CAP_READONLY_MEM: 2540 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2541 case KVM_CAP_ASSIGN_DEV_IRQ: 2542 case KVM_CAP_PCI_2_3: 2543 #endif 2544 r = 1; 2545 break; 2546 case KVM_CAP_COALESCED_MMIO: 2547 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 2548 break; 2549 case KVM_CAP_VAPIC: 2550 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); 2551 break; 2552 case KVM_CAP_NR_VCPUS: 2553 r = KVM_SOFT_MAX_VCPUS; 2554 break; 2555 case KVM_CAP_MAX_VCPUS: 2556 r = KVM_MAX_VCPUS; 2557 break; 2558 case KVM_CAP_NR_MEMSLOTS: 2559 r = KVM_USER_MEM_SLOTS; 2560 break; 2561 case KVM_CAP_PV_MMU: /* obsolete */ 2562 r = 0; 2563 break; 2564 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2565 case KVM_CAP_IOMMU: 2566 r = iommu_present(&pci_bus_type); 2567 break; 2568 #endif 2569 case KVM_CAP_MCE: 2570 r = KVM_MAX_MCE_BANKS; 2571 break; 2572 case KVM_CAP_XCRS: 2573 r = cpu_has_xsave; 2574 break; 2575 case KVM_CAP_TSC_CONTROL: 2576 r = kvm_has_tsc_control; 2577 break; 2578 case KVM_CAP_TSC_DEADLINE_TIMER: 2579 r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); 2580 break; 2581 default: 2582 r = 0; 2583 break; 2584 } 2585 return r; 2586 2587 } 2588 2589 long kvm_arch_dev_ioctl(struct file *filp, 2590 unsigned int ioctl, unsigned long arg) 2591 { 2592 void __user *argp = (void __user *)arg; 2593 long r; 2594 2595 switch (ioctl) { 2596 case KVM_GET_MSR_INDEX_LIST: { 2597 struct kvm_msr_list __user *user_msr_list = argp; 2598 struct kvm_msr_list msr_list; 2599 unsigned n; 2600 2601 r = -EFAULT; 2602 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) 2603 goto out; 2604 n = 
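/*
 * msr_io() above backs the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls and
 * returns how many entries were actually processed.  A hedged userspace
 * sketch reading one MSR (vcpu_fd is an assumption, error handling
 * omitted):
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } req = {
 *		.hdr.nmsrs  = 1,
 *		.e[0].index = 0xc0000080,	// MSR_EFER
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *	// n == 1 on success; req.e[0].data then holds the guest's EFER
 */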
msr_list.nmsrs; 2605 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); 2606 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) 2607 goto out; 2608 r = -E2BIG; 2609 if (n < msr_list.nmsrs) 2610 goto out; 2611 r = -EFAULT; 2612 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 2613 num_msrs_to_save * sizeof(u32))) 2614 goto out; 2615 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 2616 &emulated_msrs, 2617 ARRAY_SIZE(emulated_msrs) * sizeof(u32))) 2618 goto out; 2619 r = 0; 2620 break; 2621 } 2622 case KVM_GET_SUPPORTED_CPUID: { 2623 struct kvm_cpuid2 __user *cpuid_arg = argp; 2624 struct kvm_cpuid2 cpuid; 2625 2626 r = -EFAULT; 2627 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 2628 goto out; 2629 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, 2630 cpuid_arg->entries); 2631 if (r) 2632 goto out; 2633 2634 r = -EFAULT; 2635 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 2636 goto out; 2637 r = 0; 2638 break; 2639 } 2640 case KVM_X86_GET_MCE_CAP_SUPPORTED: { 2641 u64 mce_cap; 2642 2643 mce_cap = KVM_MCE_CAP_SUPPORTED; 2644 r = -EFAULT; 2645 if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) 2646 goto out; 2647 r = 0; 2648 break; 2649 } 2650 default: 2651 r = -EINVAL; 2652 } 2653 out: 2654 return r; 2655 } 2656 2657 static void wbinvd_ipi(void *garbage) 2658 { 2659 wbinvd(); 2660 } 2661 2662 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 2663 { 2664 return vcpu->kvm->arch.iommu_domain && 2665 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); 2666 } 2667 2668 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 2669 { 2670 /* Address WBINVD may be executed by guest */ 2671 if (need_emulate_wbinvd(vcpu)) { 2672 if (kvm_x86_ops->has_wbinvd_exit()) 2673 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 2674 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 2675 smp_call_function_single(vcpu->cpu, 2676 wbinvd_ipi, NULL, 1); 2677 } 2678 2679 kvm_x86_ops->vcpu_load(vcpu, cpu); 2680 2681 /* Apply any externally detected TSC adjustments (due to suspend) */ 2682 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 2683 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 2684 vcpu->arch.tsc_offset_adjustment = 0; 2685 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); 2686 } 2687 2688 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { 2689 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
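/*
 * KVM_GET_MSR_INDEX_LIST above follows the usual two-step sizing
 * protocol: the required count is written back before the -E2BIG check,
 * so a caller can probe first and then retry with a large enough buffer.
 * A hedged userspace sketch (kvm_fd for /dev/kvm is an assumption):
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	// -E2BIG, nmsrs filled
 *	struct kvm_msr_list *list =
 *		malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	// succeeds this time
 */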
0 : 2690 native_read_tsc() - vcpu->arch.last_host_tsc; 2691 if (tsc_delta < 0) 2692 mark_tsc_unstable("KVM discovered backwards TSC"); 2693 if (check_tsc_unstable()) { 2694 u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, 2695 vcpu->arch.last_guest_tsc); 2696 kvm_x86_ops->write_tsc_offset(vcpu, offset); 2697 vcpu->arch.tsc_catchup = 1; 2698 } 2699 /* 2700 * On a host with synchronized TSC, there is no need to update 2701 * kvmclock on vcpu->cpu migration 2702 */ 2703 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 2704 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2705 if (vcpu->cpu != cpu) 2706 kvm_migrate_timers(vcpu); 2707 vcpu->cpu = cpu; 2708 } 2709 2710 accumulate_steal_time(vcpu); 2711 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2712 } 2713 2714 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 2715 { 2716 kvm_x86_ops->vcpu_put(vcpu); 2717 kvm_put_guest_fpu(vcpu); 2718 vcpu->arch.last_host_tsc = native_read_tsc(); 2719 } 2720 2721 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 2722 struct kvm_lapic_state *s) 2723 { 2724 kvm_x86_ops->sync_pir_to_irr(vcpu); 2725 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); 2726 2727 return 0; 2728 } 2729 2730 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 2731 struct kvm_lapic_state *s) 2732 { 2733 kvm_apic_post_state_restore(vcpu, s); 2734 update_cr8_intercept(vcpu); 2735 2736 return 0; 2737 } 2738 2739 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 2740 struct kvm_interrupt *irq) 2741 { 2742 if (irq->irq >= KVM_NR_INTERRUPTS) 2743 return -EINVAL; 2744 if (irqchip_in_kernel(vcpu->kvm)) 2745 return -ENXIO; 2746 2747 kvm_queue_interrupt(vcpu, irq->irq, false); 2748 kvm_make_request(KVM_REQ_EVENT, vcpu); 2749 2750 return 0; 2751 } 2752 2753 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 2754 { 2755 kvm_inject_nmi(vcpu); 2756 2757 return 0; 2758 } 2759 2760 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 2761 struct kvm_tpr_access_ctl *tac) 2762 { 2763 if (tac->flags) 2764 return -EINVAL; 2765 vcpu->arch.tpr_access_reporting = !!tac->enabled; 2766 return 0; 2767 } 2768 2769 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 2770 u64 mcg_cap) 2771 { 2772 int r; 2773 unsigned bank_num = mcg_cap & 0xff, bank; 2774 2775 r = -EINVAL; 2776 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) 2777 goto out; 2778 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) 2779 goto out; 2780 r = 0; 2781 vcpu->arch.mcg_cap = mcg_cap; 2782 /* Init IA32_MCG_CTL to all 1s */ 2783 if (mcg_cap & MCG_CTL_P) 2784 vcpu->arch.mcg_ctl = ~(u64)0; 2785 /* Init IA32_MCi_CTL to all 1s */ 2786 for (bank = 0; bank < bank_num; bank++) 2787 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 2788 out: 2789 return r; 2790 } 2791 2792 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 2793 struct kvm_x86_mce *mce) 2794 { 2795 u64 mcg_cap = vcpu->arch.mcg_cap; 2796 unsigned bank_num = mcg_cap & 0xff; 2797 u64 *banks = vcpu->arch.mce_banks; 2798 2799 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 2800 return -EINVAL; 2801 /* 2802 * if IA32_MCG_CTL is not all 1s, the uncorrected error 2803 * reporting is disabled 2804 */ 2805 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 2806 vcpu->arch.mcg_ctl != ~(u64)0) 2807 return 0; 2808 banks += 4 * mce->bank; 2809 /* 2810 * if IA32_MCi_CTL is not all 1s, the uncorrected error 2811 * reporting is disabled for the bank 2812 */ 2813 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 2814 return 0; 2815 if (mce->status & 
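/*
 * kvm_vcpu_ioctl_x86_setup_mce() above takes the bank count in the low
 * byte of mcg_cap and only accepts feature bits within
 * KVM_MCE_CAP_SUPPORTED.  A value that passes those checks, purely as an
 * illustration:
 *
 *	u64 mcg_cap = MCG_CTL_P | MCG_SER_P | 6;	// 6 MCE banks
 *	unsigned bank_num = mcg_cap & 0xff;		// == 6, < KVM_MAX_MCE_BANKS
 */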
MCI_STATUS_UC) { 2816 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 2817 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 2818 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2819 return 0; 2820 } 2821 if (banks[1] & MCI_STATUS_VAL) 2822 mce->status |= MCI_STATUS_OVER; 2823 banks[2] = mce->addr; 2824 banks[3] = mce->misc; 2825 vcpu->arch.mcg_status = mce->mcg_status; 2826 banks[1] = mce->status; 2827 kvm_queue_exception(vcpu, MC_VECTOR); 2828 } else if (!(banks[1] & MCI_STATUS_VAL) 2829 || !(banks[1] & MCI_STATUS_UC)) { 2830 if (banks[1] & MCI_STATUS_VAL) 2831 mce->status |= MCI_STATUS_OVER; 2832 banks[2] = mce->addr; 2833 banks[3] = mce->misc; 2834 banks[1] = mce->status; 2835 } else 2836 banks[1] |= MCI_STATUS_OVER; 2837 return 0; 2838 } 2839 2840 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 2841 struct kvm_vcpu_events *events) 2842 { 2843 process_nmi(vcpu); 2844 events->exception.injected = 2845 vcpu->arch.exception.pending && 2846 !kvm_exception_is_soft(vcpu->arch.exception.nr); 2847 events->exception.nr = vcpu->arch.exception.nr; 2848 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 2849 events->exception.pad = 0; 2850 events->exception.error_code = vcpu->arch.exception.error_code; 2851 2852 events->interrupt.injected = 2853 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; 2854 events->interrupt.nr = vcpu->arch.interrupt.nr; 2855 events->interrupt.soft = 0; 2856 events->interrupt.shadow = 2857 kvm_x86_ops->get_interrupt_shadow(vcpu, 2858 KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); 2859 2860 events->nmi.injected = vcpu->arch.nmi_injected; 2861 events->nmi.pending = vcpu->arch.nmi_pending != 0; 2862 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); 2863 events->nmi.pad = 0; 2864 2865 events->sipi_vector = 0; /* never valid when reporting to user space */ 2866 2867 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 2868 | KVM_VCPUEVENT_VALID_SHADOW); 2869 memset(&events->reserved, 0, sizeof(events->reserved)); 2870 } 2871 2872 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 2873 struct kvm_vcpu_events *events) 2874 { 2875 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 2876 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 2877 | KVM_VCPUEVENT_VALID_SHADOW)) 2878 return -EINVAL; 2879 2880 process_nmi(vcpu); 2881 vcpu->arch.exception.pending = events->exception.injected; 2882 vcpu->arch.exception.nr = events->exception.nr; 2883 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 2884 vcpu->arch.exception.error_code = events->exception.error_code; 2885 2886 vcpu->arch.interrupt.pending = events->interrupt.injected; 2887 vcpu->arch.interrupt.nr = events->interrupt.nr; 2888 vcpu->arch.interrupt.soft = events->interrupt.soft; 2889 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 2890 kvm_x86_ops->set_interrupt_shadow(vcpu, 2891 events->interrupt.shadow); 2892 2893 vcpu->arch.nmi_injected = events->nmi.injected; 2894 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 2895 vcpu->arch.nmi_pending = events->nmi.pending; 2896 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); 2897 2898 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 2899 kvm_vcpu_has_lapic(vcpu)) 2900 vcpu->arch.apic->sipi_vector = events->sipi_vector; 2901 2902 kvm_make_request(KVM_REQ_EVENT, vcpu); 2903 2904 return 0; 2905 } 2906 2907 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 2908 struct kvm_debugregs *dbgregs) 2909 { 2910 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 2911 dbgregs->dr6 = 
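/*
 * kvm_vcpu_ioctl_x86_set_vcpu_events() above honours the optional fields
 * only when the matching VALID bit is set in events->flags, while the
 * exception and interrupt fields are applied unconditionally, which is
 * why callers usually start from KVM_GET_VCPU_EVENTS.  A hedged userspace
 * sketch that marks an NMI pending (vcpu_fd is an assumption):
 *
 *	struct kvm_vcpu_events ev;
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);	// preserve current state
 *	ev.nmi.pending = 1;
 *	ev.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 */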
vcpu->arch.dr6; 2912 dbgregs->dr7 = vcpu->arch.dr7; 2913 dbgregs->flags = 0; 2914 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 2915 } 2916 2917 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 2918 struct kvm_debugregs *dbgregs) 2919 { 2920 if (dbgregs->flags) 2921 return -EINVAL; 2922 2923 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 2924 vcpu->arch.dr6 = dbgregs->dr6; 2925 vcpu->arch.dr7 = dbgregs->dr7; 2926 2927 return 0; 2928 } 2929 2930 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 2931 struct kvm_xsave *guest_xsave) 2932 { 2933 if (cpu_has_xsave) 2934 memcpy(guest_xsave->region, 2935 &vcpu->arch.guest_fpu.state->xsave, 2936 xstate_size); 2937 else { 2938 memcpy(guest_xsave->region, 2939 &vcpu->arch.guest_fpu.state->fxsave, 2940 sizeof(struct i387_fxsave_struct)); 2941 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 2942 XSTATE_FPSSE; 2943 } 2944 } 2945 2946 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 2947 struct kvm_xsave *guest_xsave) 2948 { 2949 u64 xstate_bv = 2950 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 2951 2952 if (cpu_has_xsave) 2953 memcpy(&vcpu->arch.guest_fpu.state->xsave, 2954 guest_xsave->region, xstate_size); 2955 else { 2956 if (xstate_bv & ~XSTATE_FPSSE) 2957 return -EINVAL; 2958 memcpy(&vcpu->arch.guest_fpu.state->fxsave, 2959 guest_xsave->region, sizeof(struct i387_fxsave_struct)); 2960 } 2961 return 0; 2962 } 2963 2964 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 2965 struct kvm_xcrs *guest_xcrs) 2966 { 2967 if (!cpu_has_xsave) { 2968 guest_xcrs->nr_xcrs = 0; 2969 return; 2970 } 2971 2972 guest_xcrs->nr_xcrs = 1; 2973 guest_xcrs->flags = 0; 2974 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 2975 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 2976 } 2977 2978 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 2979 struct kvm_xcrs *guest_xcrs) 2980 { 2981 int i, r = 0; 2982 2983 if (!cpu_has_xsave) 2984 return -EINVAL; 2985 2986 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 2987 return -EINVAL; 2988 2989 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 2990 /* Only support XCR0 currently */ 2991 if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { 2992 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 2993 guest_xcrs->xcrs[0].value); 2994 break; 2995 } 2996 if (r) 2997 r = -EINVAL; 2998 return r; 2999 } 3000 3001 /* 3002 * kvm_set_guest_paused() indicates to the guest kernel that it has been 3003 * stopped by the hypervisor. This function will be called from the host only. 3004 * EINVAL is returned when the host attempts to set the flag for a guest that 3005 * does not support pv clocks. 
3006 */ 3007 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 3008 { 3009 if (!vcpu->arch.pv_time_enabled) 3010 return -EINVAL; 3011 vcpu->arch.pvclock_set_guest_stopped_request = true; 3012 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3013 return 0; 3014 } 3015 3016 long kvm_arch_vcpu_ioctl(struct file *filp, 3017 unsigned int ioctl, unsigned long arg) 3018 { 3019 struct kvm_vcpu *vcpu = filp->private_data; 3020 void __user *argp = (void __user *)arg; 3021 int r; 3022 union { 3023 struct kvm_lapic_state *lapic; 3024 struct kvm_xsave *xsave; 3025 struct kvm_xcrs *xcrs; 3026 void *buffer; 3027 } u; 3028 3029 u.buffer = NULL; 3030 switch (ioctl) { 3031 case KVM_GET_LAPIC: { 3032 r = -EINVAL; 3033 if (!vcpu->arch.apic) 3034 goto out; 3035 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); 3036 3037 r = -ENOMEM; 3038 if (!u.lapic) 3039 goto out; 3040 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 3041 if (r) 3042 goto out; 3043 r = -EFAULT; 3044 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 3045 goto out; 3046 r = 0; 3047 break; 3048 } 3049 case KVM_SET_LAPIC: { 3050 r = -EINVAL; 3051 if (!vcpu->arch.apic) 3052 goto out; 3053 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 3054 if (IS_ERR(u.lapic)) 3055 return PTR_ERR(u.lapic); 3056 3057 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 3058 break; 3059 } 3060 case KVM_INTERRUPT: { 3061 struct kvm_interrupt irq; 3062 3063 r = -EFAULT; 3064 if (copy_from_user(&irq, argp, sizeof irq)) 3065 goto out; 3066 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 3067 break; 3068 } 3069 case KVM_NMI: { 3070 r = kvm_vcpu_ioctl_nmi(vcpu); 3071 break; 3072 } 3073 case KVM_SET_CPUID: { 3074 struct kvm_cpuid __user *cpuid_arg = argp; 3075 struct kvm_cpuid cpuid; 3076 3077 r = -EFAULT; 3078 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3079 goto out; 3080 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 3081 break; 3082 } 3083 case KVM_SET_CPUID2: { 3084 struct kvm_cpuid2 __user *cpuid_arg = argp; 3085 struct kvm_cpuid2 cpuid; 3086 3087 r = -EFAULT; 3088 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3089 goto out; 3090 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 3091 cpuid_arg->entries); 3092 break; 3093 } 3094 case KVM_GET_CPUID2: { 3095 struct kvm_cpuid2 __user *cpuid_arg = argp; 3096 struct kvm_cpuid2 cpuid; 3097 3098 r = -EFAULT; 3099 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3100 goto out; 3101 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 3102 cpuid_arg->entries); 3103 if (r) 3104 goto out; 3105 r = -EFAULT; 3106 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3107 goto out; 3108 r = 0; 3109 break; 3110 } 3111 case KVM_GET_MSRS: 3112 r = msr_io(vcpu, argp, kvm_get_msr, 1); 3113 break; 3114 case KVM_SET_MSRS: 3115 r = msr_io(vcpu, argp, do_set_msr, 0); 3116 break; 3117 case KVM_TPR_ACCESS_REPORTING: { 3118 struct kvm_tpr_access_ctl tac; 3119 3120 r = -EFAULT; 3121 if (copy_from_user(&tac, argp, sizeof tac)) 3122 goto out; 3123 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 3124 if (r) 3125 goto out; 3126 r = -EFAULT; 3127 if (copy_to_user(argp, &tac, sizeof tac)) 3128 goto out; 3129 r = 0; 3130 break; 3131 }; 3132 case KVM_SET_VAPIC_ADDR: { 3133 struct kvm_vapic_addr va; 3134 3135 r = -EINVAL; 3136 if (!irqchip_in_kernel(vcpu->kvm)) 3137 goto out; 3138 r = -EFAULT; 3139 if (copy_from_user(&va, argp, sizeof va)) 3140 goto out; 3141 r = 0; 3142 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3143 break; 3144 } 3145 case KVM_X86_SETUP_MCE: { 3146 u64 mcg_cap; 3147 3148 r = -EFAULT; 3149 if 
(copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) 3150 goto out; 3151 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 3152 break; 3153 } 3154 case KVM_X86_SET_MCE: { 3155 struct kvm_x86_mce mce; 3156 3157 r = -EFAULT; 3158 if (copy_from_user(&mce, argp, sizeof mce)) 3159 goto out; 3160 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 3161 break; 3162 } 3163 case KVM_GET_VCPU_EVENTS: { 3164 struct kvm_vcpu_events events; 3165 3166 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 3167 3168 r = -EFAULT; 3169 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 3170 break; 3171 r = 0; 3172 break; 3173 } 3174 case KVM_SET_VCPU_EVENTS: { 3175 struct kvm_vcpu_events events; 3176 3177 r = -EFAULT; 3178 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 3179 break; 3180 3181 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 3182 break; 3183 } 3184 case KVM_GET_DEBUGREGS: { 3185 struct kvm_debugregs dbgregs; 3186 3187 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 3188 3189 r = -EFAULT; 3190 if (copy_to_user(argp, &dbgregs, 3191 sizeof(struct kvm_debugregs))) 3192 break; 3193 r = 0; 3194 break; 3195 } 3196 case KVM_SET_DEBUGREGS: { 3197 struct kvm_debugregs dbgregs; 3198 3199 r = -EFAULT; 3200 if (copy_from_user(&dbgregs, argp, 3201 sizeof(struct kvm_debugregs))) 3202 break; 3203 3204 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 3205 break; 3206 } 3207 case KVM_GET_XSAVE: { 3208 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); 3209 r = -ENOMEM; 3210 if (!u.xsave) 3211 break; 3212 3213 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 3214 3215 r = -EFAULT; 3216 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 3217 break; 3218 r = 0; 3219 break; 3220 } 3221 case KVM_SET_XSAVE: { 3222 u.xsave = memdup_user(argp, sizeof(*u.xsave)); 3223 if (IS_ERR(u.xsave)) 3224 return PTR_ERR(u.xsave); 3225 3226 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 3227 break; 3228 } 3229 case KVM_GET_XCRS: { 3230 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); 3231 r = -ENOMEM; 3232 if (!u.xcrs) 3233 break; 3234 3235 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 3236 3237 r = -EFAULT; 3238 if (copy_to_user(argp, u.xcrs, 3239 sizeof(struct kvm_xcrs))) 3240 break; 3241 r = 0; 3242 break; 3243 } 3244 case KVM_SET_XCRS: { 3245 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 3246 if (IS_ERR(u.xcrs)) 3247 return PTR_ERR(u.xcrs); 3248 3249 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 3250 break; 3251 } 3252 case KVM_SET_TSC_KHZ: { 3253 u32 user_tsc_khz; 3254 3255 r = -EINVAL; 3256 user_tsc_khz = (u32)arg; 3257 3258 if (user_tsc_khz >= kvm_max_guest_tsc_khz) 3259 goto out; 3260 3261 if (user_tsc_khz == 0) 3262 user_tsc_khz = tsc_khz; 3263 3264 kvm_set_tsc_khz(vcpu, user_tsc_khz); 3265 3266 r = 0; 3267 goto out; 3268 } 3269 case KVM_GET_TSC_KHZ: { 3270 r = vcpu->arch.virtual_tsc_khz; 3271 goto out; 3272 } 3273 case KVM_KVMCLOCK_CTRL: { 3274 r = kvm_set_guest_paused(vcpu); 3275 goto out; 3276 } 3277 default: 3278 r = -EINVAL; 3279 } 3280 out: 3281 kfree(u.buffer); 3282 return r; 3283 } 3284 3285 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 3286 { 3287 return VM_FAULT_SIGBUS; 3288 } 3289 3290 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 3291 { 3292 int ret; 3293 3294 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 3295 return -EINVAL; 3296 ret = kvm_x86_ops->set_tss_addr(kvm, addr); 3297 return ret; 3298 } 3299 3300 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 3301 u64 ident_addr) 3302 { 3303 
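/*
 * kvm_vm_ioctl_set_tss_addr() above bounds the address so that three TSS
 * pages still fit below the 4GB boundary; with a 4KB PAGE_SIZE the limit
 * works out as:
 *
 *	unsigned long limit = (unsigned int)(-3 * PAGE_SIZE);	// 0xffffd000
 *	// any addr <= limit leaves room for 3 pages ending at 4GB
 */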
kvm->arch.ept_identity_map_addr = ident_addr; 3304 return 0; 3305 } 3306 3307 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 3308 u32 kvm_nr_mmu_pages) 3309 { 3310 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 3311 return -EINVAL; 3312 3313 mutex_lock(&kvm->slots_lock); 3314 3315 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 3316 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 3317 3318 mutex_unlock(&kvm->slots_lock); 3319 return 0; 3320 } 3321 3322 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 3323 { 3324 return kvm->arch.n_max_mmu_pages; 3325 } 3326 3327 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3328 { 3329 int r; 3330 3331 r = 0; 3332 switch (chip->chip_id) { 3333 case KVM_IRQCHIP_PIC_MASTER: 3334 memcpy(&chip->chip.pic, 3335 &pic_irqchip(kvm)->pics[0], 3336 sizeof(struct kvm_pic_state)); 3337 break; 3338 case KVM_IRQCHIP_PIC_SLAVE: 3339 memcpy(&chip->chip.pic, 3340 &pic_irqchip(kvm)->pics[1], 3341 sizeof(struct kvm_pic_state)); 3342 break; 3343 case KVM_IRQCHIP_IOAPIC: 3344 r = kvm_get_ioapic(kvm, &chip->chip.ioapic); 3345 break; 3346 default: 3347 r = -EINVAL; 3348 break; 3349 } 3350 return r; 3351 } 3352 3353 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3354 { 3355 int r; 3356 3357 r = 0; 3358 switch (chip->chip_id) { 3359 case KVM_IRQCHIP_PIC_MASTER: 3360 spin_lock(&pic_irqchip(kvm)->lock); 3361 memcpy(&pic_irqchip(kvm)->pics[0], 3362 &chip->chip.pic, 3363 sizeof(struct kvm_pic_state)); 3364 spin_unlock(&pic_irqchip(kvm)->lock); 3365 break; 3366 case KVM_IRQCHIP_PIC_SLAVE: 3367 spin_lock(&pic_irqchip(kvm)->lock); 3368 memcpy(&pic_irqchip(kvm)->pics[1], 3369 &chip->chip.pic, 3370 sizeof(struct kvm_pic_state)); 3371 spin_unlock(&pic_irqchip(kvm)->lock); 3372 break; 3373 case KVM_IRQCHIP_IOAPIC: 3374 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); 3375 break; 3376 default: 3377 r = -EINVAL; 3378 break; 3379 } 3380 kvm_pic_update_irq(pic_irqchip(kvm)); 3381 return r; 3382 } 3383 3384 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3385 { 3386 int r = 0; 3387 3388 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3389 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); 3390 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3391 return r; 3392 } 3393 3394 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3395 { 3396 int r = 0; 3397 3398 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3399 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3400 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3401 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3402 return r; 3403 } 3404 3405 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3406 { 3407 int r = 0; 3408 3409 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3410 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 3411 sizeof(ps->channels)); 3412 ps->flags = kvm->arch.vpit->pit_state.flags; 3413 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3414 memset(&ps->reserved, 0, sizeof(ps->reserved)); 3415 return r; 3416 } 3417 3418 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3419 { 3420 int r = 0, start = 0; 3421 u32 prev_legacy, cur_legacy; 3422 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3423 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 3424 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 3425 if (!prev_legacy && cur_legacy) 3426 start = 1; 3427 
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3428 sizeof(kvm->arch.vpit->pit_state.channels)); 3429 kvm->arch.vpit->pit_state.flags = ps->flags; 3430 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3431 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3432 return r; 3433 } 3434 3435 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 3436 struct kvm_reinject_control *control) 3437 { 3438 if (!kvm->arch.vpit) 3439 return -ENXIO; 3440 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3441 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; 3442 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3443 return 0; 3444 } 3445 3446 /** 3447 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 3448 * @kvm: kvm instance 3449 * @log: slot id and address to which we copy the log 3450 * 3451 * We need to keep it in mind that VCPU threads can write to the bitmap 3452 * concurrently. So, to avoid losing data, we keep the following order for 3453 * each bit: 3454 * 3455 * 1. Take a snapshot of the bit and clear it if needed. 3456 * 2. Write protect the corresponding page. 3457 * 3. Flush TLB's if needed. 3458 * 4. Copy the snapshot to the userspace. 3459 * 3460 * Between 2 and 3, the guest may write to the page using the remaining TLB 3461 * entry. This is not a problem because the page will be reported dirty at 3462 * step 4 using the snapshot taken before and step 3 ensures that successive 3463 * writes will be logged for the next call. 3464 */ 3465 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 3466 { 3467 int r; 3468 struct kvm_memory_slot *memslot; 3469 unsigned long n, i; 3470 unsigned long *dirty_bitmap; 3471 unsigned long *dirty_bitmap_buffer; 3472 bool is_dirty = false; 3473 3474 mutex_lock(&kvm->slots_lock); 3475 3476 r = -EINVAL; 3477 if (log->slot >= KVM_USER_MEM_SLOTS) 3478 goto out; 3479 3480 memslot = id_to_memslot(kvm->memslots, log->slot); 3481 3482 dirty_bitmap = memslot->dirty_bitmap; 3483 r = -ENOENT; 3484 if (!dirty_bitmap) 3485 goto out; 3486 3487 n = kvm_dirty_bitmap_bytes(memslot); 3488 3489 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); 3490 memset(dirty_bitmap_buffer, 0, n); 3491 3492 spin_lock(&kvm->mmu_lock); 3493 3494 for (i = 0; i < n / sizeof(long); i++) { 3495 unsigned long mask; 3496 gfn_t offset; 3497 3498 if (!dirty_bitmap[i]) 3499 continue; 3500 3501 is_dirty = true; 3502 3503 mask = xchg(&dirty_bitmap[i], 0); 3504 dirty_bitmap_buffer[i] = mask; 3505 3506 offset = i * BITS_PER_LONG; 3507 kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); 3508 } 3509 if (is_dirty) 3510 kvm_flush_remote_tlbs(kvm); 3511 3512 spin_unlock(&kvm->mmu_lock); 3513 3514 r = -EFAULT; 3515 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 3516 goto out; 3517 3518 r = 0; 3519 out: 3520 mutex_unlock(&kvm->slots_lock); 3521 return r; 3522 } 3523 3524 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 3525 bool line_status) 3526 { 3527 if (!irqchip_in_kernel(kvm)) 3528 return -ENXIO; 3529 3530 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 3531 irq_event->irq, irq_event->level, 3532 line_status); 3533 return 0; 3534 } 3535 3536 long kvm_arch_vm_ioctl(struct file *filp, 3537 unsigned int ioctl, unsigned long arg) 3538 { 3539 struct kvm *kvm = filp->private_data; 3540 void __user *argp = (void __user *)arg; 3541 int r = -ENOTTY; 3542 /* 3543 * This union makes it completely explicit to gcc-3.x 3544 * that these two variables' stack usage 
should be 3545 * combined, not added together. 3546 */ 3547 union { 3548 struct kvm_pit_state ps; 3549 struct kvm_pit_state2 ps2; 3550 struct kvm_pit_config pit_config; 3551 } u; 3552 3553 switch (ioctl) { 3554 case KVM_SET_TSS_ADDR: 3555 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 3556 break; 3557 case KVM_SET_IDENTITY_MAP_ADDR: { 3558 u64 ident_addr; 3559 3560 r = -EFAULT; 3561 if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) 3562 goto out; 3563 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 3564 break; 3565 } 3566 case KVM_SET_NR_MMU_PAGES: 3567 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 3568 break; 3569 case KVM_GET_NR_MMU_PAGES: 3570 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 3571 break; 3572 case KVM_CREATE_IRQCHIP: { 3573 struct kvm_pic *vpic; 3574 3575 mutex_lock(&kvm->lock); 3576 r = -EEXIST; 3577 if (kvm->arch.vpic) 3578 goto create_irqchip_unlock; 3579 r = -EINVAL; 3580 if (atomic_read(&kvm->online_vcpus)) 3581 goto create_irqchip_unlock; 3582 r = -ENOMEM; 3583 vpic = kvm_create_pic(kvm); 3584 if (vpic) { 3585 r = kvm_ioapic_init(kvm); 3586 if (r) { 3587 mutex_lock(&kvm->slots_lock); 3588 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3589 &vpic->dev_master); 3590 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3591 &vpic->dev_slave); 3592 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3593 &vpic->dev_eclr); 3594 mutex_unlock(&kvm->slots_lock); 3595 kfree(vpic); 3596 goto create_irqchip_unlock; 3597 } 3598 } else 3599 goto create_irqchip_unlock; 3600 smp_wmb(); 3601 kvm->arch.vpic = vpic; 3602 smp_wmb(); 3603 r = kvm_setup_default_irq_routing(kvm); 3604 if (r) { 3605 mutex_lock(&kvm->slots_lock); 3606 mutex_lock(&kvm->irq_lock); 3607 kvm_ioapic_destroy(kvm); 3608 kvm_destroy_pic(kvm); 3609 mutex_unlock(&kvm->irq_lock); 3610 mutex_unlock(&kvm->slots_lock); 3611 } 3612 create_irqchip_unlock: 3613 mutex_unlock(&kvm->lock); 3614 break; 3615 } 3616 case KVM_CREATE_PIT: 3617 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 3618 goto create_pit; 3619 case KVM_CREATE_PIT2: 3620 r = -EFAULT; 3621 if (copy_from_user(&u.pit_config, argp, 3622 sizeof(struct kvm_pit_config))) 3623 goto out; 3624 create_pit: 3625 mutex_lock(&kvm->slots_lock); 3626 r = -EEXIST; 3627 if (kvm->arch.vpit) 3628 goto create_pit_unlock; 3629 r = -ENOMEM; 3630 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 3631 if (kvm->arch.vpit) 3632 r = 0; 3633 create_pit_unlock: 3634 mutex_unlock(&kvm->slots_lock); 3635 break; 3636 case KVM_GET_IRQCHIP: { 3637 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3638 struct kvm_irqchip *chip; 3639 3640 chip = memdup_user(argp, sizeof(*chip)); 3641 if (IS_ERR(chip)) { 3642 r = PTR_ERR(chip); 3643 goto out; 3644 } 3645 3646 r = -ENXIO; 3647 if (!irqchip_in_kernel(kvm)) 3648 goto get_irqchip_out; 3649 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 3650 if (r) 3651 goto get_irqchip_out; 3652 r = -EFAULT; 3653 if (copy_to_user(argp, chip, sizeof *chip)) 3654 goto get_irqchip_out; 3655 r = 0; 3656 get_irqchip_out: 3657 kfree(chip); 3658 break; 3659 } 3660 case KVM_SET_IRQCHIP: { 3661 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3662 struct kvm_irqchip *chip; 3663 3664 chip = memdup_user(argp, sizeof(*chip)); 3665 if (IS_ERR(chip)) { 3666 r = PTR_ERR(chip); 3667 goto out; 3668 } 3669 3670 r = -ENXIO; 3671 if (!irqchip_in_kernel(kvm)) 3672 goto set_irqchip_out; 3673 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 3674 if (r) 3675 goto set_irqchip_out; 3676 r = 0; 3677 set_irqchip_out: 3678 kfree(chip); 3679 break; 3680 } 3681 case KVM_GET_PIT: { 3682 r = -EFAULT; 3683 if (copy_from_user(&u.ps, 
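/*
 * KVM_GET_IRQCHIP/KVM_SET_IRQCHIP above transfer one chip at a time,
 * selected by chip_id (0: PIC master, 1: PIC slave, 2: IOAPIC) and only
 * when the in-kernel irqchip exists.  A hedged userspace sketch that
 * snapshots the master PIC (vm_fd is an assumption):
 *
 *	struct kvm_irqchip chip = { .chip_id = KVM_IRQCHIP_PIC_MASTER };
 *	ioctl(vm_fd, KVM_GET_IRQCHIP, &chip);
 *	// chip.chip.pic now holds the kvm_pic_state of the master PIC
 */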
argp, sizeof(struct kvm_pit_state))) 3684 goto out; 3685 r = -ENXIO; 3686 if (!kvm->arch.vpit) 3687 goto out; 3688 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 3689 if (r) 3690 goto out; 3691 r = -EFAULT; 3692 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 3693 goto out; 3694 r = 0; 3695 break; 3696 } 3697 case KVM_SET_PIT: { 3698 r = -EFAULT; 3699 if (copy_from_user(&u.ps, argp, sizeof u.ps)) 3700 goto out; 3701 r = -ENXIO; 3702 if (!kvm->arch.vpit) 3703 goto out; 3704 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 3705 break; 3706 } 3707 case KVM_GET_PIT2: { 3708 r = -ENXIO; 3709 if (!kvm->arch.vpit) 3710 goto out; 3711 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 3712 if (r) 3713 goto out; 3714 r = -EFAULT; 3715 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 3716 goto out; 3717 r = 0; 3718 break; 3719 } 3720 case KVM_SET_PIT2: { 3721 r = -EFAULT; 3722 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 3723 goto out; 3724 r = -ENXIO; 3725 if (!kvm->arch.vpit) 3726 goto out; 3727 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 3728 break; 3729 } 3730 case KVM_REINJECT_CONTROL: { 3731 struct kvm_reinject_control control; 3732 r = -EFAULT; 3733 if (copy_from_user(&control, argp, sizeof(control))) 3734 goto out; 3735 r = kvm_vm_ioctl_reinject(kvm, &control); 3736 break; 3737 } 3738 case KVM_XEN_HVM_CONFIG: { 3739 r = -EFAULT; 3740 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, 3741 sizeof(struct kvm_xen_hvm_config))) 3742 goto out; 3743 r = -EINVAL; 3744 if (kvm->arch.xen_hvm_config.flags) 3745 goto out; 3746 r = 0; 3747 break; 3748 } 3749 case KVM_SET_CLOCK: { 3750 struct kvm_clock_data user_ns; 3751 u64 now_ns; 3752 s64 delta; 3753 3754 r = -EFAULT; 3755 if (copy_from_user(&user_ns, argp, sizeof(user_ns))) 3756 goto out; 3757 3758 r = -EINVAL; 3759 if (user_ns.flags) 3760 goto out; 3761 3762 r = 0; 3763 local_irq_disable(); 3764 now_ns = get_kernel_ns(); 3765 delta = user_ns.clock - now_ns; 3766 local_irq_enable(); 3767 kvm->arch.kvmclock_offset = delta; 3768 break; 3769 } 3770 case KVM_GET_CLOCK: { 3771 struct kvm_clock_data user_ns; 3772 u64 now_ns; 3773 3774 local_irq_disable(); 3775 now_ns = get_kernel_ns(); 3776 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3777 local_irq_enable(); 3778 user_ns.flags = 0; 3779 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 3780 3781 r = -EFAULT; 3782 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 3783 goto out; 3784 r = 0; 3785 break; 3786 } 3787 3788 default: 3789 ; 3790 } 3791 out: 3792 return r; 3793 } 3794 3795 static void kvm_init_msr_list(void) 3796 { 3797 u32 dummy[2]; 3798 unsigned i, j; 3799 3800 /* skip the first msrs in the list. 
KVM-specific */ 3801 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { 3802 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) 3803 continue; 3804 if (j < i) 3805 msrs_to_save[j] = msrs_to_save[i]; 3806 j++; 3807 } 3808 num_msrs_to_save = j; 3809 } 3810 3811 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 3812 const void *v) 3813 { 3814 int handled = 0; 3815 int n; 3816 3817 do { 3818 n = min(len, 8); 3819 if (!(vcpu->arch.apic && 3820 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) 3821 && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 3822 break; 3823 handled += n; 3824 addr += n; 3825 len -= n; 3826 v += n; 3827 } while (len); 3828 3829 return handled; 3830 } 3831 3832 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 3833 { 3834 int handled = 0; 3835 int n; 3836 3837 do { 3838 n = min(len, 8); 3839 if (!(vcpu->arch.apic && 3840 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) 3841 && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 3842 break; 3843 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); 3844 handled += n; 3845 addr += n; 3846 len -= n; 3847 v += n; 3848 } while (len); 3849 3850 return handled; 3851 } 3852 3853 static void kvm_set_segment(struct kvm_vcpu *vcpu, 3854 struct kvm_segment *var, int seg) 3855 { 3856 kvm_x86_ops->set_segment(vcpu, var, seg); 3857 } 3858 3859 void kvm_get_segment(struct kvm_vcpu *vcpu, 3860 struct kvm_segment *var, int seg) 3861 { 3862 kvm_x86_ops->get_segment(vcpu, var, seg); 3863 } 3864 3865 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) 3866 { 3867 gpa_t t_gpa; 3868 struct x86_exception exception; 3869 3870 BUG_ON(!mmu_is_nested(vcpu)); 3871 3872 /* NPT walks are always user-walks */ 3873 access |= PFERR_USER_MASK; 3874 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); 3875 3876 return t_gpa; 3877 } 3878 3879 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 3880 struct x86_exception *exception) 3881 { 3882 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3883 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3884 } 3885 3886 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 3887 struct x86_exception *exception) 3888 { 3889 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3890 access |= PFERR_FETCH_MASK; 3891 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3892 } 3893 3894 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 3895 struct x86_exception *exception) 3896 { 3897 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
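/*
 * The kvm_mmu_gva_to_gpa_{read,fetch,write} helpers here all build the
 * page-walk access mask the same way: PFERR_USER_MASK when the current
 * CPL is 3, plus PFERR_FETCH_MASK or PFERR_WRITE_MASK for instruction
 * fetches and writes.  For a user-mode write the translation therefore
 * amounts to:
 *
 *	u32 access = PFERR_USER_MASK | PFERR_WRITE_MASK;
 *	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &exc);
 */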
PFERR_USER_MASK : 0; 3898 access |= PFERR_WRITE_MASK; 3899 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3900 } 3901 3902 /* uses this to access any guest's mapped memory without checking CPL */ 3903 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 3904 struct x86_exception *exception) 3905 { 3906 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); 3907 } 3908 3909 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 3910 struct kvm_vcpu *vcpu, u32 access, 3911 struct x86_exception *exception) 3912 { 3913 void *data = val; 3914 int r = X86EMUL_CONTINUE; 3915 3916 while (bytes) { 3917 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, 3918 exception); 3919 unsigned offset = addr & (PAGE_SIZE-1); 3920 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 3921 int ret; 3922 3923 if (gpa == UNMAPPED_GVA) 3924 return X86EMUL_PROPAGATE_FAULT; 3925 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); 3926 if (ret < 0) { 3927 r = X86EMUL_IO_NEEDED; 3928 goto out; 3929 } 3930 3931 bytes -= toread; 3932 data += toread; 3933 addr += toread; 3934 } 3935 out: 3936 return r; 3937 } 3938 3939 /* used for instruction fetching */ 3940 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 3941 gva_t addr, void *val, unsigned int bytes, 3942 struct x86_exception *exception) 3943 { 3944 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3945 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3946 3947 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 3948 access | PFERR_FETCH_MASK, 3949 exception); 3950 } 3951 3952 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 3953 gva_t addr, void *val, unsigned int bytes, 3954 struct x86_exception *exception) 3955 { 3956 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3957 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3958 3959 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 3960 exception); 3961 } 3962 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 3963 3964 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, 3965 gva_t addr, void *val, unsigned int bytes, 3966 struct x86_exception *exception) 3967 { 3968 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3969 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); 3970 } 3971 3972 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, 3973 gva_t addr, void *val, 3974 unsigned int bytes, 3975 struct x86_exception *exception) 3976 { 3977 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3978 void *data = val; 3979 int r = X86EMUL_CONTINUE; 3980 3981 while (bytes) { 3982 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 3983 PFERR_WRITE_MASK, 3984 exception); 3985 unsigned offset = addr & (PAGE_SIZE-1); 3986 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 3987 int ret; 3988 3989 if (gpa == UNMAPPED_GVA) 3990 return X86EMUL_PROPAGATE_FAULT; 3991 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); 3992 if (ret < 0) { 3993 r = X86EMUL_IO_NEEDED; 3994 goto out; 3995 } 3996 3997 bytes -= towrite; 3998 data += towrite; 3999 addr += towrite; 4000 } 4001 out: 4002 return r; 4003 } 4004 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 4005 4006 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 4007 gpa_t *gpa, struct x86_exception *exception, 4008 bool write) 4009 { 4010 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) 4011 | (write ? 
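/*
 * kvm_read_guest_virt_helper() above walks the access one guest page at a
 * time, because a linear range may cross a page boundary and each page
 * can translate to an unrelated GPA.  For example, addr == 0x1ff0 with
 * bytes == 0x20 is split into two 0x10-byte chunks, each translated
 * separately:
 *
 *	unsigned offset = addr & (PAGE_SIZE - 1);
 *	unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 */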
PFERR_WRITE_MASK : 0); 4012 4013 if (vcpu_match_mmio_gva(vcpu, gva) 4014 && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { 4015 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 4016 (gva & (PAGE_SIZE - 1)); 4017 trace_vcpu_match_mmio(gva, *gpa, write, false); 4018 return 1; 4019 } 4020 4021 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4022 4023 if (*gpa == UNMAPPED_GVA) 4024 return -1; 4025 4026 /* For APIC access vmexit */ 4027 if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4028 return 1; 4029 4030 if (vcpu_match_mmio_gpa(vcpu, *gpa)) { 4031 trace_vcpu_match_mmio(gva, *gpa, write, true); 4032 return 1; 4033 } 4034 4035 return 0; 4036 } 4037 4038 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 4039 const void *val, int bytes) 4040 { 4041 int ret; 4042 4043 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); 4044 if (ret < 0) 4045 return 0; 4046 kvm_mmu_pte_write(vcpu, gpa, val, bytes); 4047 return 1; 4048 } 4049 4050 struct read_write_emulator_ops { 4051 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 4052 int bytes); 4053 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 4054 void *val, int bytes); 4055 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4056 int bytes, void *val); 4057 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4058 void *val, int bytes); 4059 bool write; 4060 }; 4061 4062 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 4063 { 4064 if (vcpu->mmio_read_completed) { 4065 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 4066 vcpu->mmio_fragments[0].gpa, *(u64 *)val); 4067 vcpu->mmio_read_completed = 0; 4068 return 1; 4069 } 4070 4071 return 0; 4072 } 4073 4074 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 4075 void *val, int bytes) 4076 { 4077 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); 4078 } 4079 4080 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 4081 void *val, int bytes) 4082 { 4083 return emulator_write_phys(vcpu, gpa, val, bytes); 4084 } 4085 4086 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 4087 { 4088 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); 4089 return vcpu_mmio_write(vcpu, gpa, bytes, val); 4090 } 4091 4092 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4093 void *val, int bytes) 4094 { 4095 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); 4096 return X86EMUL_IO_NEEDED; 4097 } 4098 4099 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4100 void *val, int bytes) 4101 { 4102 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 4103 4104 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 4105 return X86EMUL_CONTINUE; 4106 } 4107 4108 static const struct read_write_emulator_ops read_emultor = { 4109 .read_write_prepare = read_prepare, 4110 .read_write_emulate = read_emulate, 4111 .read_write_mmio = vcpu_mmio_read, 4112 .read_write_exit_mmio = read_exit_mmio, 4113 }; 4114 4115 static const struct read_write_emulator_ops write_emultor = { 4116 .read_write_emulate = write_emulate, 4117 .read_write_mmio = write_mmio, 4118 .read_write_exit_mmio = write_exit_mmio, 4119 .write = true, 4120 }; 4121 4122 static int emulator_read_write_onepage(unsigned long addr, void *val, 4123 unsigned int bytes, 4124 struct x86_exception *exception, 4125 struct kvm_vcpu *vcpu, 4126 const struct read_write_emulator_ops *ops) 4127 { 4128 gpa_t gpa; 4129 int handled, ret; 4130 bool write = ops->write; 4131 struct kvm_mmio_fragment *frag; 
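/*
 * vcpu_mmio_gva_to_gpa() above uses a three-way return convention that
 * emulator_read_write_onepage() relies on: -1 means the translation
 * faulted, 0 means ordinary guest memory, and 1 means the access must go
 * through MMIO emulation (including the APIC access page).  Sketch of the
 * caller's dispatch:
 *
 *	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
 *	if (ret < 0)
 *		return X86EMUL_PROPAGATE_FAULT;	// inject the fault
 *	if (ret)
 *		goto mmio;			// emulate as device access
 *	// ret == 0: plain RAM, handled via kvm_read/write_guest
 */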
4132 4133 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 4134 4135 if (ret < 0) 4136 return X86EMUL_PROPAGATE_FAULT; 4137 4138 /* For APIC access vmexit */ 4139 if (ret) 4140 goto mmio; 4141 4142 if (ops->read_write_emulate(vcpu, gpa, val, bytes)) 4143 return X86EMUL_CONTINUE; 4144 4145 mmio: 4146 /* 4147 * Is this MMIO handled locally? 4148 */ 4149 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 4150 if (handled == bytes) 4151 return X86EMUL_CONTINUE; 4152 4153 gpa += handled; 4154 bytes -= handled; 4155 val += handled; 4156 4157 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 4158 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 4159 frag->gpa = gpa; 4160 frag->data = val; 4161 frag->len = bytes; 4162 return X86EMUL_CONTINUE; 4163 } 4164 4165 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, 4166 void *val, unsigned int bytes, 4167 struct x86_exception *exception, 4168 const struct read_write_emulator_ops *ops) 4169 { 4170 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4171 gpa_t gpa; 4172 int rc; 4173 4174 if (ops->read_write_prepare && 4175 ops->read_write_prepare(vcpu, val, bytes)) 4176 return X86EMUL_CONTINUE; 4177 4178 vcpu->mmio_nr_fragments = 0; 4179 4180 /* Crossing a page boundary? */ 4181 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 4182 int now; 4183 4184 now = -addr & ~PAGE_MASK; 4185 rc = emulator_read_write_onepage(addr, val, now, exception, 4186 vcpu, ops); 4187 4188 if (rc != X86EMUL_CONTINUE) 4189 return rc; 4190 addr += now; 4191 val += now; 4192 bytes -= now; 4193 } 4194 4195 rc = emulator_read_write_onepage(addr, val, bytes, exception, 4196 vcpu, ops); 4197 if (rc != X86EMUL_CONTINUE) 4198 return rc; 4199 4200 if (!vcpu->mmio_nr_fragments) 4201 return rc; 4202 4203 gpa = vcpu->mmio_fragments[0].gpa; 4204 4205 vcpu->mmio_needed = 1; 4206 vcpu->mmio_cur_fragment = 0; 4207 4208 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 4209 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 4210 vcpu->run->exit_reason = KVM_EXIT_MMIO; 4211 vcpu->run->mmio.phys_addr = gpa; 4212 4213 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 4214 } 4215 4216 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 4217 unsigned long addr, 4218 void *val, 4219 unsigned int bytes, 4220 struct x86_exception *exception) 4221 { 4222 return emulator_read_write(ctxt, addr, val, bytes, 4223 exception, &read_emultor); 4224 } 4225 4226 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 4227 unsigned long addr, 4228 const void *val, 4229 unsigned int bytes, 4230 struct x86_exception *exception) 4231 { 4232 return emulator_read_write(ctxt, addr, (void *)val, bytes, 4233 exception, &write_emultor); 4234 } 4235 4236 #define CMPXCHG_TYPE(t, ptr, old, new) \ 4237 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) 4238 4239 #ifdef CONFIG_X86_64 4240 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) 4241 #else 4242 # define CMPXCHG64(ptr, old, new) \ 4243 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) 4244 #endif 4245 4246 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 4247 unsigned long addr, 4248 const void *old, 4249 const void *new, 4250 unsigned int bytes, 4251 struct x86_exception *exception) 4252 { 4253 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4254 gpa_t gpa; 4255 struct page *page; 4256 char *kaddr; 4257 bool exchanged; 4258 4259 /* guests cmpxchg8b have to be emulated atomically */ 4260 if (bytes > 8 || (bytes & 
(bytes - 1))) 4261 goto emul_write; 4262 4263 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 4264 4265 if (gpa == UNMAPPED_GVA || 4266 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4267 goto emul_write; 4268 4269 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) 4270 goto emul_write; 4271 4272 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 4273 if (is_error_page(page)) 4274 goto emul_write; 4275 4276 kaddr = kmap_atomic(page); 4277 kaddr += offset_in_page(gpa); 4278 switch (bytes) { 4279 case 1: 4280 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); 4281 break; 4282 case 2: 4283 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); 4284 break; 4285 case 4: 4286 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); 4287 break; 4288 case 8: 4289 exchanged = CMPXCHG64(kaddr, old, new); 4290 break; 4291 default: 4292 BUG(); 4293 } 4294 kunmap_atomic(kaddr); 4295 kvm_release_page_dirty(page); 4296 4297 if (!exchanged) 4298 return X86EMUL_CMPXCHG_FAILED; 4299 4300 kvm_mmu_pte_write(vcpu, gpa, new, bytes); 4301 4302 return X86EMUL_CONTINUE; 4303 4304 emul_write: 4305 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 4306 4307 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 4308 } 4309 4310 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4311 { 4312 /* TODO: String I/O for in kernel device */ 4313 int r; 4314 4315 if (vcpu->arch.pio.in) 4316 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, 4317 vcpu->arch.pio.size, pd); 4318 else 4319 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, 4320 vcpu->arch.pio.port, vcpu->arch.pio.size, 4321 pd); 4322 return r; 4323 } 4324 4325 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 4326 unsigned short port, void *val, 4327 unsigned int count, bool in) 4328 { 4329 trace_kvm_pio(!in, port, size, count); 4330 4331 vcpu->arch.pio.port = port; 4332 vcpu->arch.pio.in = in; 4333 vcpu->arch.pio.count = count; 4334 vcpu->arch.pio.size = size; 4335 4336 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 4337 vcpu->arch.pio.count = 0; 4338 return 1; 4339 } 4340 4341 vcpu->run->exit_reason = KVM_EXIT_IO; 4342 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 4343 vcpu->run->io.size = size; 4344 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 4345 vcpu->run->io.count = count; 4346 vcpu->run->io.port = port; 4347 4348 return 0; 4349 } 4350 4351 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 4352 int size, unsigned short port, void *val, 4353 unsigned int count) 4354 { 4355 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4356 int ret; 4357 4358 if (vcpu->arch.pio.count) 4359 goto data_avail; 4360 4361 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4362 if (ret) { 4363 data_avail: 4364 memcpy(val, vcpu->arch.pio_data, size * count); 4365 vcpu->arch.pio.count = 0; 4366 return 1; 4367 } 4368 4369 return 0; 4370 } 4371 4372 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 4373 int size, unsigned short port, 4374 const void *val, unsigned int count) 4375 { 4376 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4377 4378 memcpy(vcpu->arch.pio_data, val, size * count); 4379 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 4380 } 4381 4382 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 4383 { 4384 return kvm_x86_ops->get_segment_base(vcpu, seg); 4385 } 4386 4387 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 4388 { 4389 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 4390 } 4391 4392 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 4393 { 4394 if (!need_emulate_wbinvd(vcpu)) 4395 return X86EMUL_CONTINUE; 4396 4397 if (kvm_x86_ops->has_wbinvd_exit()) { 4398 int cpu = get_cpu(); 4399 4400 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4401 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 4402 wbinvd_ipi, NULL, 1); 4403 put_cpu(); 4404 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 4405 } else 4406 wbinvd(); 4407 return X86EMUL_CONTINUE; 4408 } 4409 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 4410 4411 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 4412 { 4413 kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); 4414 } 4415 4416 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) 4417 { 4418 return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 4419 } 4420 4421 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) 4422 { 4423 4424 return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 4425 } 4426 4427 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 4428 { 4429 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 4430 } 4431 4432 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 4433 { 4434 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4435 unsigned long value; 4436 4437 switch (cr) { 4438 case 0: 4439 value = kvm_read_cr0(vcpu); 4440 break; 4441 case 2: 4442 value = vcpu->arch.cr2; 4443 break; 4444 case 3: 4445 value = kvm_read_cr3(vcpu); 4446 break; 4447 case 4: 4448 value = kvm_read_cr4(vcpu); 4449 break; 4450 case 8: 4451 value = kvm_get_cr8(vcpu); 4452 break; 4453 default: 4454 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4455 return 0; 4456 } 4457 4458 return value; 4459 } 4460 4461 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 4462 { 4463 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4464 int res = 0; 4465 4466 switch (cr) { 4467 case 0: 4468 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 4469 break; 4470 case 2: 4471 vcpu->arch.cr2 = val; 4472 break; 4473 case 3: 4474 res = kvm_set_cr3(vcpu, val); 4475 break; 4476 case 4: 4477 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), 
val)); 4478 break; 4479 case 8: 4480 res = kvm_set_cr8(vcpu, val); 4481 break; 4482 default: 4483 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4484 res = -1; 4485 } 4486 4487 return res; 4488 } 4489 4490 static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val) 4491 { 4492 kvm_set_rflags(emul_to_vcpu(ctxt), val); 4493 } 4494 4495 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 4496 { 4497 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); 4498 } 4499 4500 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4501 { 4502 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); 4503 } 4504 4505 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4506 { 4507 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); 4508 } 4509 4510 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4511 { 4512 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); 4513 } 4514 4515 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4516 { 4517 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); 4518 } 4519 4520 static unsigned long emulator_get_cached_segment_base( 4521 struct x86_emulate_ctxt *ctxt, int seg) 4522 { 4523 return get_segment_base(emul_to_vcpu(ctxt), seg); 4524 } 4525 4526 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 4527 struct desc_struct *desc, u32 *base3, 4528 int seg) 4529 { 4530 struct kvm_segment var; 4531 4532 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 4533 *selector = var.selector; 4534 4535 if (var.unusable) { 4536 memset(desc, 0, sizeof(*desc)); 4537 return false; 4538 } 4539 4540 if (var.g) 4541 var.limit >>= 12; 4542 set_desc_limit(desc, var.limit); 4543 set_desc_base(desc, (unsigned long)var.base); 4544 #ifdef CONFIG_X86_64 4545 if (base3) 4546 *base3 = var.base >> 32; 4547 #endif 4548 desc->type = var.type; 4549 desc->s = var.s; 4550 desc->dpl = var.dpl; 4551 desc->p = var.present; 4552 desc->avl = var.avl; 4553 desc->l = var.l; 4554 desc->d = var.db; 4555 desc->g = var.g; 4556 4557 return true; 4558 } 4559 4560 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 4561 struct desc_struct *desc, u32 base3, 4562 int seg) 4563 { 4564 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4565 struct kvm_segment var; 4566 4567 var.selector = selector; 4568 var.base = get_desc_base(desc); 4569 #ifdef CONFIG_X86_64 4570 var.base |= ((u64)base3) << 32; 4571 #endif 4572 var.limit = get_desc_limit(desc); 4573 if (desc->g) 4574 var.limit = (var.limit << 12) | 0xfff; 4575 var.type = desc->type; 4576 var.present = desc->p; 4577 var.dpl = desc->dpl; 4578 var.db = desc->d; 4579 var.s = desc->s; 4580 var.l = desc->l; 4581 var.g = desc->g; 4582 var.avl = desc->avl; 4583 var.present = desc->p; 4584 var.unusable = !var.present; 4585 var.padding = 0; 4586 4587 kvm_set_segment(vcpu, &var, seg); 4588 return; 4589 } 4590 4591 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 4592 u32 msr_index, u64 *pdata) 4593 { 4594 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 4595 } 4596 4597 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 4598 u32 msr_index, u64 data) 4599 { 4600 struct msr_data msr; 4601 4602 msr.data = data; 4603 msr.index = msr_index; 4604 msr.host_initiated = false; 4605 return kvm_set_msr(emul_to_vcpu(ctxt), &msr); 4606 } 4607 4608 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 4609 u32 pmc, u64 *pdata) 4610 { 4611 return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); 4612 } 4613 4614 
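/*
 * Illustrative note on the segment handling above (a worked example of the
 * existing arithmetic only, not additional functionality): when a segment
 * has the granularity bit set, emulator_get_segment() stores the limit in
 * 4 KiB units (var.limit >>= 12) and emulator_set_segment() undoes this
 * with (var.limit << 12) | 0xfff.  For instance, a byte-granular limit of
 * 0xffffffff is cached as 0x000fffff and restored as
 * (0x000fffff << 12) | 0xfff == 0xffffffff, so the round trip is lossless
 * for page-granular limits.
 */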
static void emulator_halt(struct x86_emulate_ctxt *ctxt) 4615 { 4616 emul_to_vcpu(ctxt)->arch.halt_request = 1; 4617 } 4618 4619 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) 4620 { 4621 preempt_disable(); 4622 kvm_load_guest_fpu(emul_to_vcpu(ctxt)); 4623 /* 4624 * CR0.TS may reference the host fpu state, not the guest fpu state, 4625 * so it may be clear at this point. 4626 */ 4627 clts(); 4628 } 4629 4630 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) 4631 { 4632 preempt_enable(); 4633 } 4634 4635 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 4636 struct x86_instruction_info *info, 4637 enum x86_intercept_stage stage) 4638 { 4639 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); 4640 } 4641 4642 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 4643 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) 4644 { 4645 kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); 4646 } 4647 4648 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 4649 { 4650 return kvm_register_read(emul_to_vcpu(ctxt), reg); 4651 } 4652 4653 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 4654 { 4655 kvm_register_write(emul_to_vcpu(ctxt), reg, val); 4656 } 4657 4658 static const struct x86_emulate_ops emulate_ops = { 4659 .read_gpr = emulator_read_gpr, 4660 .write_gpr = emulator_write_gpr, 4661 .read_std = kvm_read_guest_virt_system, 4662 .write_std = kvm_write_guest_virt_system, 4663 .fetch = kvm_fetch_guest_virt, 4664 .read_emulated = emulator_read_emulated, 4665 .write_emulated = emulator_write_emulated, 4666 .cmpxchg_emulated = emulator_cmpxchg_emulated, 4667 .invlpg = emulator_invlpg, 4668 .pio_in_emulated = emulator_pio_in_emulated, 4669 .pio_out_emulated = emulator_pio_out_emulated, 4670 .get_segment = emulator_get_segment, 4671 .set_segment = emulator_set_segment, 4672 .get_cached_segment_base = emulator_get_cached_segment_base, 4673 .get_gdt = emulator_get_gdt, 4674 .get_idt = emulator_get_idt, 4675 .set_gdt = emulator_set_gdt, 4676 .set_idt = emulator_set_idt, 4677 .get_cr = emulator_get_cr, 4678 .set_cr = emulator_set_cr, 4679 .set_rflags = emulator_set_rflags, 4680 .cpl = emulator_get_cpl, 4681 .get_dr = emulator_get_dr, 4682 .set_dr = emulator_set_dr, 4683 .set_msr = emulator_set_msr, 4684 .get_msr = emulator_get_msr, 4685 .read_pmc = emulator_read_pmc, 4686 .halt = emulator_halt, 4687 .wbinvd = emulator_wbinvd, 4688 .fix_hypercall = emulator_fix_hypercall, 4689 .get_fpu = emulator_get_fpu, 4690 .put_fpu = emulator_put_fpu, 4691 .intercept = emulator_intercept, 4692 .get_cpuid = emulator_get_cpuid, 4693 }; 4694 4695 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 4696 { 4697 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); 4698 /* 4699 * an sti; sti; sequence only disable interrupts for the first 4700 * instruction. So, if the last instruction, be it emulated or 4701 * not, left the system with the INT_STI flag enabled, it 4702 * means that the last instruction is an sti. We should not 4703 * leave the flag on in this case. 
The same goes for mov ss. 4704 */ 4705 if (!(int_shadow & mask)) 4706 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); 4707 } 4708 4709 static void inject_emulated_exception(struct kvm_vcpu *vcpu) 4710 { 4711 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4712 if (ctxt->exception.vector == PF_VECTOR) 4713 kvm_propagate_fault(vcpu, &ctxt->exception); 4714 else if (ctxt->exception.error_code_valid) 4715 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 4716 ctxt->exception.error_code); 4717 else 4718 kvm_queue_exception(vcpu, ctxt->exception.vector); 4719 } 4720 4721 static void init_decode_cache(struct x86_emulate_ctxt *ctxt) 4722 { 4723 memset(&ctxt->twobyte, 0, 4724 (void *)&ctxt->_regs - (void *)&ctxt->twobyte); 4725 4726 ctxt->fetch.start = 0; 4727 ctxt->fetch.end = 0; 4728 ctxt->io_read.pos = 0; 4729 ctxt->io_read.end = 0; 4730 ctxt->mem_read.pos = 0; 4731 ctxt->mem_read.end = 0; 4732 } 4733 4734 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 4735 { 4736 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4737 int cs_db, cs_l; 4738 4739 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 4740 4741 ctxt->eflags = kvm_get_rflags(vcpu); 4742 ctxt->eip = kvm_rip_read(vcpu); 4743 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 4744 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 4745 cs_l ? X86EMUL_MODE_PROT64 : 4746 cs_db ? X86EMUL_MODE_PROT32 : 4747 X86EMUL_MODE_PROT16; 4748 ctxt->guest_mode = is_guest_mode(vcpu); 4749 4750 init_decode_cache(ctxt); 4751 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 4752 } 4753 4754 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 4755 { 4756 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4757 int ret; 4758 4759 init_emulate_ctxt(vcpu); 4760 4761 ctxt->op_bytes = 2; 4762 ctxt->ad_bytes = 2; 4763 ctxt->_eip = ctxt->eip + inc_eip; 4764 ret = emulate_int_real(ctxt, irq); 4765 4766 if (ret != X86EMUL_CONTINUE) 4767 return EMULATE_FAIL; 4768 4769 ctxt->eip = ctxt->_eip; 4770 kvm_rip_write(vcpu, ctxt->eip); 4771 kvm_set_rflags(vcpu, ctxt->eflags); 4772 4773 if (irq == NMI_VECTOR) 4774 vcpu->arch.nmi_pending = 0; 4775 else 4776 vcpu->arch.interrupt.pending = false; 4777 4778 return EMULATE_DONE; 4779 } 4780 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 4781 4782 static int handle_emulation_failure(struct kvm_vcpu *vcpu) 4783 { 4784 int r = EMULATE_DONE; 4785 4786 ++vcpu->stat.insn_emulation_fail; 4787 trace_kvm_emulate_insn_failed(vcpu); 4788 if (!is_guest_mode(vcpu)) { 4789 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4790 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 4791 vcpu->run->internal.ndata = 0; 4792 r = EMULATE_FAIL; 4793 } 4794 kvm_queue_exception(vcpu, UD_VECTOR); 4795 4796 return r; 4797 } 4798 4799 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, 4800 bool write_fault_to_shadow_pgtable, 4801 int emulation_type) 4802 { 4803 gpa_t gpa = cr2; 4804 pfn_t pfn; 4805 4806 if (emulation_type & EMULTYPE_NO_REEXECUTE) 4807 return false; 4808 4809 if (!vcpu->arch.mmu.direct_map) { 4810 /* 4811 * Write permission should be allowed since only 4812 * write access needs to be emulated. 4813 */ 4814 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 4815 4816 /* 4817 * If the mapping is invalid in the guest, let the cpu retry 4818 * it to generate a fault. 
4819 */ 4820 if (gpa == UNMAPPED_GVA) 4821 return true; 4822 } 4823 4824 /* 4825 * Do not retry the unhandleable instruction if it faults on 4826 * read-only host memory, otherwise it will go into an infinite loop: 4827 * retry instruction -> write #PF -> emulation fail -> retry 4828 * instruction -> ... 4829 */ 4830 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 4831 4832 /* 4833 * If the instruction failed on the error pfn, it cannot be fixed; 4834 * report the error to userspace. 4835 */ 4836 if (is_error_noslot_pfn(pfn)) 4837 return false; 4838 4839 kvm_release_pfn_clean(pfn); 4840 4841 /* Instructions are well emulated on the direct mmu. */ 4842 if (vcpu->arch.mmu.direct_map) { 4843 unsigned int indirect_shadow_pages; 4844 4845 spin_lock(&vcpu->kvm->mmu_lock); 4846 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 4847 spin_unlock(&vcpu->kvm->mmu_lock); 4848 4849 if (indirect_shadow_pages) 4850 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4851 4852 return true; 4853 } 4854 4855 /* 4856 * If emulation was due to an access to a shadowed page table 4857 * and it failed, try to unshadow the page and re-enter the 4858 * guest to let the CPU execute the instruction. 4859 */ 4860 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4861 4862 /* 4863 * If the access faults on its own page table, it cannot 4864 * be fixed by unprotecting the shadow page, and it should 4865 * be reported to userspace. 4866 */ 4867 return !write_fault_to_shadow_pgtable; 4868 } 4869 4870 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 4871 unsigned long cr2, int emulation_type) 4872 { 4873 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4874 unsigned long last_retry_eip, last_retry_addr, gpa = cr2; 4875 4876 last_retry_eip = vcpu->arch.last_retry_eip; 4877 last_retry_addr = vcpu->arch.last_retry_addr; 4878 4879 /* 4880 * If the emulation is caused by #PF and it is a non-page-table 4881 * writing instruction, it means the VM-EXIT was caused by a 4882 * write-protected shadow page; we can zap the shadow page and 4883 * retry this instruction directly. 4884 * 4885 * Note: if the guest uses a non-page-table modifying instruction 4886 * on the PDE that points to the instruction, then we will unmap 4887 * the instruction and go into an infinite loop. So, we cache the 4888 * last retried eip and the last fault address; if we see the same 4889 * eip and address again, we can break out of the potential infinite 4890 * loop. 
4891 */ 4892 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 4893 4894 if (!(emulation_type & EMULTYPE_RETRY)) 4895 return false; 4896 4897 if (x86_page_table_writing_insn(ctxt)) 4898 return false; 4899 4900 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) 4901 return false; 4902 4903 vcpu->arch.last_retry_eip = ctxt->eip; 4904 vcpu->arch.last_retry_addr = cr2; 4905 4906 if (!vcpu->arch.mmu.direct_map) 4907 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 4908 4909 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4910 4911 return true; 4912 } 4913 4914 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 4915 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 4916 4917 int x86_emulate_instruction(struct kvm_vcpu *vcpu, 4918 unsigned long cr2, 4919 int emulation_type, 4920 void *insn, 4921 int insn_len) 4922 { 4923 int r; 4924 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4925 bool writeback = true; 4926 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 4927 4928 /* 4929 * Clear write_fault_to_shadow_pgtable here to ensure it is 4930 * never reused. 4931 */ 4932 vcpu->arch.write_fault_to_shadow_pgtable = false; 4933 kvm_clear_exception_queue(vcpu); 4934 4935 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 4936 init_emulate_ctxt(vcpu); 4937 ctxt->interruptibility = 0; 4938 ctxt->have_exception = false; 4939 ctxt->perm_ok = false; 4940 4941 ctxt->only_vendor_specific_insn 4942 = emulation_type & EMULTYPE_TRAP_UD; 4943 4944 r = x86_decode_insn(ctxt, insn, insn_len); 4945 4946 trace_kvm_emulate_insn_start(vcpu); 4947 ++vcpu->stat.insn_emulation; 4948 if (r != EMULATION_OK) { 4949 if (emulation_type & EMULTYPE_TRAP_UD) 4950 return EMULATE_FAIL; 4951 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 4952 emulation_type)) 4953 return EMULATE_DONE; 4954 if (emulation_type & EMULTYPE_SKIP) 4955 return EMULATE_FAIL; 4956 return handle_emulation_failure(vcpu); 4957 } 4958 } 4959 4960 if (emulation_type & EMULTYPE_SKIP) { 4961 kvm_rip_write(vcpu, ctxt->_eip); 4962 return EMULATE_DONE; 4963 } 4964 4965 if (retry_instruction(ctxt, cr2, emulation_type)) 4966 return EMULATE_DONE; 4967 4968 /* this is needed for vmware backdoor interface to work since it 4969 changes registers values during IO operation */ 4970 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 4971 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 4972 emulator_invalidate_register_cache(ctxt); 4973 } 4974 4975 restart: 4976 r = x86_emulate_insn(ctxt); 4977 4978 if (r == EMULATION_INTERCEPTED) 4979 return EMULATE_DONE; 4980 4981 if (r == EMULATION_FAILED) { 4982 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 4983 emulation_type)) 4984 return EMULATE_DONE; 4985 4986 return handle_emulation_failure(vcpu); 4987 } 4988 4989 if (ctxt->have_exception) { 4990 inject_emulated_exception(vcpu); 4991 r = EMULATE_DONE; 4992 } else if (vcpu->arch.pio.count) { 4993 if (!vcpu->arch.pio.in) 4994 vcpu->arch.pio.count = 0; 4995 else { 4996 writeback = false; 4997 vcpu->arch.complete_userspace_io = complete_emulated_pio; 4998 } 4999 r = EMULATE_DO_MMIO; 5000 } else if (vcpu->mmio_needed) { 5001 if (!vcpu->mmio_is_write) 5002 writeback = false; 5003 r = EMULATE_DO_MMIO; 5004 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5005 } else if (r == EMULATION_RESTART) 5006 goto restart; 5007 else 5008 r = EMULATE_DONE; 5009 5010 if (writeback) { 5011 toggle_interruptibility(vcpu, ctxt->interruptibility); 5012 kvm_set_rflags(vcpu, ctxt->eflags); 5013 
kvm_make_request(KVM_REQ_EVENT, vcpu); 5014 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5015 kvm_rip_write(vcpu, ctxt->eip); 5016 } else 5017 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 5018 5019 return r; 5020 } 5021 EXPORT_SYMBOL_GPL(x86_emulate_instruction); 5022 5023 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) 5024 { 5025 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); 5026 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, 5027 size, port, &val, 1); 5028 /* do not return to emulator after return from userspace */ 5029 vcpu->arch.pio.count = 0; 5030 return ret; 5031 } 5032 EXPORT_SYMBOL_GPL(kvm_fast_pio_out); 5033 5034 static void tsc_bad(void *info) 5035 { 5036 __this_cpu_write(cpu_tsc_khz, 0); 5037 } 5038 5039 static void tsc_khz_changed(void *data) 5040 { 5041 struct cpufreq_freqs *freq = data; 5042 unsigned long khz = 0; 5043 5044 if (data) 5045 khz = freq->new; 5046 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5047 khz = cpufreq_quick_get(raw_smp_processor_id()); 5048 if (!khz) 5049 khz = tsc_khz; 5050 __this_cpu_write(cpu_tsc_khz, khz); 5051 } 5052 5053 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 5054 void *data) 5055 { 5056 struct cpufreq_freqs *freq = data; 5057 struct kvm *kvm; 5058 struct kvm_vcpu *vcpu; 5059 int i, send_ipi = 0; 5060 5061 /* 5062 * We allow guests to temporarily run on slowing clocks, 5063 * provided we notify them after, or to run on accelerating 5064 * clocks, provided we notify them before. Thus time never 5065 * goes backwards. 5066 * 5067 * However, we have a problem. We can't atomically update 5068 * the frequency of a given CPU from this function; it is 5069 * merely a notifier, which can be called from any CPU. 5070 * Changing the TSC frequency at arbitrary points in time 5071 * requires a recomputation of local variables related to 5072 * the TSC for each VCPU. We must flag these local variables 5073 * to be updated and be sure the update takes place with the 5074 * new frequency before any guests proceed. 5075 * 5076 * Unfortunately, the combination of hotplug CPU and frequency 5077 * change creates an intractable locking scenario; the order 5078 * of when these callouts happen is undefined with respect to 5079 * CPU hotplug, and they can race with each other. As such, 5080 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 5081 * undefined; you can actually have a CPU frequency change take 5082 * place in between the computation of X and the setting of the 5083 * variable. To protect against this problem, all updates of 5084 * the per_cpu tsc_khz variable are done in an interrupt 5085 * protected IPI, and all callers wishing to update the value 5086 * must wait for a synchronous IPI to complete (which is trivial 5087 * if the caller is on the CPU already). This establishes the 5088 * necessary total order on variable updates. 5089 * 5090 * Note that because a guest time update may take place 5091 * anytime after the setting of the VCPU's request bit, the 5092 * correct TSC value must be set before the request. However, 5093 * to ensure the update actually makes it to any guest which 5094 * starts running in hardware virtualization between the set 5095 * and the acquisition of the spinlock, we must also ping the 5096 * CPU after setting the request bit. 
5097 * 5098 */ 5099 5100 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 5101 return 0; 5102 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 5103 return 0; 5104 5105 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5106 5107 raw_spin_lock(&kvm_lock); 5108 list_for_each_entry(kvm, &vm_list, vm_list) { 5109 kvm_for_each_vcpu(i, vcpu, kvm) { 5110 if (vcpu->cpu != freq->cpu) 5111 continue; 5112 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5113 if (vcpu->cpu != smp_processor_id()) 5114 send_ipi = 1; 5115 } 5116 } 5117 raw_spin_unlock(&kvm_lock); 5118 5119 if (freq->old < freq->new && send_ipi) { 5120 /* 5121 * We upscale the frequency. We must make sure the guest 5122 * doesn't see old kvmclock values while running with 5123 * the new frequency, otherwise we risk that the guest sees 5124 * time go backwards. 5125 * 5126 * In case we update the frequency for another cpu 5127 * (which might be in guest context), send an interrupt 5128 * to kick the cpu out of guest context. The next time 5129 * guest context is entered, kvmclock will be updated, 5130 * so the guest will not see stale values. 5131 */ 5132 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5133 } 5134 return 0; 5135 } 5136 5137 static struct notifier_block kvmclock_cpufreq_notifier_block = { 5138 .notifier_call = kvmclock_cpufreq_notifier 5139 }; 5140 5141 static int kvmclock_cpu_notifier(struct notifier_block *nfb, 5142 unsigned long action, void *hcpu) 5143 { 5144 unsigned int cpu = (unsigned long)hcpu; 5145 5146 switch (action) { 5147 case CPU_ONLINE: 5148 case CPU_DOWN_FAILED: 5149 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5150 break; 5151 case CPU_DOWN_PREPARE: 5152 smp_call_function_single(cpu, tsc_bad, NULL, 1); 5153 break; 5154 } 5155 return NOTIFY_OK; 5156 } 5157 5158 static struct notifier_block kvmclock_cpu_notifier_block = { 5159 .notifier_call = kvmclock_cpu_notifier, 5160 .priority = -INT_MAX 5161 }; 5162 5163 static void kvm_timer_init(void) 5164 { 5165 int cpu; 5166 5167 max_tsc_khz = tsc_khz; 5168 register_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5169 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 5170 #ifdef CONFIG_CPU_FREQ 5171 struct cpufreq_policy policy; 5172 memset(&policy, 0, sizeof(policy)); 5173 cpu = get_cpu(); 5174 cpufreq_get_policy(&policy, cpu); 5175 if (policy.cpuinfo.max_freq) 5176 max_tsc_khz = policy.cpuinfo.max_freq; 5177 put_cpu(); 5178 #endif 5179 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 5180 CPUFREQ_TRANSITION_NOTIFIER); 5181 } 5182 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); 5183 for_each_online_cpu(cpu) 5184 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5185 } 5186 5187 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 5188 5189 int kvm_is_in_guest(void) 5190 { 5191 return __this_cpu_read(current_vcpu) != NULL; 5192 } 5193 5194 static int kvm_is_user_mode(void) 5195 { 5196 int user_mode = 3; 5197 5198 if (__this_cpu_read(current_vcpu)) 5199 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); 5200 5201 return user_mode != 0; 5202 } 5203 5204 static unsigned long kvm_get_guest_ip(void) 5205 { 5206 unsigned long ip = 0; 5207 5208 if (__this_cpu_read(current_vcpu)) 5209 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); 5210 5211 return ip; 5212 } 5213 5214 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5215 .is_in_guest = kvm_is_in_guest, 5216 .is_user_mode = kvm_is_user_mode, 5217 .get_guest_ip = kvm_get_guest_ip, 5218 }; 5219 5220 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) 5221 
{ 5222 __this_cpu_write(current_vcpu, vcpu); 5223 } 5224 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); 5225 5226 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) 5227 { 5228 __this_cpu_write(current_vcpu, NULL); 5229 } 5230 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); 5231 5232 static void kvm_set_mmio_spte_mask(void) 5233 { 5234 u64 mask; 5235 int maxphyaddr = boot_cpu_data.x86_phys_bits; 5236 5237 /* 5238 * Set the reserved bits and the present bit of a paging-structure 5239 * entry to generate a page fault with PFERR.RSV = 1. 5240 */ 5241 mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr; 5242 mask |= 1ull; 5243 5244 #ifdef CONFIG_X86_64 5245 /* 5246 * If the reserved bit is not supported, clear the present bit to 5247 * disable the mmio page fault. 5248 */ 5249 if (maxphyaddr == 52) 5250 mask &= ~1ull; 5251 #endif 5252 5253 kvm_mmu_set_mmio_spte_mask(mask); 5254 } 5255 5256 #ifdef CONFIG_X86_64 5257 static void pvclock_gtod_update_fn(struct work_struct *work) 5258 { 5259 struct kvm *kvm; 5260 5261 struct kvm_vcpu *vcpu; 5262 int i; 5263 5264 raw_spin_lock(&kvm_lock); 5265 list_for_each_entry(kvm, &vm_list, vm_list) 5266 kvm_for_each_vcpu(i, vcpu, kvm) 5267 set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); 5268 atomic_set(&kvm_guest_has_master_clock, 0); 5269 raw_spin_unlock(&kvm_lock); 5270 } 5271 5272 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 5273 5274 /* 5275 * Notification about pvclock gtod data update. 5276 */ 5277 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 5278 void *priv) 5279 { 5280 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 5281 struct timekeeper *tk = priv; 5282 5283 update_pvclock_gtod(tk); 5284 5285 /* disable the master clock if the host does not trust, or does not 5286 * use, the TSC clocksource 5287 */ 5288 if (gtod->clock.vclock_mode != VCLOCK_TSC && 5289 atomic_read(&kvm_guest_has_master_clock) != 0) 5290 queue_work(system_long_wq, &pvclock_gtod_work); 5291 5292 return 0; 5293 } 5294 5295 static struct notifier_block pvclock_gtod_notifier = { 5296 .notifier_call = pvclock_gtod_notify, 5297 }; 5298 #endif 5299 5300 int kvm_arch_init(void *opaque) 5301 { 5302 int r; 5303 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; 5304 5305 if (kvm_x86_ops) { 5306 printk(KERN_ERR "kvm: already loaded the other module\n"); 5307 r = -EEXIST; 5308 goto out; 5309 } 5310 5311 if (!ops->cpu_has_kvm_support()) { 5312 printk(KERN_ERR "kvm: no hardware support\n"); 5313 r = -EOPNOTSUPP; 5314 goto out; 5315 } 5316 if (ops->disabled_by_bios()) { 5317 printk(KERN_ERR "kvm: disabled by bios\n"); 5318 r = -EOPNOTSUPP; 5319 goto out; 5320 } 5321 5322 r = -ENOMEM; 5323 shared_msrs = alloc_percpu(struct kvm_shared_msrs); 5324 if (!shared_msrs) { 5325 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); 5326 goto out; 5327 } 5328 5329 r = kvm_mmu_module_init(); 5330 if (r) 5331 goto out_free_percpu; 5332 5333 kvm_set_mmio_spte_mask(); 5334 kvm_init_msr_list(); 5335 5336 kvm_x86_ops = ops; 5337 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, 5338 PT_DIRTY_MASK, PT64_NX_MASK, 0); 5339 5340 kvm_timer_init(); 5341 5342 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5343 5344 if (cpu_has_xsave) 5345 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 5346 5347 kvm_lapic_init(); 5348 #ifdef CONFIG_X86_64 5349 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 5350 #endif 5351 5352 return 0; 5353 5354 out_free_percpu: 5355 free_percpu(shared_msrs); 5356 out: 5357 return r; 5358 } 5359 5360 void kvm_arch_exit(void) 5361 { 
5362 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5363 5364 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5365 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 5366 CPUFREQ_TRANSITION_NOTIFIER); 5367 unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5368 #ifdef CONFIG_X86_64 5369 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 5370 #endif 5371 kvm_x86_ops = NULL; 5372 kvm_mmu_module_exit(); 5373 free_percpu(shared_msrs); 5374 } 5375 5376 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 5377 { 5378 ++vcpu->stat.halt_exits; 5379 if (irqchip_in_kernel(vcpu->kvm)) { 5380 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; 5381 return 1; 5382 } else { 5383 vcpu->run->exit_reason = KVM_EXIT_HLT; 5384 return 0; 5385 } 5386 } 5387 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 5388 5389 int kvm_hv_hypercall(struct kvm_vcpu *vcpu) 5390 { 5391 u64 param, ingpa, outgpa, ret; 5392 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; 5393 bool fast, longmode; 5394 int cs_db, cs_l; 5395 5396 /* 5397 * hypercall generates UD from non zero cpl and real mode 5398 * per HYPER-V spec 5399 */ 5400 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { 5401 kvm_queue_exception(vcpu, UD_VECTOR); 5402 return 0; 5403 } 5404 5405 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5406 longmode = is_long_mode(vcpu) && cs_l == 1; 5407 5408 if (!longmode) { 5409 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | 5410 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); 5411 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | 5412 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); 5413 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | 5414 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); 5415 } 5416 #ifdef CONFIG_X86_64 5417 else { 5418 param = kvm_register_read(vcpu, VCPU_REGS_RCX); 5419 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); 5420 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); 5421 } 5422 #endif 5423 5424 code = param & 0xffff; 5425 fast = (param >> 16) & 0x1; 5426 rep_cnt = (param >> 32) & 0xfff; 5427 rep_idx = (param >> 48) & 0xfff; 5428 5429 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); 5430 5431 switch (code) { 5432 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: 5433 kvm_vcpu_on_spin(vcpu); 5434 break; 5435 default: 5436 res = HV_STATUS_INVALID_HYPERCALL_CODE; 5437 break; 5438 } 5439 5440 ret = res | (((u64)rep_done & 0xfff) << 32); 5441 if (longmode) { 5442 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 5443 } else { 5444 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); 5445 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); 5446 } 5447 5448 return 1; 5449 } 5450 5451 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 5452 { 5453 unsigned long nr, a0, a1, a2, a3, ret; 5454 int r = 1; 5455 5456 if (kvm_hv_hypercall_enabled(vcpu->kvm)) 5457 return kvm_hv_hypercall(vcpu); 5458 5459 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 5460 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 5461 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); 5462 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); 5463 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); 5464 5465 trace_kvm_hypercall(nr, a0, a1, a2, a3); 5466 5467 if (!is_long_mode(vcpu)) { 5468 nr &= 0xFFFFFFFF; 5469 a0 &= 0xFFFFFFFF; 5470 a1 &= 0xFFFFFFFF; 5471 a2 &= 0xFFFFFFFF; 5472 a3 &= 0xFFFFFFFF; 5473 } 5474 5475 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 5476 ret = -KVM_EPERM; 5477 goto out; 5478 } 5479 5480 switch (nr) { 5481 case KVM_HC_VAPIC_POLL_IRQ: 5482 ret = 0; 5483 
break; 5484 default: 5485 ret = -KVM_ENOSYS; 5486 break; 5487 } 5488 out: 5489 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 5490 ++vcpu->stat.hypercalls; 5491 return r; 5492 } 5493 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 5494 5495 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 5496 { 5497 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 5498 char instruction[3]; 5499 unsigned long rip = kvm_rip_read(vcpu); 5500 5501 /* 5502 * Blow out the MMU to ensure that no other VCPU has an active mapping 5503 * to ensure that the updated hypercall appears atomically across all 5504 * VCPUs. 5505 */ 5506 kvm_mmu_zap_all(vcpu->kvm); 5507 5508 kvm_x86_ops->patch_hypercall(vcpu, instruction); 5509 5510 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 5511 } 5512 5513 /* 5514 * Check if userspace requested an interrupt window, and that the 5515 * interrupt window is open. 5516 * 5517 * No need to exit to userspace if we already have an interrupt queued. 5518 */ 5519 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 5520 { 5521 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && 5522 vcpu->run->request_interrupt_window && 5523 kvm_arch_interrupt_allowed(vcpu)); 5524 } 5525 5526 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 5527 { 5528 struct kvm_run *kvm_run = vcpu->run; 5529 5530 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 5531 kvm_run->cr8 = kvm_get_cr8(vcpu); 5532 kvm_run->apic_base = kvm_get_apic_base(vcpu); 5533 if (irqchip_in_kernel(vcpu->kvm)) 5534 kvm_run->ready_for_interrupt_injection = 1; 5535 else 5536 kvm_run->ready_for_interrupt_injection = 5537 kvm_arch_interrupt_allowed(vcpu) && 5538 !kvm_cpu_has_interrupt(vcpu) && 5539 !kvm_event_needs_reinjection(vcpu); 5540 } 5541 5542 static int vapic_enter(struct kvm_vcpu *vcpu) 5543 { 5544 struct kvm_lapic *apic = vcpu->arch.apic; 5545 struct page *page; 5546 5547 if (!apic || !apic->vapic_addr) 5548 return 0; 5549 5550 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 5551 if (is_error_page(page)) 5552 return -EFAULT; 5553 5554 vcpu->arch.apic->vapic_page = page; 5555 return 0; 5556 } 5557 5558 static void vapic_exit(struct kvm_vcpu *vcpu) 5559 { 5560 struct kvm_lapic *apic = vcpu->arch.apic; 5561 int idx; 5562 5563 if (!apic || !apic->vapic_addr) 5564 return; 5565 5566 idx = srcu_read_lock(&vcpu->kvm->srcu); 5567 kvm_release_page_dirty(apic->vapic_page); 5568 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 5569 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5570 } 5571 5572 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5573 { 5574 int max_irr, tpr; 5575 5576 if (!kvm_x86_ops->update_cr8_intercept) 5577 return; 5578 5579 if (!vcpu->arch.apic) 5580 return; 5581 5582 if (!vcpu->arch.apic->vapic_addr) 5583 max_irr = kvm_lapic_find_highest_irr(vcpu); 5584 else 5585 max_irr = -1; 5586 5587 if (max_irr != -1) 5588 max_irr >>= 4; 5589 5590 tpr = kvm_lapic_get_cr8(vcpu); 5591 5592 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); 5593 } 5594 5595 static void inject_pending_event(struct kvm_vcpu *vcpu) 5596 { 5597 /* try to reinject previous events if any */ 5598 if (vcpu->arch.exception.pending) { 5599 trace_kvm_inj_exception(vcpu->arch.exception.nr, 5600 vcpu->arch.exception.has_error_code, 5601 vcpu->arch.exception.error_code); 5602 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, 5603 vcpu->arch.exception.has_error_code, 5604 vcpu->arch.exception.error_code, 5605 vcpu->arch.exception.reinject); 5606 return; 5607 } 5608 
5609 if (vcpu->arch.nmi_injected) { 5610 kvm_x86_ops->set_nmi(vcpu); 5611 return; 5612 } 5613 5614 if (vcpu->arch.interrupt.pending) { 5615 kvm_x86_ops->set_irq(vcpu); 5616 return; 5617 } 5618 5619 /* try to inject new event if pending */ 5620 if (vcpu->arch.nmi_pending) { 5621 if (kvm_x86_ops->nmi_allowed(vcpu)) { 5622 --vcpu->arch.nmi_pending; 5623 vcpu->arch.nmi_injected = true; 5624 kvm_x86_ops->set_nmi(vcpu); 5625 } 5626 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 5627 if (kvm_x86_ops->interrupt_allowed(vcpu)) { 5628 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), 5629 false); 5630 kvm_x86_ops->set_irq(vcpu); 5631 } 5632 } 5633 } 5634 5635 static void process_nmi(struct kvm_vcpu *vcpu) 5636 { 5637 unsigned limit = 2; 5638 5639 /* 5640 * x86 is limited to one NMI running, and one NMI pending after it. 5641 * If an NMI is already in progress, limit further NMIs to just one. 5642 * Otherwise, allow two (and we'll inject the first one immediately). 5643 */ 5644 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) 5645 limit = 1; 5646 5647 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 5648 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 5649 kvm_make_request(KVM_REQ_EVENT, vcpu); 5650 } 5651 5652 static void kvm_gen_update_masterclock(struct kvm *kvm) 5653 { 5654 #ifdef CONFIG_X86_64 5655 int i; 5656 struct kvm_vcpu *vcpu; 5657 struct kvm_arch *ka = &kvm->arch; 5658 5659 spin_lock(&ka->pvclock_gtod_sync_lock); 5660 kvm_make_mclock_inprogress_request(kvm); 5661 /* no guest entries from this point */ 5662 pvclock_update_vm_gtod_copy(kvm); 5663 5664 kvm_for_each_vcpu(i, vcpu, kvm) 5665 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); 5666 5667 /* guest entries allowed */ 5668 kvm_for_each_vcpu(i, vcpu, kvm) 5669 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); 5670 5671 spin_unlock(&ka->pvclock_gtod_sync_lock); 5672 #endif 5673 } 5674 5675 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 5676 { 5677 u64 eoi_exit_bitmap[4]; 5678 u32 tmr[8]; 5679 5680 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 5681 return; 5682 5683 memset(eoi_exit_bitmap, 0, 32); 5684 memset(tmr, 0, 32); 5685 5686 kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); 5687 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); 5688 kvm_apic_update_tmr(vcpu, tmr); 5689 } 5690 5691 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 5692 { 5693 int r; 5694 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && 5695 vcpu->run->request_interrupt_window; 5696 bool req_immediate_exit = false; 5697 5698 if (vcpu->requests) { 5699 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 5700 kvm_mmu_unload(vcpu); 5701 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 5702 __kvm_migrate_timers(vcpu); 5703 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 5704 kvm_gen_update_masterclock(vcpu->kvm); 5705 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 5706 r = kvm_guest_time_update(vcpu); 5707 if (unlikely(r)) 5708 goto out; 5709 } 5710 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 5711 kvm_mmu_sync_roots(vcpu); 5712 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 5713 kvm_x86_ops->tlb_flush(vcpu); 5714 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 5715 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 5716 r = 0; 5717 goto out; 5718 } 5719 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 5720 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 5721 r = 0; 5722 goto out; 5723 } 5724 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { 5725 vcpu->fpu_active = 0; 5726 
kvm_x86_ops->fpu_deactivate(vcpu); 5727 } 5728 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 5729 /* Page is swapped out. Do synthetic halt */ 5730 vcpu->arch.apf.halted = true; 5731 r = 1; 5732 goto out; 5733 } 5734 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 5735 record_steal_time(vcpu); 5736 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 5737 process_nmi(vcpu); 5738 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 5739 kvm_handle_pmu_event(vcpu); 5740 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 5741 kvm_deliver_pmi(vcpu); 5742 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 5743 vcpu_scan_ioapic(vcpu); 5744 } 5745 5746 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { 5747 kvm_apic_accept_events(vcpu); 5748 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 5749 r = 1; 5750 goto out; 5751 } 5752 5753 inject_pending_event(vcpu); 5754 5755 /* enable NMI/IRQ window open exits if needed */ 5756 if (vcpu->arch.nmi_pending) 5757 req_immediate_exit = 5758 kvm_x86_ops->enable_nmi_window(vcpu) != 0; 5759 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 5760 req_immediate_exit = 5761 kvm_x86_ops->enable_irq_window(vcpu) != 0; 5762 5763 if (kvm_lapic_enabled(vcpu)) { 5764 /* 5765 * Update architecture specific hints for APIC 5766 * virtual interrupt delivery. 5767 */ 5768 if (kvm_x86_ops->hwapic_irr_update) 5769 kvm_x86_ops->hwapic_irr_update(vcpu, 5770 kvm_lapic_find_highest_irr(vcpu)); 5771 update_cr8_intercept(vcpu); 5772 kvm_lapic_sync_to_vapic(vcpu); 5773 } 5774 } 5775 5776 r = kvm_mmu_reload(vcpu); 5777 if (unlikely(r)) { 5778 goto cancel_injection; 5779 } 5780 5781 preempt_disable(); 5782 5783 kvm_x86_ops->prepare_guest_switch(vcpu); 5784 if (vcpu->fpu_active) 5785 kvm_load_guest_fpu(vcpu); 5786 kvm_load_guest_xcr0(vcpu); 5787 5788 vcpu->mode = IN_GUEST_MODE; 5789 5790 /* We should set ->mode before check ->requests, 5791 * see the comment in make_all_cpus_request. 5792 */ 5793 smp_mb(); 5794 5795 local_irq_disable(); 5796 5797 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests 5798 || need_resched() || signal_pending(current)) { 5799 vcpu->mode = OUTSIDE_GUEST_MODE; 5800 smp_wmb(); 5801 local_irq_enable(); 5802 preempt_enable(); 5803 r = 1; 5804 goto cancel_injection; 5805 } 5806 5807 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5808 5809 if (req_immediate_exit) 5810 smp_send_reschedule(vcpu->cpu); 5811 5812 kvm_guest_enter(); 5813 5814 if (unlikely(vcpu->arch.switch_db_regs)) { 5815 set_debugreg(0, 7); 5816 set_debugreg(vcpu->arch.eff_db[0], 0); 5817 set_debugreg(vcpu->arch.eff_db[1], 1); 5818 set_debugreg(vcpu->arch.eff_db[2], 2); 5819 set_debugreg(vcpu->arch.eff_db[3], 3); 5820 } 5821 5822 trace_kvm_entry(vcpu->vcpu_id); 5823 kvm_x86_ops->run(vcpu); 5824 5825 /* 5826 * If the guest has used debug registers, at least dr7 5827 * will be disabled while returning to the host. 5828 * If we don't have active breakpoints in the host, we don't 5829 * care about the messed up debug address registers. But if 5830 * we have some of them active, restore the old state. 
5831 */ 5832 if (hw_breakpoint_active()) 5833 hw_breakpoint_restore(); 5834 5835 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, 5836 native_read_tsc()); 5837 5838 vcpu->mode = OUTSIDE_GUEST_MODE; 5839 smp_wmb(); 5840 5841 /* Interrupt is enabled by handle_external_intr() */ 5842 kvm_x86_ops->handle_external_intr(vcpu); 5843 5844 ++vcpu->stat.exits; 5845 5846 /* 5847 * We must have an instruction between local_irq_enable() and 5848 * kvm_guest_exit(), so the timer interrupt isn't delayed by 5849 * the interrupt shadow. The stat.exits increment will do nicely. 5850 * But we need to prevent reordering, hence this barrier(): 5851 */ 5852 barrier(); 5853 5854 kvm_guest_exit(); 5855 5856 preempt_enable(); 5857 5858 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5859 5860 /* 5861 * Profile KVM exit RIPs: 5862 */ 5863 if (unlikely(prof_on == KVM_PROFILING)) { 5864 unsigned long rip = kvm_rip_read(vcpu); 5865 profile_hit(KVM_PROFILING, (void *)rip); 5866 } 5867 5868 if (unlikely(vcpu->arch.tsc_always_catchup)) 5869 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5870 5871 if (vcpu->arch.apic_attention) 5872 kvm_lapic_sync_from_vapic(vcpu); 5873 5874 r = kvm_x86_ops->handle_exit(vcpu); 5875 return r; 5876 5877 cancel_injection: 5878 kvm_x86_ops->cancel_injection(vcpu); 5879 if (unlikely(vcpu->arch.apic_attention)) 5880 kvm_lapic_sync_from_vapic(vcpu); 5881 out: 5882 return r; 5883 } 5884 5885 5886 static int __vcpu_run(struct kvm_vcpu *vcpu) 5887 { 5888 int r; 5889 struct kvm *kvm = vcpu->kvm; 5890 5891 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 5892 r = vapic_enter(vcpu); 5893 if (r) { 5894 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 5895 return r; 5896 } 5897 5898 r = 1; 5899 while (r > 0) { 5900 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 5901 !vcpu->arch.apf.halted) 5902 r = vcpu_enter_guest(vcpu); 5903 else { 5904 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 5905 kvm_vcpu_block(vcpu); 5906 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 5907 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { 5908 kvm_apic_accept_events(vcpu); 5909 switch(vcpu->arch.mp_state) { 5910 case KVM_MP_STATE_HALTED: 5911 vcpu->arch.mp_state = 5912 KVM_MP_STATE_RUNNABLE; 5913 case KVM_MP_STATE_RUNNABLE: 5914 vcpu->arch.apf.halted = false; 5915 break; 5916 case KVM_MP_STATE_INIT_RECEIVED: 5917 break; 5918 default: 5919 r = -EINTR; 5920 break; 5921 } 5922 } 5923 } 5924 5925 if (r <= 0) 5926 break; 5927 5928 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); 5929 if (kvm_cpu_has_pending_timer(vcpu)) 5930 kvm_inject_pending_timer_irqs(vcpu); 5931 5932 if (dm_request_for_irq_injection(vcpu)) { 5933 r = -EINTR; 5934 vcpu->run->exit_reason = KVM_EXIT_INTR; 5935 ++vcpu->stat.request_irq_exits; 5936 } 5937 5938 kvm_check_async_pf_completion(vcpu); 5939 5940 if (signal_pending(current)) { 5941 r = -EINTR; 5942 vcpu->run->exit_reason = KVM_EXIT_INTR; 5943 ++vcpu->stat.signal_exits; 5944 } 5945 if (need_resched()) { 5946 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 5947 kvm_resched(vcpu); 5948 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 5949 } 5950 } 5951 5952 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 5953 5954 vapic_exit(vcpu); 5955 5956 return r; 5957 } 5958 5959 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 5960 { 5961 int r; 5962 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5963 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 5964 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5965 if (r != EMULATE_DONE) 5966 return 0; 5967 return 1; 5968 } 5969 5970 static int 
complete_emulated_pio(struct kvm_vcpu *vcpu) 5971 { 5972 BUG_ON(!vcpu->arch.pio.count); 5973 5974 return complete_emulated_io(vcpu); 5975 } 5976 5977 /* 5978 * Implements the following, as a state machine: 5979 * 5980 * read: 5981 * for each fragment 5982 * for each mmio piece in the fragment 5983 * write gpa, len 5984 * exit 5985 * copy data 5986 * execute insn 5987 * 5988 * write: 5989 * for each fragment 5990 * for each mmio piece in the fragment 5991 * write gpa, len 5992 * copy data 5993 * exit 5994 */ 5995 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 5996 { 5997 struct kvm_run *run = vcpu->run; 5998 struct kvm_mmio_fragment *frag; 5999 unsigned len; 6000 6001 BUG_ON(!vcpu->mmio_needed); 6002 6003 /* Complete previous fragment */ 6004 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 6005 len = min(8u, frag->len); 6006 if (!vcpu->mmio_is_write) 6007 memcpy(frag->data, run->mmio.data, len); 6008 6009 if (frag->len <= 8) { 6010 /* Switch to the next fragment. */ 6011 frag++; 6012 vcpu->mmio_cur_fragment++; 6013 } else { 6014 /* Go forward to the next mmio piece. */ 6015 frag->data += len; 6016 frag->gpa += len; 6017 frag->len -= len; 6018 } 6019 6020 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 6021 vcpu->mmio_needed = 0; 6022 if (vcpu->mmio_is_write) 6023 return 1; 6024 vcpu->mmio_read_completed = 1; 6025 return complete_emulated_io(vcpu); 6026 } 6027 6028 run->exit_reason = KVM_EXIT_MMIO; 6029 run->mmio.phys_addr = frag->gpa; 6030 if (vcpu->mmio_is_write) 6031 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 6032 run->mmio.len = min(8u, frag->len); 6033 run->mmio.is_write = vcpu->mmio_is_write; 6034 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 6035 return 0; 6036 } 6037 6038 6039 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 6040 { 6041 int r; 6042 sigset_t sigsaved; 6043 6044 if (!tsk_used_math(current) && init_fpu(current)) 6045 return -ENOMEM; 6046 6047 if (vcpu->sigset_active) 6048 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 6049 6050 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 6051 kvm_vcpu_block(vcpu); 6052 kvm_apic_accept_events(vcpu); 6053 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 6054 r = -EAGAIN; 6055 goto out; 6056 } 6057 6058 /* re-sync apic's tpr */ 6059 if (!irqchip_in_kernel(vcpu->kvm)) { 6060 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 6061 r = -EINVAL; 6062 goto out; 6063 } 6064 } 6065 6066 if (unlikely(vcpu->arch.complete_userspace_io)) { 6067 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 6068 vcpu->arch.complete_userspace_io = NULL; 6069 r = cui(vcpu); 6070 if (r <= 0) 6071 goto out; 6072 } else 6073 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 6074 6075 r = __vcpu_run(vcpu); 6076 6077 out: 6078 post_kvm_run_save(vcpu); 6079 if (vcpu->sigset_active) 6080 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 6081 6082 return r; 6083 } 6084 6085 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6086 { 6087 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 6088 /* 6089 * We are here if userspace calls get_regs() in the middle of 6090 * instruction emulation. Registers state needs to be copied 6091 * back from emulation context to vcpu. 
Userspace shouldn't usually do 6092 * that, but some badly designed PV devices (the vmware 6093 * backdoor interface) need this to work. 6094 */ 6095 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); 6096 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6097 } 6098 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); 6099 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); 6100 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); 6101 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); 6102 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); 6103 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); 6104 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 6105 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); 6106 #ifdef CONFIG_X86_64 6107 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); 6108 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); 6109 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); 6110 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); 6111 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); 6112 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); 6113 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); 6114 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); 6115 #endif 6116 6117 regs->rip = kvm_rip_read(vcpu); 6118 regs->rflags = kvm_get_rflags(vcpu); 6119 6120 return 0; 6121 } 6122 6123 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6124 { 6125 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 6126 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6127 6128 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); 6129 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); 6130 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); 6131 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); 6132 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); 6133 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); 6134 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); 6135 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); 6136 #ifdef CONFIG_X86_64 6137 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); 6138 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 6139 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); 6140 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); 6141 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); 6142 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); 6143 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); 6144 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); 6145 #endif 6146 6147 kvm_rip_write(vcpu, regs->rip); 6148 kvm_set_rflags(vcpu, regs->rflags); 6149 6150 vcpu->arch.exception.pending = false; 6151 6152 kvm_make_request(KVM_REQ_EVENT, vcpu); 6153 6154 return 0; 6155 } 6156 6157 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 6158 { 6159 struct kvm_segment cs; 6160 6161 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 6162 *db = cs.db; 6163 *l = cs.l; 6164 } 6165 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); 6166 6167 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 6168 struct kvm_sregs *sregs) 6169 { 6170 struct desc_ptr dt; 6171 6172 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 6173 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 6174 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 6175 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 6176 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 6177 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 6178 6179 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 6180 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 6181 6182 kvm_x86_ops->get_idt(vcpu, 
&dt); 6183 sregs->idt.limit = dt.size; 6184 sregs->idt.base = dt.address; 6185 kvm_x86_ops->get_gdt(vcpu, &dt); 6186 sregs->gdt.limit = dt.size; 6187 sregs->gdt.base = dt.address; 6188 6189 sregs->cr0 = kvm_read_cr0(vcpu); 6190 sregs->cr2 = vcpu->arch.cr2; 6191 sregs->cr3 = kvm_read_cr3(vcpu); 6192 sregs->cr4 = kvm_read_cr4(vcpu); 6193 sregs->cr8 = kvm_get_cr8(vcpu); 6194 sregs->efer = vcpu->arch.efer; 6195 sregs->apic_base = kvm_get_apic_base(vcpu); 6196 6197 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 6198 6199 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) 6200 set_bit(vcpu->arch.interrupt.nr, 6201 (unsigned long *)sregs->interrupt_bitmap); 6202 6203 return 0; 6204 } 6205 6206 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 6207 struct kvm_mp_state *mp_state) 6208 { 6209 kvm_apic_accept_events(vcpu); 6210 mp_state->mp_state = vcpu->arch.mp_state; 6211 return 0; 6212 } 6213 6214 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 6215 struct kvm_mp_state *mp_state) 6216 { 6217 if (!kvm_vcpu_has_lapic(vcpu) && 6218 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 6219 return -EINVAL; 6220 6221 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 6222 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 6223 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 6224 } else 6225 vcpu->arch.mp_state = mp_state->mp_state; 6226 kvm_make_request(KVM_REQ_EVENT, vcpu); 6227 return 0; 6228 } 6229 6230 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 6231 int reason, bool has_error_code, u32 error_code) 6232 { 6233 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 6234 int ret; 6235 6236 init_emulate_ctxt(vcpu); 6237 6238 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 6239 has_error_code, error_code); 6240 6241 if (ret) 6242 return EMULATE_FAIL; 6243 6244 kvm_rip_write(vcpu, ctxt->eip); 6245 kvm_set_rflags(vcpu, ctxt->eflags); 6246 kvm_make_request(KVM_REQ_EVENT, vcpu); 6247 return EMULATE_DONE; 6248 } 6249 EXPORT_SYMBOL_GPL(kvm_task_switch); 6250 6251 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 6252 struct kvm_sregs *sregs) 6253 { 6254 int mmu_reset_needed = 0; 6255 int pending_vec, max_bits, idx; 6256 struct desc_ptr dt; 6257 6258 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) 6259 return -EINVAL; 6260 6261 dt.size = sregs->idt.limit; 6262 dt.address = sregs->idt.base; 6263 kvm_x86_ops->set_idt(vcpu, &dt); 6264 dt.size = sregs->gdt.limit; 6265 dt.address = sregs->gdt.base; 6266 kvm_x86_ops->set_gdt(vcpu, &dt); 6267 6268 vcpu->arch.cr2 = sregs->cr2; 6269 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 6270 vcpu->arch.cr3 = sregs->cr3; 6271 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 6272 6273 kvm_set_cr8(vcpu, sregs->cr8); 6274 6275 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 6276 kvm_x86_ops->set_efer(vcpu, sregs->efer); 6277 kvm_set_apic_base(vcpu, sregs->apic_base); 6278 6279 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 6280 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 6281 vcpu->arch.cr0 = sregs->cr0; 6282 6283 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 6284 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 6285 if (sregs->cr4 & X86_CR4_OSXSAVE) 6286 kvm_update_cpuid(vcpu); 6287 6288 idx = srcu_read_lock(&vcpu->kvm->srcu); 6289 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 6290 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 6291 mmu_reset_needed = 1; 6292 } 6293 srcu_read_unlock(&vcpu->kvm->srcu, idx); 
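/*
 * Any of the CR0/CR3/CR4/EFER updates above can change the paging
 * mode, so the cached MMU role and shadow page table roots may now be
 * stale; rebuild the MMU context below whenever something relevant
 * changed.
 */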
6294 6295 if (mmu_reset_needed) 6296 kvm_mmu_reset_context(vcpu); 6297 6298 max_bits = KVM_NR_INTERRUPTS; 6299 pending_vec = find_first_bit( 6300 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 6301 if (pending_vec < max_bits) { 6302 kvm_queue_interrupt(vcpu, pending_vec, false); 6303 pr_debug("Set back pending irq %d\n", pending_vec); 6304 } 6305 6306 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 6307 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 6308 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 6309 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 6310 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 6311 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 6312 6313 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 6314 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 6315 6316 update_cr8_intercept(vcpu); 6317 6318 /* Older userspace won't unhalt the vcpu on reset. */ 6319 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 6320 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 6321 !is_protmode(vcpu)) 6322 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 6323 6324 kvm_make_request(KVM_REQ_EVENT, vcpu); 6325 6326 return 0; 6327 } 6328 6329 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 6330 struct kvm_guest_debug *dbg) 6331 { 6332 unsigned long rflags; 6333 int i, r; 6334 6335 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 6336 r = -EBUSY; 6337 if (vcpu->arch.exception.pending) 6338 goto out; 6339 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 6340 kvm_queue_exception(vcpu, DB_VECTOR); 6341 else 6342 kvm_queue_exception(vcpu, BP_VECTOR); 6343 } 6344 6345 /* 6346 * Read rflags as long as potentially injected trace flags are still 6347 * filtered out. 6348 */ 6349 rflags = kvm_get_rflags(vcpu); 6350 6351 vcpu->guest_debug = dbg->control; 6352 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 6353 vcpu->guest_debug = 0; 6354 6355 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 6356 for (i = 0; i < KVM_NR_DB_REGS; ++i) 6357 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 6358 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 6359 } else { 6360 for (i = 0; i < KVM_NR_DB_REGS; i++) 6361 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 6362 } 6363 kvm_update_dr7(vcpu); 6364 6365 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6366 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + 6367 get_segment_base(vcpu, VCPU_SREG_CS); 6368 6369 /* 6370 * Trigger an rflags update that will inject or remove the trace 6371 * flags. 6372 */ 6373 kvm_set_rflags(vcpu, rflags); 6374 6375 kvm_x86_ops->update_db_bp_intercept(vcpu); 6376 6377 r = 0; 6378 6379 out: 6380 6381 return r; 6382 } 6383 6384 /* 6385 * Translate a guest virtual address to a guest physical address. 
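 * Backs the KVM_TRANSLATE ioctl; the walk uses the vcpu's current
 * paging mode via kvm_mmu_gva_to_gpa_system(), and tr->valid is
 * cleared when the address is unmapped (UNMAPPED_GVA).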
6386 */ 6387 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 6388 struct kvm_translation *tr) 6389 { 6390 unsigned long vaddr = tr->linear_address; 6391 gpa_t gpa; 6392 int idx; 6393 6394 idx = srcu_read_lock(&vcpu->kvm->srcu); 6395 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 6396 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6397 tr->physical_address = gpa; 6398 tr->valid = gpa != UNMAPPED_GVA; 6399 tr->writeable = 1; 6400 tr->usermode = 0; 6401 6402 return 0; 6403 } 6404 6405 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 6406 { 6407 struct i387_fxsave_struct *fxsave = 6408 &vcpu->arch.guest_fpu.state->fxsave; 6409 6410 memcpy(fpu->fpr, fxsave->st_space, 128); 6411 fpu->fcw = fxsave->cwd; 6412 fpu->fsw = fxsave->swd; 6413 fpu->ftwx = fxsave->twd; 6414 fpu->last_opcode = fxsave->fop; 6415 fpu->last_ip = fxsave->rip; 6416 fpu->last_dp = fxsave->rdp; 6417 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); 6418 6419 return 0; 6420 } 6421 6422 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 6423 { 6424 struct i387_fxsave_struct *fxsave = 6425 &vcpu->arch.guest_fpu.state->fxsave; 6426 6427 memcpy(fxsave->st_space, fpu->fpr, 128); 6428 fxsave->cwd = fpu->fcw; 6429 fxsave->swd = fpu->fsw; 6430 fxsave->twd = fpu->ftwx; 6431 fxsave->fop = fpu->last_opcode; 6432 fxsave->rip = fpu->last_ip; 6433 fxsave->rdp = fpu->last_dp; 6434 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); 6435 6436 return 0; 6437 } 6438 6439 int fx_init(struct kvm_vcpu *vcpu) 6440 { 6441 int err; 6442 6443 err = fpu_alloc(&vcpu->arch.guest_fpu); 6444 if (err) 6445 return err; 6446 6447 fpu_finit(&vcpu->arch.guest_fpu); 6448 6449 /* 6450 * Ensure guest xcr0 is valid for loading 6451 */ 6452 vcpu->arch.xcr0 = XSTATE_FP; 6453 6454 vcpu->arch.cr0 |= X86_CR0_ET; 6455 6456 return 0; 6457 } 6458 EXPORT_SYMBOL_GPL(fx_init); 6459 6460 static void fx_free(struct kvm_vcpu *vcpu) 6461 { 6462 fpu_free(&vcpu->arch.guest_fpu); 6463 } 6464 6465 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 6466 { 6467 if (vcpu->guest_fpu_loaded) 6468 return; 6469 6470 /* 6471 * Restore all possible states in the guest, 6472 * and assume host would use all available bits. 6473 * Guest xcr0 would be loaded later. 
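 * (kvm_put_guest_xcr0() below makes sure the host XCR0, which enables
 * all supported state components, is active before the guest FPU
 * image is restored.)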
6474 */ 6475 kvm_put_guest_xcr0(vcpu); 6476 vcpu->guest_fpu_loaded = 1; 6477 __kernel_fpu_begin(); 6478 fpu_restore_checking(&vcpu->arch.guest_fpu); 6479 trace_kvm_fpu(1); 6480 } 6481 6482 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 6483 { 6484 kvm_put_guest_xcr0(vcpu); 6485 6486 if (!vcpu->guest_fpu_loaded) 6487 return; 6488 6489 vcpu->guest_fpu_loaded = 0; 6490 fpu_save_init(&vcpu->arch.guest_fpu); 6491 __kernel_fpu_end(); 6492 ++vcpu->stat.fpu_reload; 6493 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 6494 trace_kvm_fpu(0); 6495 } 6496 6497 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 6498 { 6499 kvmclock_reset(vcpu); 6500 6501 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 6502 fx_free(vcpu); 6503 kvm_x86_ops->vcpu_free(vcpu); 6504 } 6505 6506 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 6507 unsigned int id) 6508 { 6509 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) 6510 printk_once(KERN_WARNING 6511 "kvm: SMP vm created on host with unstable TSC; " 6512 "guest TSC will not be reliable\n"); 6513 return kvm_x86_ops->vcpu_create(kvm, id); 6514 } 6515 6516 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 6517 { 6518 int r; 6519 6520 vcpu->arch.mtrr_state.have_fixed = 1; 6521 r = vcpu_load(vcpu); 6522 if (r) 6523 return r; 6524 kvm_vcpu_reset(vcpu); 6525 r = kvm_mmu_setup(vcpu); 6526 vcpu_put(vcpu); 6527 6528 return r; 6529 } 6530 6531 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 6532 { 6533 int r; 6534 struct msr_data msr; 6535 6536 r = vcpu_load(vcpu); 6537 if (r) 6538 return r; 6539 msr.data = 0x0; 6540 msr.index = MSR_IA32_TSC; 6541 msr.host_initiated = true; 6542 kvm_write_tsc(vcpu, &msr); 6543 vcpu_put(vcpu); 6544 6545 return r; 6546 } 6547 6548 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 6549 { 6550 int r; 6551 vcpu->arch.apf.msr_val = 0; 6552 6553 r = vcpu_load(vcpu); 6554 BUG_ON(r); 6555 kvm_mmu_unload(vcpu); 6556 vcpu_put(vcpu); 6557 6558 fx_free(vcpu); 6559 kvm_x86_ops->vcpu_free(vcpu); 6560 } 6561 6562 void kvm_vcpu_reset(struct kvm_vcpu *vcpu) 6563 { 6564 atomic_set(&vcpu->arch.nmi_queued, 0); 6565 vcpu->arch.nmi_pending = 0; 6566 vcpu->arch.nmi_injected = false; 6567 6568 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 6569 vcpu->arch.dr6 = DR6_FIXED_1; 6570 vcpu->arch.dr7 = DR7_FIXED_1; 6571 kvm_update_dr7(vcpu); 6572 6573 kvm_make_request(KVM_REQ_EVENT, vcpu); 6574 vcpu->arch.apf.msr_val = 0; 6575 vcpu->arch.st.msr_val = 0; 6576 6577 kvmclock_reset(vcpu); 6578 6579 kvm_clear_async_pf_completion_queue(vcpu); 6580 kvm_async_pf_hash_reset(vcpu); 6581 vcpu->arch.apf.halted = false; 6582 6583 kvm_pmu_reset(vcpu); 6584 6585 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 6586 vcpu->arch.regs_avail = ~0; 6587 vcpu->arch.regs_dirty = ~0; 6588 6589 kvm_x86_ops->vcpu_reset(vcpu); 6590 } 6591 6592 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) 6593 { 6594 struct kvm_segment cs; 6595 6596 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 6597 cs.selector = vector << 8; 6598 cs.base = vector << 12; 6599 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 6600 kvm_rip_write(vcpu, 0); 6601 } 6602 6603 int kvm_arch_hardware_enable(void *garbage) 6604 { 6605 struct kvm *kvm; 6606 struct kvm_vcpu *vcpu; 6607 int i; 6608 int ret; 6609 u64 local_tsc; 6610 u64 max_tsc = 0; 6611 bool stable, backwards_tsc = false; 6612 6613 kvm_shared_msr_cpu_online(); 6614 ret = kvm_x86_ops->hardware_enable(garbage); 6615 if (ret != 0) 6616 return ret; 6617 6618 local_tsc = native_read_tsc(); 6619 stable = !check_tsc_unstable(); 6620 
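/*
 * Scan every vCPU: if the TSC is supposedly stable yet this CPU's
 * current TSC reads lower than a value some vCPU already observed,
 * the TSC went backwards (typically across suspend/hibernate); track
 * the largest value seen so the gap can be compensated below.
 */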
list_for_each_entry(kvm, &vm_list, vm_list) { 6621 kvm_for_each_vcpu(i, vcpu, kvm) { 6622 if (!stable && vcpu->cpu == smp_processor_id()) 6623 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); 6624 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 6625 backwards_tsc = true; 6626 if (vcpu->arch.last_host_tsc > max_tsc) 6627 max_tsc = vcpu->arch.last_host_tsc; 6628 } 6629 } 6630 } 6631 6632 /* 6633 * Sometimes, even reliable TSCs go backwards. This happens on 6634 * platforms that reset TSC during suspend or hibernate actions, but 6635 * maintain synchronization. We must compensate. Fortunately, we can 6636 * detect that condition here, which happens early in CPU bringup, 6637 * before any KVM threads can be running. Unfortunately, we can't 6638 * bring the TSCs fully up to date with real time, as we aren't yet far 6639 * enough into CPU bringup that we know how much real time has actually 6640 * elapsed; our helper function, get_kernel_ns() will be using boot 6641 * variables that haven't been updated yet. 6642 * 6643 * So we simply find the maximum observed TSC above, then record the 6644 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 6645 * the adjustment will be applied. Note that we accumulate 6646 * adjustments, in case multiple suspend cycles happen before some VCPU 6647 * gets a chance to run again. In the event that no KVM threads get a 6648 * chance to run, we will miss the entire elapsed period, as we'll have 6649 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 6650 * lose cycle time. This isn't too big a deal, since the loss will be 6651 * uniform across all VCPUs (not to mention the scenario is extremely 6652 * unlikely). It is possible that a second hibernate recovery happens 6653 * much faster than a first, causing the observed TSC here to be 6654 * smaller; this would require additional padding adjustment, which is 6655 * why we set last_host_tsc to the local tsc observed here. 6656 * 6657 * N.B. - this code below runs only on platforms with reliable TSC, 6658 * as that is the only way backwards_tsc is set above. Also note 6659 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 6660 * have the same delta_cyc adjustment applied if backwards_tsc 6661 * is detected. Note further, this adjustment is only done once, 6662 * as we reset last_host_tsc on all VCPUs to stop this from being 6663 * called multiple times (one for each physical CPU bringup). 6664 * 6665 * Platforms with unreliable TSCs don't have to deal with this; they 6666 * will be compensated by the logic in vcpu_load, which sets the TSC to 6667 * catchup mode. This will catch up all VCPUs to real time, but cannot 6668 * guarantee that they stay in perfect synchronization. 6669 */ 6670 if (backwards_tsc) { 6671 u64 delta_cyc = max_tsc - local_tsc; 6672 list_for_each_entry(kvm, &vm_list, vm_list) { 6673 kvm_for_each_vcpu(i, vcpu, kvm) { 6674 vcpu->arch.tsc_offset_adjustment += delta_cyc; 6675 vcpu->arch.last_host_tsc = local_tsc; 6676 set_bit(KVM_REQ_MASTERCLOCK_UPDATE, 6677 &vcpu->requests); 6678 } 6679 6680 /* 6681 * We have to disable TSC offset matching; if a VM was being 6682 * booted while the host was going through an S4 suspend, the 6683 * recorded matching state would be stale and could cause problems. 6684 * Solving this properly is left as an exercise to the reader.
6685 */ 6686 kvm->arch.last_tsc_nsec = 0; 6687 kvm->arch.last_tsc_write = 0; 6688 } 6689 6690 } 6691 return 0; 6692 } 6693 6694 void kvm_arch_hardware_disable(void *garbage) 6695 { 6696 kvm_x86_ops->hardware_disable(garbage); 6697 drop_user_return_notifiers(garbage); 6698 } 6699 6700 int kvm_arch_hardware_setup(void) 6701 { 6702 return kvm_x86_ops->hardware_setup(); 6703 } 6704 6705 void kvm_arch_hardware_unsetup(void) 6706 { 6707 kvm_x86_ops->hardware_unsetup(); 6708 } 6709 6710 void kvm_arch_check_processor_compat(void *rtn) 6711 { 6712 kvm_x86_ops->check_processor_compatibility(rtn); 6713 } 6714 6715 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) 6716 { 6717 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); 6718 } 6719 6720 struct static_key kvm_no_apic_vcpu __read_mostly; 6721 6722 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 6723 { 6724 struct page *page; 6725 struct kvm *kvm; 6726 int r; 6727 6728 BUG_ON(vcpu->kvm == NULL); 6729 kvm = vcpu->kvm; 6730 6731 vcpu->arch.emulate_ctxt.ops = &emulate_ops; 6732 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) 6733 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 6734 else 6735 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 6736 6737 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 6738 if (!page) { 6739 r = -ENOMEM; 6740 goto fail; 6741 } 6742 vcpu->arch.pio_data = page_address(page); 6743 6744 kvm_set_tsc_khz(vcpu, max_tsc_khz); 6745 6746 r = kvm_mmu_create(vcpu); 6747 if (r < 0) 6748 goto fail_free_pio_data; 6749 6750 if (irqchip_in_kernel(kvm)) { 6751 r = kvm_create_lapic(vcpu); 6752 if (r < 0) 6753 goto fail_mmu_destroy; 6754 } else 6755 static_key_slow_inc(&kvm_no_apic_vcpu); 6756 6757 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, 6758 GFP_KERNEL); 6759 if (!vcpu->arch.mce_banks) { 6760 r = -ENOMEM; 6761 goto fail_free_lapic; 6762 } 6763 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 6764 6765 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { 6766 r = -ENOMEM; 6767 goto fail_free_mce_banks; 6768 } 6769 6770 r = fx_init(vcpu); 6771 if (r) 6772 goto fail_free_wbinvd_dirty_mask; 6773 6774 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 6775 vcpu->arch.pv_time_enabled = false; 6776 kvm_async_pf_hash_reset(vcpu); 6777 kvm_pmu_init(vcpu); 6778 6779 return 0; 6780 fail_free_wbinvd_dirty_mask: 6781 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 6782 fail_free_mce_banks: 6783 kfree(vcpu->arch.mce_banks); 6784 fail_free_lapic: 6785 kvm_free_lapic(vcpu); 6786 fail_mmu_destroy: 6787 kvm_mmu_destroy(vcpu); 6788 fail_free_pio_data: 6789 free_page((unsigned long)vcpu->arch.pio_data); 6790 fail: 6791 return r; 6792 } 6793 6794 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 6795 { 6796 int idx; 6797 6798 kvm_pmu_destroy(vcpu); 6799 kfree(vcpu->arch.mce_banks); 6800 kvm_free_lapic(vcpu); 6801 idx = srcu_read_lock(&vcpu->kvm->srcu); 6802 kvm_mmu_destroy(vcpu); 6803 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6804 free_page((unsigned long)vcpu->arch.pio_data); 6805 if (!irqchip_in_kernel(vcpu->kvm)) 6806 static_key_slow_dec(&kvm_no_apic_vcpu); 6807 } 6808 6809 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 6810 { 6811 if (type) 6812 return -EINVAL; 6813 6814 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 6815 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 6816 6817 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 6818 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 6819 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 6820 
set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 6821 &kvm->arch.irq_sources_bitmap); 6822 6823 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 6824 mutex_init(&kvm->arch.apic_map_lock); 6825 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); 6826 6827 pvclock_update_vm_gtod_copy(kvm); 6828 6829 return 0; 6830 } 6831 6832 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 6833 { 6834 int r; 6835 r = vcpu_load(vcpu); 6836 BUG_ON(r); 6837 kvm_mmu_unload(vcpu); 6838 vcpu_put(vcpu); 6839 } 6840 6841 static void kvm_free_vcpus(struct kvm *kvm) 6842 { 6843 unsigned int i; 6844 struct kvm_vcpu *vcpu; 6845 6846 /* 6847 * Unpin any mmu pages first. 6848 */ 6849 kvm_for_each_vcpu(i, vcpu, kvm) { 6850 kvm_clear_async_pf_completion_queue(vcpu); 6851 kvm_unload_vcpu_mmu(vcpu); 6852 } 6853 kvm_for_each_vcpu(i, vcpu, kvm) 6854 kvm_arch_vcpu_free(vcpu); 6855 6856 mutex_lock(&kvm->lock); 6857 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 6858 kvm->vcpus[i] = NULL; 6859 6860 atomic_set(&kvm->online_vcpus, 0); 6861 mutex_unlock(&kvm->lock); 6862 } 6863 6864 void kvm_arch_sync_events(struct kvm *kvm) 6865 { 6866 kvm_free_all_assigned_devices(kvm); 6867 kvm_free_pit(kvm); 6868 } 6869 6870 void kvm_arch_destroy_vm(struct kvm *kvm) 6871 { 6872 if (current->mm == kvm->mm) { 6873 /* 6874 * Free memory regions allocated on behalf of userspace, 6875 * unless the memory map has changed due to process exit 6876 * or fd copying. 6877 */ 6878 struct kvm_userspace_memory_region mem; 6879 memset(&mem, 0, sizeof(mem)); 6880 mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; 6881 kvm_set_memory_region(kvm, &mem); 6882 6883 mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 6884 kvm_set_memory_region(kvm, &mem); 6885 6886 mem.slot = TSS_PRIVATE_MEMSLOT; 6887 kvm_set_memory_region(kvm, &mem); 6888 } 6889 kvm_iommu_unmap_guest(kvm); 6890 kfree(kvm->arch.vpic); 6891 kfree(kvm->arch.vioapic); 6892 kvm_free_vcpus(kvm); 6893 if (kvm->arch.apic_access_page) 6894 put_page(kvm->arch.apic_access_page); 6895 if (kvm->arch.ept_identity_pagetable) 6896 put_page(kvm->arch.ept_identity_pagetable); 6897 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 6898 } 6899 6900 void kvm_arch_free_memslot(struct kvm_memory_slot *free, 6901 struct kvm_memory_slot *dont) 6902 { 6903 int i; 6904 6905 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 6906 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { 6907 kvm_kvfree(free->arch.rmap[i]); 6908 free->arch.rmap[i] = NULL; 6909 } 6910 if (i == 0) 6911 continue; 6912 6913 if (!dont || free->arch.lpage_info[i - 1] != 6914 dont->arch.lpage_info[i - 1]) { 6915 kvm_kvfree(free->arch.lpage_info[i - 1]); 6916 free->arch.lpage_info[i - 1] = NULL; 6917 } 6918 } 6919 } 6920 6921 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 6922 { 6923 int i; 6924 6925 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 6926 unsigned long ugfn; 6927 int lpages; 6928 int level = i + 1; 6929 6930 lpages = gfn_to_index(slot->base_gfn + npages - 1, 6931 slot->base_gfn, level) + 1; 6932 6933 slot->arch.rmap[i] = 6934 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); 6935 if (!slot->arch.rmap[i]) 6936 goto out_free; 6937 if (i == 0) 6938 continue; 6939 6940 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * 6941 sizeof(*slot->arch.lpage_info[i - 1])); 6942 if (!slot->arch.lpage_info[i - 1]) 6943 goto out_free; 6944 6945 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 6946 slot->arch.lpage_info[i - 1][0].write_count = 1; 6947 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 6948
slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; 6949 ugfn = slot->userspace_addr >> PAGE_SHIFT; 6950 /* 6951 * If the gfn and userspace address are not aligned wrt each 6952 * other, or if explicitly asked to, disable large page 6953 * support for this slot 6954 */ 6955 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || 6956 !kvm_largepages_enabled()) { 6957 unsigned long j; 6958 6959 for (j = 0; j < lpages; ++j) 6960 slot->arch.lpage_info[i - 1][j].write_count = 1; 6961 } 6962 } 6963 6964 return 0; 6965 6966 out_free: 6967 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 6968 kvm_kvfree(slot->arch.rmap[i]); 6969 slot->arch.rmap[i] = NULL; 6970 if (i == 0) 6971 continue; 6972 6973 kvm_kvfree(slot->arch.lpage_info[i - 1]); 6974 slot->arch.lpage_info[i - 1] = NULL; 6975 } 6976 return -ENOMEM; 6977 } 6978 6979 int kvm_arch_prepare_memory_region(struct kvm *kvm, 6980 struct kvm_memory_slot *memslot, 6981 struct kvm_userspace_memory_region *mem, 6982 enum kvm_mr_change change) 6983 { 6984 /* 6985 * Only private memory slots need to be mapped here since 6986 * KVM_SET_MEMORY_REGION ioctl is no longer supported. 6987 */ 6988 if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { 6989 unsigned long userspace_addr; 6990 6991 /* 6992 * MAP_SHARED to prevent internal slot pages from being moved 6993 * by fork()/COW. 6994 */ 6995 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, 6996 PROT_READ | PROT_WRITE, 6997 MAP_SHARED | MAP_ANONYMOUS, 0); 6998 6999 if (IS_ERR((void *)userspace_addr)) 7000 return PTR_ERR((void *)userspace_addr); 7001 7002 memslot->userspace_addr = userspace_addr; 7003 } 7004 7005 return 0; 7006 } 7007 7008 void kvm_arch_commit_memory_region(struct kvm *kvm, 7009 struct kvm_userspace_memory_region *mem, 7010 const struct kvm_memory_slot *old, 7011 enum kvm_mr_change change) 7012 { 7013 7014 int nr_mmu_pages = 0; 7015 7016 if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) { 7017 int ret; 7018 7019 ret = vm_munmap(old->userspace_addr, 7020 old->npages * PAGE_SIZE); 7021 if (ret < 0) 7022 printk(KERN_WARNING 7023 "kvm_vm_ioctl_set_memory_region: " 7024 "failed to munmap memory\n"); 7025 } 7026 7027 if (!kvm->arch.n_requested_mmu_pages) 7028 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 7029 7030 if (nr_mmu_pages) 7031 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 7032 /* 7033 * Write protect all pages for dirty logging. 7034 * Existing largepage mappings are destroyed here and new ones will 7035 * not be created until the end of the logging. 7036 */ 7037 if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) 7038 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 7039 /* 7040 * If memory slot is created, or moved, we need to clear all 7041 * mmio sptes. 
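 * Stale mmio sptes cached for the old layout could otherwise alias
 * guest frames that are now backed by real memory.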
7042 */ 7043 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 7044 kvm_mmu_zap_mmio_sptes(kvm); 7045 kvm_reload_remote_mmus(kvm); 7046 } 7047 } 7048 7049 void kvm_arch_flush_shadow_all(struct kvm *kvm) 7050 { 7051 kvm_mmu_zap_all(kvm); 7052 kvm_reload_remote_mmus(kvm); 7053 } 7054 7055 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 7056 struct kvm_memory_slot *slot) 7057 { 7058 kvm_arch_flush_shadow_all(kvm); 7059 } 7060 7061 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 7062 { 7063 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 7064 !vcpu->arch.apf.halted) 7065 || !list_empty_careful(&vcpu->async_pf.done) 7066 || kvm_apic_has_events(vcpu) 7067 || atomic_read(&vcpu->arch.nmi_queued) || 7068 (kvm_arch_interrupt_allowed(vcpu) && 7069 kvm_cpu_has_interrupt(vcpu)); 7070 } 7071 7072 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 7073 { 7074 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 7075 } 7076 7077 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 7078 { 7079 return kvm_x86_ops->interrupt_allowed(vcpu); 7080 } 7081 7082 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 7083 { 7084 unsigned long current_rip = kvm_rip_read(vcpu) + 7085 get_segment_base(vcpu, VCPU_SREG_CS); 7086 7087 return current_rip == linear_rip; 7088 } 7089 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 7090 7091 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 7092 { 7093 unsigned long rflags; 7094 7095 rflags = kvm_x86_ops->get_rflags(vcpu); 7096 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 7097 rflags &= ~X86_EFLAGS_TF; 7098 return rflags; 7099 } 7100 EXPORT_SYMBOL_GPL(kvm_get_rflags); 7101 7102 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 7103 { 7104 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 7105 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 7106 rflags |= X86_EFLAGS_TF; 7107 kvm_x86_ops->set_rflags(vcpu, rflags); 7108 kvm_make_request(KVM_REQ_EVENT, vcpu); 7109 } 7110 EXPORT_SYMBOL_GPL(kvm_set_rflags); 7111 7112 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) 7113 { 7114 int r; 7115 7116 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || 7117 is_error_page(work->page)) 7118 return; 7119 7120 r = kvm_mmu_reload(vcpu); 7121 if (unlikely(r)) 7122 return; 7123 7124 if (!vcpu->arch.mmu.direct_map && 7125 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) 7126 return; 7127 7128 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); 7129 } 7130 7131 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 7132 { 7133 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 7134 } 7135 7136 static inline u32 kvm_async_pf_next_probe(u32 key) 7137 { 7138 return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); 7139 } 7140 7141 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7142 { 7143 u32 key = kvm_async_pf_hash_fn(gfn); 7144 7145 while (vcpu->arch.apf.gfns[key] != ~0) 7146 key = kvm_async_pf_next_probe(key); 7147 7148 vcpu->arch.apf.gfns[key] = gfn; 7149 } 7150 7151 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 7152 { 7153 int i; 7154 u32 key = kvm_async_pf_hash_fn(gfn); 7155 7156 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && 7157 (vcpu->arch.apf.gfns[key] != gfn && 7158 vcpu->arch.apf.gfns[key] != ~0); i++) 7159 key = kvm_async_pf_next_probe(key); 7160 7161 return key; 7162 } 7163 7164 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7165 { 7166 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == 
gfn; 7167 } 7168 7169 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7170 { 7171 u32 i, j, k; 7172 7173 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 7174 while (true) { 7175 vcpu->arch.apf.gfns[i] = ~0; 7176 do { 7177 j = kvm_async_pf_next_probe(j); 7178 if (vcpu->arch.apf.gfns[j] == ~0) 7179 return; 7180 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 7181 /* 7182 * k lies cyclically in ]i,j] 7183 * | i.k.j | 7184 * |....j i.k.| or |.k..j i...| 7185 */ 7186 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); 7187 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 7188 i = j; 7189 } 7190 } 7191 7192 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) 7193 { 7194 7195 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, 7196 sizeof(val)); 7197 } 7198 7199 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 7200 struct kvm_async_pf *work) 7201 { 7202 struct x86_exception fault; 7203 7204 trace_kvm_async_pf_not_present(work->arch.token, work->gva); 7205 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 7206 7207 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || 7208 (vcpu->arch.apf.send_user_only && 7209 kvm_x86_ops->get_cpl(vcpu) == 0)) 7210 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 7211 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { 7212 fault.vector = PF_VECTOR; 7213 fault.error_code_valid = true; 7214 fault.error_code = 0; 7215 fault.nested_page_fault = false; 7216 fault.address = work->arch.token; 7217 kvm_inject_page_fault(vcpu, &fault); 7218 } 7219 } 7220 7221 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 7222 struct kvm_async_pf *work) 7223 { 7224 struct x86_exception fault; 7225 7226 trace_kvm_async_pf_ready(work->arch.token, work->gva); 7227 if (is_error_page(work->page)) 7228 work->arch.token = ~0; /* broadcast wakeup */ 7229 else 7230 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 7231 7232 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 7233 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { 7234 fault.vector = PF_VECTOR; 7235 fault.error_code_valid = true; 7236 fault.error_code = 0; 7237 fault.nested_page_fault = false; 7238 fault.address = work->arch.token; 7239 kvm_inject_page_fault(vcpu, &fault); 7240 } 7241 vcpu->arch.apf.halted = false; 7242 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 7243 } 7244 7245 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 7246 { 7247 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) 7248 return true; 7249 else 7250 return !kvm_event_needs_reinjection(vcpu) && 7251 kvm_x86_ops->interrupt_allowed(vcpu); 7252 } 7253 7254 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); 7255 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); 7256 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); 7257 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); 7258 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); 7259 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); 7260 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); 7261 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); 7262 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); 7263 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); 7264 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); 7265 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); 7266
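/*
 * Illustrative sketch (not part of the driver): how a userspace VMM is
 * expected to drive the MMIO completion state machine implemented by
 * complete_emulated_mmio() above. The kvm_run layout and KVM_RUN /
 * KVM_EXIT_MMIO usage follow the public KVM API in <linux/kvm.h>; the
 * device_read()/device_write() helpers are hypothetical placeholders
 * for the VMM's device model.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical device-model hooks provided by the VMM. */
extern void device_read(unsigned long long addr, void *data, unsigned int len);
extern void device_write(unsigned long long addr, const void *data, unsigned int len);

static void vmm_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/*
			 * Each exit carries at most 8 bytes of one
			 * fragment; the kernel re-enters
			 * complete_emulated_mmio() on the next KVM_RUN
			 * until every fragment has been consumed.
			 */
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				device_read(run->mmio.phys_addr,
					    run->mmio.data, run->mmio.len);
			break;
		default:
			return;	/* other exit reasons handled elsewhere */
		}
	}
}
#endif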