/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;
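/*
 * Statistics exported through debugfs.  Each entry pairs a name with the
 * offset of the counter inside struct kvm (VM_STAT) or struct kvm_vcpu
 * (VCPU_STAT), as encoded by the macros above.
 */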
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* only read, and nobody should modify it at this time,
	 * so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before it is used */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}
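/*
 * Switch a user-return MSR to a guest value.  The host value is restored
 * lazily: the first write on a CPU registers a user-return notifier, and
 * kvm_on_user_return() above rewrites the host values only when the CPU
 * actually returns to userspace.
 */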
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
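/*
 * Page fault injection: kvm_inject_page_fault() below queues #PF against
 * the active MMU context, while kvm_propagate_fault() routes a fault to
 * either the nested or the ordinary MMU depending on where it originated.
 */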
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * This function will be used to read from the physical memory of the
 * currently running guest.  The difference from kvm_read_guest_page is
 * that this function can read from guest physical memory or from the
 * guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}
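/*
 * In PAE mode CR3 points at a 32-byte-aligned table of four PDPTEs.
 * load_pdptrs() below reads that table through the guest's paging
 * hierarchy: the page comes from cr3 >> PAGE_SHIFT and the table's
 * starting offset within the page from cr3 bits 11:5 (the
 * "((cr3 & (PAGE_SIZE-1)) >> 5) << 2" expression, counted in u64 slots).
 */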
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}
static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	kvm_put_guest_xcr0(vcpu);
	vcpu->arch.xcr0 = xcr0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;
	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
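/*
 * A CR3 write that leaves the value unchanged (and whose PDPTEs still
 * match) only needs the shadow roots re-synced and the TLB flushed;
 * otherwise the reserved-bit checks below apply and a new root is
 * installed via kvm_mmu_new_cr3().
 */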
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	kvm_mmu_new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	10
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL
};

static unsigned num_msrs_to_save;

static const u32 emulated_msrs[] = {
	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->set_msr(vcpu, msr);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
		cycle_t cycle_last;
		cycle_t mask;
		u32 mult;
		u32 shift;
	} clock;

	/* open coded 'struct timespec' */
	u64 monotonic_time_snsec;
	time_t monotonic_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
	vdata->clock.cycle_last = tk->clock->cycle_last;
	vdata->clock.mask = tk->clock->mask;
	vdata->clock.mult = tk->mult;
	vdata->clock.shift = tk->shift;

	vdata->monotonic_time_sec = tk->xtime_sec
				+ tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec = tk->xtime_nsec
				+ (tk->wall_to_monotonic.tv_nsec
					<< tk->shift);
	while (vdata->monotonic_time_snsec >=
				(((u64)NSEC_PER_SEC) << tk->shift)) {
		vdata->monotonic_time_snsec -=
				((u64)NSEC_PER_SEC) << tk->shift;
		vdata->monotonic_time_sec++;
	}

	write_seqcount_end(&vdata->seq);
}
#endif
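/*
 * update_pvclock_gtod() above mirrors the host timekeeper into
 * pvclock_gtod_data (it is presumably wired to the pvclock_gtod notifier
 * declared in <linux/pvclock_gtod.h>), so kvm_get_time_and_clockread()
 * below can derive host monotonic time from a raw TSC read without
 * taking timekeeper locks.
 */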
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
		boot = timespec_sub(boot, ts);
	}
	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_khz * 1000LL;
	scaled64 = scaled_khz * 1000LL;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
}

static inline u64 get_kernel_ns(void)
{
	struct timespec ts;

	WARN_ON(preemptible());
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	return timespec_to_ns(&ts);
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}
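/*
 * Worked example (illustrative): with tsc_tolerance_ppm at its default of
 * 250, a host tsc_khz of 2,000,000 gives adjust_tsc_khz() bounds of
 * 2000000 * (1000000 - 250) / 1000000 = 1999500 kHz and 2000500 kHz, so a
 * requested guest rate within +/-0.025% of the host rate runs unscaled.
 */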
static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (this_tsc_khz == 0)
		return;

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = this_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide whether the
	 * rate being applied is within those bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	bool do_request = false;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
		if (!ka->use_master_clock)
			do_request = 1;

	if (!vcpus_matched && ka->use_master_clock)
		do_request = 1;

	if (do_request)
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
			    ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
	u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
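/*
 * kvm_write_tsc() below converts the difference between the written value
 * and the currently expected guest TSC into microseconds (usdiff); writes
 * that land within one second of the running rate are treated as an
 * attempt to synchronize with the other vCPUs rather than as a new clock.
 */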
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	s64 usdiff;
	bool matched;
	u64 data = msr->data;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
	ns = get_kernel_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		int faulted = 0;

		/* n.b - signed multiplication and division required */
		usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
		/* do_div() only does unsigned */
		asm("1: idivl %[divisor]\n"
		    "2: xor %%edx, %%edx\n"
		    "   movl $0, %[faulted]\n"
		    "3:\n"
		    ".section .fixup,\"ax\"\n"
		    "4: movl $1, %[faulted]\n"
		    "   jmp  3b\n"
		    ".previous\n"

		_ASM_EXTABLE(1b, 4b)

		: "=A"(usdiff), [faulted] "=r" (faulted)
		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));

#endif
		do_div(elapsed, 1000);
		usdiff -= elapsed;
		if (usdiff < 0)
			usdiff = -usdiff;

		/* idivl overflow => difference is larger than USEC_PER_SEC */
		if (faulted)
			usdiff = USEC_PER_SEC;
	} else
		usdiff = USEC_PER_SEC; /* disable TSC match window below */

	/*
	 * Special case: TSC write with a small delta (1 second) of virtual
	 * cycle time against real time is interpreted as an attempt to
	 * synchronize the CPU.
	 *
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
	 */
	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %u, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	/* Reset of TSC must disable overshoot protection below */
	vcpu->arch.hv_clock.tsc_timestamp = 0;
	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (matched)
		kvm->arch.nr_vcpus_matched_tsc++;
	else
		kvm->arch.nr_vcpus_matched_tsc = 0;

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}

EXPORT_SYMBOL_GPL(kvm_write_tsc);
#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
{
	unsigned long seq;
	u64 ns;
	int mode;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	struct timespec ts;

	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
		return false;

	monotonic_to_bootbased(&ts);
	*kernel_ns = timespec_to_ns(&ts);

	return true;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0			|	VCPU1 on CPU1
 *
 * 1. read timespec0,tsc0
 * 2.						| timespec1 = timespec0 + N
 *						| tsc1 = tsc0 + M
 * 3. transition to guest			| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0)		|
 * 5.						| ret1 = timespec1 + (rdtsc - tsc1)
 *						| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */
static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	host_tsc_clocksource = kvm_get_time_and_clockread(
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

	ka->use_master_clock = host_tsc_clocksource & vcpus_matched;

	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
				      vcpus_matched);
#endif
}

static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	int i;
	struct kvm_vcpu *vcpu;
	struct kvm_arch *ka = &kvm->arch;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	kvm_make_mclock_inprogress_request(kvm);
	/* no guest entries from this point */
	pvclock_update_vm_gtod_copy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);

	spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
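/*
 * kvm_guest_time_update() refreshes one vCPU's pvclock area: it samples
 * either the per-VM master clock or the local TSC/kernel time, applies
 * TSC catch-up if needed, rescales to the current host TSC frequency and
 * publishes the result (with an even version number) to the guest.
 */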
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
	unsigned long flags, this_tsc_khz;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
	s64 kernel_ns, max_kernel_ns;
	u64 tsc_timestamp, host_tsc;
	struct pvclock_vcpu_time_info guest_hv_clock;
	u8 pvclock_flags;
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
	if (unlikely(this_tsc_khz == 0)) {
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
	if (!use_master_clock) {
		host_tsc = native_read_tsc();
		kernel_ns = get_kernel_ns();
	}

	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);

	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *      very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
			tsc_timestamp = tsc;
		}
	}

	local_irq_restore(flags);

	if (!vcpu->pv_time_enabled)
		return 0;

	/*
	 * Time as measured by the TSC may go backwards when resetting the base
	 * tsc_timestamp.  The reason for this is that the TSC resolution is
	 * higher than the resolution of the other clock scales.  Thus, many
	 * possible measurements of the TSC correspond to one measurement of any
	 * other clock, and so a spread of values is possible.  This is not a
	 * problem for the computation of the nanosecond clock; with TSC rates
	 * around 1GHZ, there can only be a few cycles which correspond to one
	 * nanosecond value, and any path through this code will inevitably
	 * take longer than that.  However, with the kernel_ns value itself,
	 * the precision may be much lower, down to HZ granularity.  If the
	 * first sampling of TSC against kernel_ns ends in the low part of the
	 * range, and the second in the high end of the range, we can get:
	 *
	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
	 *
	 * As the sampling errors potentially range in the thousands of cycles,
	 * it is possible such a time value has already been observed by the
	 * guest.  To protect against this, we must compute the system time as
	 * observed by the guest and ensure the new system time is greater.
	 */
	max_kernel_ns = 0;
	if (vcpu->hv_clock.tsc_timestamp) {
		max_kernel_ns = vcpu->last_guest_tsc -
				vcpu->hv_clock.tsc_timestamp;
		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
				    vcpu->hv_clock.tsc_to_system_mul,
				    vcpu->hv_clock.tsc_shift);
		max_kernel_ns += vcpu->last_kernel_ns;
	}

	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
		vcpu->hw_tsc_khz = this_tsc_khz;
	}

	/* with a master <monotonic time, tsc value> tuple,
	 * pvclock clock reads always increase at the (scaled) rate
	 * of guest TSC - no need to deal with sampling errors.
	 */
	if (!use_master_clock) {
		if (max_kernel_ns > kernel_ns)
			kernel_ns = max_kernel_ns;
	}
	/* With all the info we got, fill in the values */
	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
	vcpu->last_kernel_ns = kernel_ns;
	vcpu->last_guest_tsc = tsc_timestamp;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return 0;

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	/* If the host uses TSC clocksource, then it is stable */
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

	vcpu->hv_clock.flags = pvclock_flags;

	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));
	return 0;
}

/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
 * The worst case for a remote vcpu to update its kvmclock
 * is then bounded by maximum nohz sleep latency.
 */
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	int i;
	struct kvm *kvm = v->kvm;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
		kvm_vcpu_kick(vcpu);
	}
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
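/*
 * MCE bank MSRs come in groups of four (CTL, STATUS, ADDR, MISC) starting
 * at MSR_IA32_MC0_CTL; set_msr_mce() below indexes vcpu->arch.mce_banks by
 * "msr - MSR_IA32_MC0_CTL" and applies the IA32_MCi_CTL all-ones/zero rule
 * only when that offset lands on a CTL register ((offset & 3) == 0).
 */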
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL;
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncaught #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
		goto out;
	}
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			    "data 0x%llx\n", msr, data);
		return 1;
	}
	return 0;
}
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			    "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

	/* Bits 2:5 are reserved, should be zero */
	if (data & 0x3c)
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
				      sizeof(u32)))
		return 1;

	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pv_time_enabled = false;
}

static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
	u64 delta;

	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	vcpu->arch.st.accum_steal = delta;
}

static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
	vcpu->arch.st.steal.version += 2;
	vcpu->arch.st.accum_steal = 0;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
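/*
 * Steal time flows in two steps: accumulate_steal_time() above snapshots
 * the scheduler's run_delay delta into accum_steal (called with preemption
 * disabled, see the MSR_KVM_STEAL_TIME handler below), and
 * record_steal_time() later publishes it to the guest's kvm_steal_time
 * area through the gfn-to-hva cache, bumping the version by 2 so the
 * guest sees a consistent update.
 */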
0x2ff: 2018 return set_msr_mtrr(vcpu, msr, data); 2019 case MSR_IA32_APICBASE: 2020 kvm_set_apic_base(vcpu, data); 2021 break; 2022 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2023 return kvm_x2apic_msr_write(vcpu, msr, data); 2024 case MSR_IA32_TSCDEADLINE: 2025 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2026 break; 2027 case MSR_IA32_TSC_ADJUST: 2028 if (guest_cpuid_has_tsc_adjust(vcpu)) { 2029 if (!msr_info->host_initiated) { 2030 u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 2031 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); 2032 } 2033 vcpu->arch.ia32_tsc_adjust_msr = data; 2034 } 2035 break; 2036 case MSR_IA32_MISC_ENABLE: 2037 vcpu->arch.ia32_misc_enable_msr = data; 2038 break; 2039 case MSR_KVM_WALL_CLOCK_NEW: 2040 case MSR_KVM_WALL_CLOCK: 2041 vcpu->kvm->arch.wall_clock = data; 2042 kvm_write_wall_clock(vcpu->kvm, data); 2043 break; 2044 case MSR_KVM_SYSTEM_TIME_NEW: 2045 case MSR_KVM_SYSTEM_TIME: { 2046 u64 gpa_offset; 2047 kvmclock_reset(vcpu); 2048 2049 vcpu->arch.time = data; 2050 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2051 2052 /* we verify if the enable bit is set... */ 2053 if (!(data & 1)) 2054 break; 2055 2056 gpa_offset = data & ~(PAGE_MASK | 1); 2057 2058 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 2059 &vcpu->arch.pv_time, data & ~1ULL, 2060 sizeof(struct pvclock_vcpu_time_info))) 2061 vcpu->arch.pv_time_enabled = false; 2062 else 2063 vcpu->arch.pv_time_enabled = true; 2064 2065 break; 2066 } 2067 case MSR_KVM_ASYNC_PF_EN: 2068 if (kvm_pv_enable_async_pf(vcpu, data)) 2069 return 1; 2070 break; 2071 case MSR_KVM_STEAL_TIME: 2072 2073 if (unlikely(!sched_info_on())) 2074 return 1; 2075 2076 if (data & KVM_STEAL_RESERVED_MASK) 2077 return 1; 2078 2079 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, 2080 data & KVM_STEAL_VALID_BITS, 2081 sizeof(struct kvm_steal_time))) 2082 return 1; 2083 2084 vcpu->arch.st.msr_val = data; 2085 2086 if (!(data & KVM_MSR_ENABLED)) 2087 break; 2088 2089 vcpu->arch.st.last_steal = current->sched_info.run_delay; 2090 2091 preempt_disable(); 2092 accumulate_steal_time(vcpu); 2093 preempt_enable(); 2094 2095 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2096 2097 break; 2098 case MSR_KVM_PV_EOI_EN: 2099 if (kvm_lapic_enable_pv_eoi(vcpu, data)) 2100 return 1; 2101 break; 2102 2103 case MSR_IA32_MCG_CTL: 2104 case MSR_IA32_MCG_STATUS: 2105 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: 2106 return set_msr_mce(vcpu, msr, data); 2107 2108 /* Performance counters are not protected by a CPUID bit, 2109 * so we should check all of them in the generic path for the sake of 2110 * cross vendor migration. 2111 * Writing a zero into the event select MSRs disables them, 2112 * which we perfectly emulate ;-). Any other value should be at least 2113 * reported, some guests depend on them. 2114 */ 2115 case MSR_K7_EVNTSEL0: 2116 case MSR_K7_EVNTSEL1: 2117 case MSR_K7_EVNTSEL2: 2118 case MSR_K7_EVNTSEL3: 2119 if (data != 0) 2120 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " 2121 "0x%x data 0x%llx\n", msr, data); 2122 break; 2123 /* at least RHEL 4 unconditionally writes to the perfctr registers, 2124 * so we ignore writes to make it happy. 
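 * The value written is still reported through vcpu_unimpl() so it
 * shows up in the host log, but it is otherwise discarded.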
2125 */ 2126 case MSR_K7_PERFCTR0: 2127 case MSR_K7_PERFCTR1: 2128 case MSR_K7_PERFCTR2: 2129 case MSR_K7_PERFCTR3: 2130 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " 2131 "0x%x data 0x%llx\n", msr, data); 2132 break; 2133 case MSR_P6_PERFCTR0: 2134 case MSR_P6_PERFCTR1: 2135 pr = true; 2136 case MSR_P6_EVNTSEL0: 2137 case MSR_P6_EVNTSEL1: 2138 if (kvm_pmu_msr(vcpu, msr)) 2139 return kvm_pmu_set_msr(vcpu, msr_info); 2140 2141 if (pr || data != 0) 2142 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 2143 "0x%x data 0x%llx\n", msr, data); 2144 break; 2145 case MSR_K7_CLK_CTL: 2146 /* 2147 * Ignore all writes to this no longer documented MSR. 2148 * Writes are only relevant for old K7 processors, 2149 * all pre-dating SVM, but a recommended workaround from 2150 * AMD for these chips. It is possible to specify the 2151 * affected processor models on the command line, hence 2152 * the need to ignore the workaround. 2153 */ 2154 break; 2155 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 2156 if (kvm_hv_msr_partition_wide(msr)) { 2157 int r; 2158 mutex_lock(&vcpu->kvm->lock); 2159 r = set_msr_hyperv_pw(vcpu, msr, data); 2160 mutex_unlock(&vcpu->kvm->lock); 2161 return r; 2162 } else 2163 return set_msr_hyperv(vcpu, msr, data); 2164 break; 2165 case MSR_IA32_BBL_CR_CTL3: 2166 /* Drop writes to this legacy MSR -- see rdmsr 2167 * counterpart for further detail. 2168 */ 2169 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); 2170 break; 2171 case MSR_AMD64_OSVW_ID_LENGTH: 2172 if (!guest_cpuid_has_osvw(vcpu)) 2173 return 1; 2174 vcpu->arch.osvw.length = data; 2175 break; 2176 case MSR_AMD64_OSVW_STATUS: 2177 if (!guest_cpuid_has_osvw(vcpu)) 2178 return 1; 2179 vcpu->arch.osvw.status = data; 2180 break; 2181 default: 2182 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) 2183 return xen_hvm_config(vcpu, data); 2184 if (kvm_pmu_msr(vcpu, msr)) 2185 return kvm_pmu_set_msr(vcpu, msr_info); 2186 if (!ignore_msrs) { 2187 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", 2188 msr, data); 2189 return 1; 2190 } else { 2191 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", 2192 msr, data); 2193 break; 2194 } 2195 } 2196 return 0; 2197 } 2198 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 2199 2200 2201 /* 2202 * Reads an msr value (of 'msr_index') into 'pdata'. 2203 * Returns 0 on success, non-0 otherwise. 2204 * Assumes vcpu_load() was already called. 
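 *
 * The read itself is delegated to the vendor module (VMX or SVM)
 * through kvm_x86_ops->get_msr().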
2205 */ 2206 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) 2207 { 2208 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); 2209 } 2210 2211 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2212 { 2213 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; 2214 2215 if (!msr_mtrr_valid(msr)) 2216 return 1; 2217 2218 if (msr == MSR_MTRRdefType) 2219 *pdata = vcpu->arch.mtrr_state.def_type + 2220 (vcpu->arch.mtrr_state.enabled << 10); 2221 else if (msr == MSR_MTRRfix64K_00000) 2222 *pdata = p[0]; 2223 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) 2224 *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; 2225 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) 2226 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; 2227 else if (msr == MSR_IA32_CR_PAT) 2228 *pdata = vcpu->arch.pat; 2229 else { /* Variable MTRRs */ 2230 int idx, is_mtrr_mask; 2231 u64 *pt; 2232 2233 idx = (msr - 0x200) / 2; 2234 is_mtrr_mask = msr - 0x200 - 2 * idx; 2235 if (!is_mtrr_mask) 2236 pt = 2237 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; 2238 else 2239 pt = 2240 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; 2241 *pdata = *pt; 2242 } 2243 2244 return 0; 2245 } 2246 2247 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2248 { 2249 u64 data; 2250 u64 mcg_cap = vcpu->arch.mcg_cap; 2251 unsigned bank_num = mcg_cap & 0xff; 2252 2253 switch (msr) { 2254 case MSR_IA32_P5_MC_ADDR: 2255 case MSR_IA32_P5_MC_TYPE: 2256 data = 0; 2257 break; 2258 case MSR_IA32_MCG_CAP: 2259 data = vcpu->arch.mcg_cap; 2260 break; 2261 case MSR_IA32_MCG_CTL: 2262 if (!(mcg_cap & MCG_CTL_P)) 2263 return 1; 2264 data = vcpu->arch.mcg_ctl; 2265 break; 2266 case MSR_IA32_MCG_STATUS: 2267 data = vcpu->arch.mcg_status; 2268 break; 2269 default: 2270 if (msr >= MSR_IA32_MC0_CTL && 2271 msr < MSR_IA32_MC0_CTL + 4 * bank_num) { 2272 u32 offset = msr - MSR_IA32_MC0_CTL; 2273 data = vcpu->arch.mce_banks[offset]; 2274 break; 2275 } 2276 return 1; 2277 } 2278 *pdata = data; 2279 return 0; 2280 } 2281 2282 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2283 { 2284 u64 data = 0; 2285 struct kvm *kvm = vcpu->kvm; 2286 2287 switch (msr) { 2288 case HV_X64_MSR_GUEST_OS_ID: 2289 data = kvm->arch.hv_guest_os_id; 2290 break; 2291 case HV_X64_MSR_HYPERCALL: 2292 data = kvm->arch.hv_hypercall; 2293 break; 2294 default: 2295 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2296 return 1; 2297 } 2298 2299 *pdata = data; 2300 return 0; 2301 } 2302 2303 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2304 { 2305 u64 data = 0; 2306 2307 switch (msr) { 2308 case HV_X64_MSR_VP_INDEX: { 2309 int r; 2310 struct kvm_vcpu *v; 2311 kvm_for_each_vcpu(r, v, vcpu->kvm) 2312 if (v == vcpu) 2313 data = r; 2314 break; 2315 } 2316 case HV_X64_MSR_EOI: 2317 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); 2318 case HV_X64_MSR_ICR: 2319 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); 2320 case HV_X64_MSR_TPR: 2321 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); 2322 case HV_X64_MSR_APIC_ASSIST_PAGE: 2323 data = vcpu->arch.hv_vapic; 2324 break; 2325 default: 2326 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2327 return 1; 2328 } 2329 *pdata = data; 2330 return 0; 2331 } 2332 2333 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2334 { 2335 u64 data; 2336 2337 switch (msr) { 2338 case MSR_IA32_PLATFORM_ID: 2339 case MSR_IA32_EBL_CR_POWERON: 2340 case MSR_IA32_DEBUGCTLMSR: 2341 case 
MSR_IA32_LASTBRANCHFROMIP: 2342 case MSR_IA32_LASTBRANCHTOIP: 2343 case MSR_IA32_LASTINTFROMIP: 2344 case MSR_IA32_LASTINTTOIP: 2345 case MSR_K8_SYSCFG: 2346 case MSR_K7_HWCR: 2347 case MSR_VM_HSAVE_PA: 2348 case MSR_K7_EVNTSEL0: 2349 case MSR_K7_PERFCTR0: 2350 case MSR_K8_INT_PENDING_MSG: 2351 case MSR_AMD64_NB_CFG: 2352 case MSR_FAM10H_MMIO_CONF_BASE: 2353 case MSR_AMD64_BU_CFG2: 2354 data = 0; 2355 break; 2356 case MSR_P6_PERFCTR0: 2357 case MSR_P6_PERFCTR1: 2358 case MSR_P6_EVNTSEL0: 2359 case MSR_P6_EVNTSEL1: 2360 if (kvm_pmu_msr(vcpu, msr)) 2361 return kvm_pmu_get_msr(vcpu, msr, pdata); 2362 data = 0; 2363 break; 2364 case MSR_IA32_UCODE_REV: 2365 data = 0x100000000ULL; 2366 break; 2367 case MSR_MTRRcap: 2368 data = 0x500 | KVM_NR_VAR_MTRR; 2369 break; 2370 case 0x200 ... 0x2ff: 2371 return get_msr_mtrr(vcpu, msr, pdata); 2372 case 0xcd: /* fsb frequency */ 2373 data = 3; 2374 break; 2375 /* 2376 * MSR_EBC_FREQUENCY_ID 2377 * Conservative value valid for even the basic CPU models. 2378 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 2379 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 2380 * and 266MHz for model 3, or 4. Set Core Clock 2381 * Frequency to System Bus Frequency Ratio to 1 (bits 2382 * 31:24) even though these are only valid for CPU 2383 * models > 2, however guests may end up dividing or 2384 * multiplying by zero otherwise. 2385 */ 2386 case MSR_EBC_FREQUENCY_ID: 2387 data = 1 << 24; 2388 break; 2389 case MSR_IA32_APICBASE: 2390 data = kvm_get_apic_base(vcpu); 2391 break; 2392 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2393 return kvm_x2apic_msr_read(vcpu, msr, pdata); 2394 break; 2395 case MSR_IA32_TSCDEADLINE: 2396 data = kvm_get_lapic_tscdeadline_msr(vcpu); 2397 break; 2398 case MSR_IA32_TSC_ADJUST: 2399 data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 2400 break; 2401 case MSR_IA32_MISC_ENABLE: 2402 data = vcpu->arch.ia32_misc_enable_msr; 2403 break; 2404 case MSR_IA32_PERF_STATUS: 2405 /* TSC increment by tick */ 2406 data = 1000ULL; 2407 /* CPU multiplier */ 2408 data |= (((uint64_t)4ULL) << 40); 2409 break; 2410 case MSR_EFER: 2411 data = vcpu->arch.efer; 2412 break; 2413 case MSR_KVM_WALL_CLOCK: 2414 case MSR_KVM_WALL_CLOCK_NEW: 2415 data = vcpu->kvm->arch.wall_clock; 2416 break; 2417 case MSR_KVM_SYSTEM_TIME: 2418 case MSR_KVM_SYSTEM_TIME_NEW: 2419 data = vcpu->arch.time; 2420 break; 2421 case MSR_KVM_ASYNC_PF_EN: 2422 data = vcpu->arch.apf.msr_val; 2423 break; 2424 case MSR_KVM_STEAL_TIME: 2425 data = vcpu->arch.st.msr_val; 2426 break; 2427 case MSR_KVM_PV_EOI_EN: 2428 data = vcpu->arch.pv_eoi.msr_val; 2429 break; 2430 case MSR_IA32_P5_MC_ADDR: 2431 case MSR_IA32_P5_MC_TYPE: 2432 case MSR_IA32_MCG_CAP: 2433 case MSR_IA32_MCG_CTL: 2434 case MSR_IA32_MCG_STATUS: 2435 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: 2436 return get_msr_mce(vcpu, msr, pdata); 2437 case MSR_K7_CLK_CTL: 2438 /* 2439 * Provide expected ramp-up count for K7. All other 2440 * are set to zero, indicating minimum divisors for 2441 * every field. 2442 * 2443 * This prevents guest kernels on AMD host with CPU 2444 * type 6, model 8 and higher from exploding due to 2445 * the rdmsr failing. 2446 */ 2447 data = 0x20000000; 2448 break; 2449 case HV_X64_MSR_GUEST_OS_ID ... 
HV_X64_MSR_SINT15: 2450 if (kvm_hv_msr_partition_wide(msr)) { 2451 int r; 2452 mutex_lock(&vcpu->kvm->lock); 2453 r = get_msr_hyperv_pw(vcpu, msr, pdata); 2454 mutex_unlock(&vcpu->kvm->lock); 2455 return r; 2456 } else 2457 return get_msr_hyperv(vcpu, msr, pdata); 2458 break; 2459 case MSR_IA32_BBL_CR_CTL3: 2460 /* This legacy MSR exists but isn't fully documented in current 2461 * silicon. It is however accessed by winxp in very narrow 2462 * scenarios where it sets bit #19, itself documented as 2463 * a "reserved" bit. Best effort attempt to source coherent 2464 * read data here should the balance of the register be 2465 * interpreted by the guest: 2466 * 2467 * L2 cache control register 3: 64GB range, 256KB size, 2468 * enabled, latency 0x1, configured 2469 */ 2470 data = 0xbe702111; 2471 break; 2472 case MSR_AMD64_OSVW_ID_LENGTH: 2473 if (!guest_cpuid_has_osvw(vcpu)) 2474 return 1; 2475 data = vcpu->arch.osvw.length; 2476 break; 2477 case MSR_AMD64_OSVW_STATUS: 2478 if (!guest_cpuid_has_osvw(vcpu)) 2479 return 1; 2480 data = vcpu->arch.osvw.status; 2481 break; 2482 default: 2483 if (kvm_pmu_msr(vcpu, msr)) 2484 return kvm_pmu_get_msr(vcpu, msr, pdata); 2485 if (!ignore_msrs) { 2486 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); 2487 return 1; 2488 } else { 2489 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); 2490 data = 0; 2491 } 2492 break; 2493 } 2494 *pdata = data; 2495 return 0; 2496 } 2497 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 2498 2499 /* 2500 * Read or write a bunch of msrs. All parameters are kernel addresses. 2501 * 2502 * @return number of msrs set successfully. 2503 */ 2504 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 2505 struct kvm_msr_entry *entries, 2506 int (*do_msr)(struct kvm_vcpu *vcpu, 2507 unsigned index, u64 *data)) 2508 { 2509 int i, idx; 2510 2511 idx = srcu_read_lock(&vcpu->kvm->srcu); 2512 for (i = 0; i < msrs->nmsrs; ++i) 2513 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 2514 break; 2515 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2516 2517 return i; 2518 } 2519 2520 /* 2521 * Read or write a bunch of msrs. Parameters are user addresses. 2522 * 2523 * @return number of msrs set successfully. 
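 *
 * A negative errno is returned instead when the kvm_msrs header or the
 * entry array cannot be copied, or when too many MSRs are requested.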
2524 */ 2525 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 2526 int (*do_msr)(struct kvm_vcpu *vcpu, 2527 unsigned index, u64 *data), 2528 int writeback) 2529 { 2530 struct kvm_msrs msrs; 2531 struct kvm_msr_entry *entries; 2532 int r, n; 2533 unsigned size; 2534 2535 r = -EFAULT; 2536 if (copy_from_user(&msrs, user_msrs, sizeof msrs)) 2537 goto out; 2538 2539 r = -E2BIG; 2540 if (msrs.nmsrs >= MAX_IO_MSRS) 2541 goto out; 2542 2543 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 2544 entries = memdup_user(user_msrs->entries, size); 2545 if (IS_ERR(entries)) { 2546 r = PTR_ERR(entries); 2547 goto out; 2548 } 2549 2550 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 2551 if (r < 0) 2552 goto out_free; 2553 2554 r = -EFAULT; 2555 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 2556 goto out_free; 2557 2558 r = n; 2559 2560 out_free: 2561 kfree(entries); 2562 out: 2563 return r; 2564 } 2565 2566 int kvm_dev_ioctl_check_extension(long ext) 2567 { 2568 int r; 2569 2570 switch (ext) { 2571 case KVM_CAP_IRQCHIP: 2572 case KVM_CAP_HLT: 2573 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 2574 case KVM_CAP_SET_TSS_ADDR: 2575 case KVM_CAP_EXT_CPUID: 2576 case KVM_CAP_EXT_EMUL_CPUID: 2577 case KVM_CAP_CLOCKSOURCE: 2578 case KVM_CAP_PIT: 2579 case KVM_CAP_NOP_IO_DELAY: 2580 case KVM_CAP_MP_STATE: 2581 case KVM_CAP_SYNC_MMU: 2582 case KVM_CAP_USER_NMI: 2583 case KVM_CAP_REINJECT_CONTROL: 2584 case KVM_CAP_IRQ_INJECT_STATUS: 2585 case KVM_CAP_IRQFD: 2586 case KVM_CAP_IOEVENTFD: 2587 case KVM_CAP_PIT2: 2588 case KVM_CAP_PIT_STATE2: 2589 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2590 case KVM_CAP_XEN_HVM: 2591 case KVM_CAP_ADJUST_CLOCK: 2592 case KVM_CAP_VCPU_EVENTS: 2593 case KVM_CAP_HYPERV: 2594 case KVM_CAP_HYPERV_VAPIC: 2595 case KVM_CAP_HYPERV_SPIN: 2596 case KVM_CAP_PCI_SEGMENT: 2597 case KVM_CAP_DEBUGREGS: 2598 case KVM_CAP_X86_ROBUST_SINGLESTEP: 2599 case KVM_CAP_XSAVE: 2600 case KVM_CAP_ASYNC_PF: 2601 case KVM_CAP_GET_TSC_KHZ: 2602 case KVM_CAP_KVMCLOCK_CTRL: 2603 case KVM_CAP_READONLY_MEM: 2604 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2605 case KVM_CAP_ASSIGN_DEV_IRQ: 2606 case KVM_CAP_PCI_2_3: 2607 #endif 2608 r = 1; 2609 break; 2610 case KVM_CAP_COALESCED_MMIO: 2611 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 2612 break; 2613 case KVM_CAP_VAPIC: 2614 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); 2615 break; 2616 case KVM_CAP_NR_VCPUS: 2617 r = KVM_SOFT_MAX_VCPUS; 2618 break; 2619 case KVM_CAP_MAX_VCPUS: 2620 r = KVM_MAX_VCPUS; 2621 break; 2622 case KVM_CAP_NR_MEMSLOTS: 2623 r = KVM_USER_MEM_SLOTS; 2624 break; 2625 case KVM_CAP_PV_MMU: /* obsolete */ 2626 r = 0; 2627 break; 2628 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2629 case KVM_CAP_IOMMU: 2630 r = iommu_present(&pci_bus_type); 2631 break; 2632 #endif 2633 case KVM_CAP_MCE: 2634 r = KVM_MAX_MCE_BANKS; 2635 break; 2636 case KVM_CAP_XCRS: 2637 r = cpu_has_xsave; 2638 break; 2639 case KVM_CAP_TSC_CONTROL: 2640 r = kvm_has_tsc_control; 2641 break; 2642 case KVM_CAP_TSC_DEADLINE_TIMER: 2643 r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); 2644 break; 2645 default: 2646 r = 0; 2647 break; 2648 } 2649 return r; 2650 2651 } 2652 2653 long kvm_arch_dev_ioctl(struct file *filp, 2654 unsigned int ioctl, unsigned long arg) 2655 { 2656 void __user *argp = (void __user *)arg; 2657 long r; 2658 2659 switch (ioctl) { 2660 case KVM_GET_MSR_INDEX_LIST: { 2661 struct kvm_msr_list __user *user_msr_list = argp; 2662 struct kvm_msr_list msr_list; 2663 unsigned n; 2664 2665 r = -EFAULT; 2666 if (copy_from_user(&msr_list, user_msr_list, sizeof 
msr_list)) 2667 goto out; 2668 n = msr_list.nmsrs; 2669 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); 2670 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) 2671 goto out; 2672 r = -E2BIG; 2673 if (n < msr_list.nmsrs) 2674 goto out; 2675 r = -EFAULT; 2676 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 2677 num_msrs_to_save * sizeof(u32))) 2678 goto out; 2679 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 2680 &emulated_msrs, 2681 ARRAY_SIZE(emulated_msrs) * sizeof(u32))) 2682 goto out; 2683 r = 0; 2684 break; 2685 } 2686 case KVM_GET_SUPPORTED_CPUID: 2687 case KVM_GET_EMULATED_CPUID: { 2688 struct kvm_cpuid2 __user *cpuid_arg = argp; 2689 struct kvm_cpuid2 cpuid; 2690 2691 r = -EFAULT; 2692 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 2693 goto out; 2694 2695 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 2696 ioctl); 2697 if (r) 2698 goto out; 2699 2700 r = -EFAULT; 2701 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 2702 goto out; 2703 r = 0; 2704 break; 2705 } 2706 case KVM_X86_GET_MCE_CAP_SUPPORTED: { 2707 u64 mce_cap; 2708 2709 mce_cap = KVM_MCE_CAP_SUPPORTED; 2710 r = -EFAULT; 2711 if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) 2712 goto out; 2713 r = 0; 2714 break; 2715 } 2716 default: 2717 r = -EINVAL; 2718 } 2719 out: 2720 return r; 2721 } 2722 2723 static void wbinvd_ipi(void *garbage) 2724 { 2725 wbinvd(); 2726 } 2727 2728 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 2729 { 2730 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 2731 } 2732 2733 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 2734 { 2735 /* Address WBINVD may be executed by guest */ 2736 if (need_emulate_wbinvd(vcpu)) { 2737 if (kvm_x86_ops->has_wbinvd_exit()) 2738 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 2739 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 2740 smp_call_function_single(vcpu->cpu, 2741 wbinvd_ipi, NULL, 1); 2742 } 2743 2744 kvm_x86_ops->vcpu_load(vcpu, cpu); 2745 2746 /* Apply any externally detected TSC adjustments (due to suspend) */ 2747 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 2748 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 2749 vcpu->arch.tsc_offset_adjustment = 0; 2750 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); 2751 } 2752 2753 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { 2754 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 2755 native_read_tsc() - vcpu->arch.last_host_tsc; 2756 if (tsc_delta < 0) 2757 mark_tsc_unstable("KVM discovered backwards TSC"); 2758 if (check_tsc_unstable()) { 2759 u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, 2760 vcpu->arch.last_guest_tsc); 2761 kvm_x86_ops->write_tsc_offset(vcpu, offset); 2762 vcpu->arch.tsc_catchup = 1; 2763 } 2764 /* 2765 * On a host with synchronized TSC, there is no need to update 2766 * kvmclock on vcpu->cpu migration 2767 */ 2768 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 2769 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2770 if (vcpu->cpu != cpu) 2771 kvm_migrate_timers(vcpu); 2772 vcpu->cpu = cpu; 2773 } 2774 2775 accumulate_steal_time(vcpu); 2776 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2777 } 2778 2779 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 2780 { 2781 kvm_x86_ops->vcpu_put(vcpu); 2782 kvm_put_guest_fpu(vcpu); 2783 vcpu->arch.last_host_tsc = native_read_tsc(); 2784 } 2785 2786 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 2787 struct kvm_lapic_state *s) 2788 { 2789 kvm_x86_ops->sync_pir_to_irr(vcpu); 2790 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); 2791 2792 return 0; 2793 } 2794 2795 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 2796 struct kvm_lapic_state *s) 2797 { 2798 kvm_apic_post_state_restore(vcpu, s); 2799 update_cr8_intercept(vcpu); 2800 2801 return 0; 2802 } 2803 2804 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 2805 struct kvm_interrupt *irq) 2806 { 2807 if (irq->irq >= KVM_NR_INTERRUPTS) 2808 return -EINVAL; 2809 if (irqchip_in_kernel(vcpu->kvm)) 2810 return -ENXIO; 2811 2812 kvm_queue_interrupt(vcpu, irq->irq, false); 2813 kvm_make_request(KVM_REQ_EVENT, vcpu); 2814 2815 return 0; 2816 } 2817 2818 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 2819 { 2820 kvm_inject_nmi(vcpu); 2821 2822 return 0; 2823 } 2824 2825 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 2826 struct kvm_tpr_access_ctl *tac) 2827 { 2828 if (tac->flags) 2829 return -EINVAL; 2830 vcpu->arch.tpr_access_reporting = !!tac->enabled; 2831 return 0; 2832 } 2833 2834 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 2835 u64 mcg_cap) 2836 { 2837 int r; 2838 unsigned bank_num = mcg_cap & 0xff, bank; 2839 2840 r = -EINVAL; 2841 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) 2842 goto out; 2843 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) 2844 goto out; 2845 r = 0; 2846 vcpu->arch.mcg_cap = mcg_cap; 2847 /* Init IA32_MCG_CTL to all 1s */ 2848 if (mcg_cap & MCG_CTL_P) 2849 vcpu->arch.mcg_ctl = ~(u64)0; 2850 /* Init IA32_MCi_CTL to all 1s */ 2851 for (bank = 0; bank < bank_num; bank++) 2852 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 2853 out: 2854 return r; 2855 } 2856 2857 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 2858 struct kvm_x86_mce *mce) 2859 { 2860 u64 mcg_cap = vcpu->arch.mcg_cap; 2861 unsigned bank_num = mcg_cap & 0xff; 2862 u64 *banks = vcpu->arch.mce_banks; 2863 2864 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 2865 return -EINVAL; 2866 /* 2867 * if IA32_MCG_CTL is not all 1s, the uncorrected error 2868 * reporting is disabled 2869 */ 2870 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 2871 vcpu->arch.mcg_ctl != ~(u64)0) 2872 return 0; 2873 banks += 4 * mce->bank; 2874 /* 2875 * if IA32_MCi_CTL is not all 1s, the uncorrected error 2876 * reporting is disabled for the bank 2877 */ 2878 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 2879 return 0; 2880 if (mce->status & 
MCI_STATUS_UC) { 2881 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 2882 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 2883 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2884 return 0; 2885 } 2886 if (banks[1] & MCI_STATUS_VAL) 2887 mce->status |= MCI_STATUS_OVER; 2888 banks[2] = mce->addr; 2889 banks[3] = mce->misc; 2890 vcpu->arch.mcg_status = mce->mcg_status; 2891 banks[1] = mce->status; 2892 kvm_queue_exception(vcpu, MC_VECTOR); 2893 } else if (!(banks[1] & MCI_STATUS_VAL) 2894 || !(banks[1] & MCI_STATUS_UC)) { 2895 if (banks[1] & MCI_STATUS_VAL) 2896 mce->status |= MCI_STATUS_OVER; 2897 banks[2] = mce->addr; 2898 banks[3] = mce->misc; 2899 banks[1] = mce->status; 2900 } else 2901 banks[1] |= MCI_STATUS_OVER; 2902 return 0; 2903 } 2904 2905 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 2906 struct kvm_vcpu_events *events) 2907 { 2908 process_nmi(vcpu); 2909 events->exception.injected = 2910 vcpu->arch.exception.pending && 2911 !kvm_exception_is_soft(vcpu->arch.exception.nr); 2912 events->exception.nr = vcpu->arch.exception.nr; 2913 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 2914 events->exception.pad = 0; 2915 events->exception.error_code = vcpu->arch.exception.error_code; 2916 2917 events->interrupt.injected = 2918 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; 2919 events->interrupt.nr = vcpu->arch.interrupt.nr; 2920 events->interrupt.soft = 0; 2921 events->interrupt.shadow = 2922 kvm_x86_ops->get_interrupt_shadow(vcpu, 2923 KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); 2924 2925 events->nmi.injected = vcpu->arch.nmi_injected; 2926 events->nmi.pending = vcpu->arch.nmi_pending != 0; 2927 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); 2928 events->nmi.pad = 0; 2929 2930 events->sipi_vector = 0; /* never valid when reporting to user space */ 2931 2932 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 2933 | KVM_VCPUEVENT_VALID_SHADOW); 2934 memset(&events->reserved, 0, sizeof(events->reserved)); 2935 } 2936 2937 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 2938 struct kvm_vcpu_events *events) 2939 { 2940 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 2941 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 2942 | KVM_VCPUEVENT_VALID_SHADOW)) 2943 return -EINVAL; 2944 2945 process_nmi(vcpu); 2946 vcpu->arch.exception.pending = events->exception.injected; 2947 vcpu->arch.exception.nr = events->exception.nr; 2948 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 2949 vcpu->arch.exception.error_code = events->exception.error_code; 2950 2951 vcpu->arch.interrupt.pending = events->interrupt.injected; 2952 vcpu->arch.interrupt.nr = events->interrupt.nr; 2953 vcpu->arch.interrupt.soft = events->interrupt.soft; 2954 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 2955 kvm_x86_ops->set_interrupt_shadow(vcpu, 2956 events->interrupt.shadow); 2957 2958 vcpu->arch.nmi_injected = events->nmi.injected; 2959 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 2960 vcpu->arch.nmi_pending = events->nmi.pending; 2961 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); 2962 2963 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 2964 kvm_vcpu_has_lapic(vcpu)) 2965 vcpu->arch.apic->sipi_vector = events->sipi_vector; 2966 2967 kvm_make_request(KVM_REQ_EVENT, vcpu); 2968 2969 return 0; 2970 } 2971 2972 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 2973 struct kvm_debugregs *dbgregs) 2974 { 2975 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 2976 dbgregs->dr6 = 
vcpu->arch.dr6; 2977 dbgregs->dr7 = vcpu->arch.dr7; 2978 dbgregs->flags = 0; 2979 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 2980 } 2981 2982 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 2983 struct kvm_debugregs *dbgregs) 2984 { 2985 if (dbgregs->flags) 2986 return -EINVAL; 2987 2988 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 2989 vcpu->arch.dr6 = dbgregs->dr6; 2990 vcpu->arch.dr7 = dbgregs->dr7; 2991 2992 return 0; 2993 } 2994 2995 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 2996 struct kvm_xsave *guest_xsave) 2997 { 2998 if (cpu_has_xsave) { 2999 memcpy(guest_xsave->region, 3000 &vcpu->arch.guest_fpu.state->xsave, 3001 vcpu->arch.guest_xstate_size); 3002 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= 3003 vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; 3004 } else { 3005 memcpy(guest_xsave->region, 3006 &vcpu->arch.guest_fpu.state->fxsave, 3007 sizeof(struct i387_fxsave_struct)); 3008 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 3009 XSTATE_FPSSE; 3010 } 3011 } 3012 3013 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 3014 struct kvm_xsave *guest_xsave) 3015 { 3016 u64 xstate_bv = 3017 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 3018 3019 if (cpu_has_xsave) { 3020 /* 3021 * Here we allow setting states that are not present in 3022 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 3023 * with old userspace. 3024 */ 3025 if (xstate_bv & ~KVM_SUPPORTED_XCR0) 3026 return -EINVAL; 3027 if (xstate_bv & ~host_xcr0) 3028 return -EINVAL; 3029 memcpy(&vcpu->arch.guest_fpu.state->xsave, 3030 guest_xsave->region, vcpu->arch.guest_xstate_size); 3031 } else { 3032 if (xstate_bv & ~XSTATE_FPSSE) 3033 return -EINVAL; 3034 memcpy(&vcpu->arch.guest_fpu.state->fxsave, 3035 guest_xsave->region, sizeof(struct i387_fxsave_struct)); 3036 } 3037 return 0; 3038 } 3039 3040 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 3041 struct kvm_xcrs *guest_xcrs) 3042 { 3043 if (!cpu_has_xsave) { 3044 guest_xcrs->nr_xcrs = 0; 3045 return; 3046 } 3047 3048 guest_xcrs->nr_xcrs = 1; 3049 guest_xcrs->flags = 0; 3050 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 3051 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 3052 } 3053 3054 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 3055 struct kvm_xcrs *guest_xcrs) 3056 { 3057 int i, r = 0; 3058 3059 if (!cpu_has_xsave) 3060 return -EINVAL; 3061 3062 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 3063 return -EINVAL; 3064 3065 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 3066 /* Only support XCR0 currently */ 3067 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 3068 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 3069 guest_xcrs->xcrs[i].value); 3070 break; 3071 } 3072 if (r) 3073 r = -EINVAL; 3074 return r; 3075 } 3076 3077 /* 3078 * kvm_set_guest_paused() indicates to the guest kernel that it has been 3079 * stopped by the hypervisor. This function will be called from the host only. 3080 * EINVAL is returned when the host attempts to set the flag for a guest that 3081 * does not support pv clocks. 
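 * (i.e. a guest that never enabled kvmclock via MSR_KVM_SYSTEM_TIME or
 * MSR_KVM_SYSTEM_TIME_NEW).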
3082 */ 3083 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 3084 { 3085 if (!vcpu->arch.pv_time_enabled) 3086 return -EINVAL; 3087 vcpu->arch.pvclock_set_guest_stopped_request = true; 3088 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3089 return 0; 3090 } 3091 3092 long kvm_arch_vcpu_ioctl(struct file *filp, 3093 unsigned int ioctl, unsigned long arg) 3094 { 3095 struct kvm_vcpu *vcpu = filp->private_data; 3096 void __user *argp = (void __user *)arg; 3097 int r; 3098 union { 3099 struct kvm_lapic_state *lapic; 3100 struct kvm_xsave *xsave; 3101 struct kvm_xcrs *xcrs; 3102 void *buffer; 3103 } u; 3104 3105 u.buffer = NULL; 3106 switch (ioctl) { 3107 case KVM_GET_LAPIC: { 3108 r = -EINVAL; 3109 if (!vcpu->arch.apic) 3110 goto out; 3111 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); 3112 3113 r = -ENOMEM; 3114 if (!u.lapic) 3115 goto out; 3116 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 3117 if (r) 3118 goto out; 3119 r = -EFAULT; 3120 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 3121 goto out; 3122 r = 0; 3123 break; 3124 } 3125 case KVM_SET_LAPIC: { 3126 r = -EINVAL; 3127 if (!vcpu->arch.apic) 3128 goto out; 3129 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 3130 if (IS_ERR(u.lapic)) 3131 return PTR_ERR(u.lapic); 3132 3133 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 3134 break; 3135 } 3136 case KVM_INTERRUPT: { 3137 struct kvm_interrupt irq; 3138 3139 r = -EFAULT; 3140 if (copy_from_user(&irq, argp, sizeof irq)) 3141 goto out; 3142 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 3143 break; 3144 } 3145 case KVM_NMI: { 3146 r = kvm_vcpu_ioctl_nmi(vcpu); 3147 break; 3148 } 3149 case KVM_SET_CPUID: { 3150 struct kvm_cpuid __user *cpuid_arg = argp; 3151 struct kvm_cpuid cpuid; 3152 3153 r = -EFAULT; 3154 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3155 goto out; 3156 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 3157 break; 3158 } 3159 case KVM_SET_CPUID2: { 3160 struct kvm_cpuid2 __user *cpuid_arg = argp; 3161 struct kvm_cpuid2 cpuid; 3162 3163 r = -EFAULT; 3164 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3165 goto out; 3166 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 3167 cpuid_arg->entries); 3168 break; 3169 } 3170 case KVM_GET_CPUID2: { 3171 struct kvm_cpuid2 __user *cpuid_arg = argp; 3172 struct kvm_cpuid2 cpuid; 3173 3174 r = -EFAULT; 3175 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3176 goto out; 3177 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 3178 cpuid_arg->entries); 3179 if (r) 3180 goto out; 3181 r = -EFAULT; 3182 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3183 goto out; 3184 r = 0; 3185 break; 3186 } 3187 case KVM_GET_MSRS: 3188 r = msr_io(vcpu, argp, kvm_get_msr, 1); 3189 break; 3190 case KVM_SET_MSRS: 3191 r = msr_io(vcpu, argp, do_set_msr, 0); 3192 break; 3193 case KVM_TPR_ACCESS_REPORTING: { 3194 struct kvm_tpr_access_ctl tac; 3195 3196 r = -EFAULT; 3197 if (copy_from_user(&tac, argp, sizeof tac)) 3198 goto out; 3199 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 3200 if (r) 3201 goto out; 3202 r = -EFAULT; 3203 if (copy_to_user(argp, &tac, sizeof tac)) 3204 goto out; 3205 r = 0; 3206 break; 3207 }; 3208 case KVM_SET_VAPIC_ADDR: { 3209 struct kvm_vapic_addr va; 3210 3211 r = -EINVAL; 3212 if (!irqchip_in_kernel(vcpu->kvm)) 3213 goto out; 3214 r = -EFAULT; 3215 if (copy_from_user(&va, argp, sizeof va)) 3216 goto out; 3217 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3218 break; 3219 } 3220 case KVM_X86_SETUP_MCE: { 3221 u64 mcg_cap; 3222 3223 r = -EFAULT; 3224 if 
(copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) 3225 goto out; 3226 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 3227 break; 3228 } 3229 case KVM_X86_SET_MCE: { 3230 struct kvm_x86_mce mce; 3231 3232 r = -EFAULT; 3233 if (copy_from_user(&mce, argp, sizeof mce)) 3234 goto out; 3235 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 3236 break; 3237 } 3238 case KVM_GET_VCPU_EVENTS: { 3239 struct kvm_vcpu_events events; 3240 3241 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 3242 3243 r = -EFAULT; 3244 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 3245 break; 3246 r = 0; 3247 break; 3248 } 3249 case KVM_SET_VCPU_EVENTS: { 3250 struct kvm_vcpu_events events; 3251 3252 r = -EFAULT; 3253 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 3254 break; 3255 3256 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 3257 break; 3258 } 3259 case KVM_GET_DEBUGREGS: { 3260 struct kvm_debugregs dbgregs; 3261 3262 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 3263 3264 r = -EFAULT; 3265 if (copy_to_user(argp, &dbgregs, 3266 sizeof(struct kvm_debugregs))) 3267 break; 3268 r = 0; 3269 break; 3270 } 3271 case KVM_SET_DEBUGREGS: { 3272 struct kvm_debugregs dbgregs; 3273 3274 r = -EFAULT; 3275 if (copy_from_user(&dbgregs, argp, 3276 sizeof(struct kvm_debugregs))) 3277 break; 3278 3279 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 3280 break; 3281 } 3282 case KVM_GET_XSAVE: { 3283 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); 3284 r = -ENOMEM; 3285 if (!u.xsave) 3286 break; 3287 3288 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 3289 3290 r = -EFAULT; 3291 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 3292 break; 3293 r = 0; 3294 break; 3295 } 3296 case KVM_SET_XSAVE: { 3297 u.xsave = memdup_user(argp, sizeof(*u.xsave)); 3298 if (IS_ERR(u.xsave)) 3299 return PTR_ERR(u.xsave); 3300 3301 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 3302 break; 3303 } 3304 case KVM_GET_XCRS: { 3305 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); 3306 r = -ENOMEM; 3307 if (!u.xcrs) 3308 break; 3309 3310 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 3311 3312 r = -EFAULT; 3313 if (copy_to_user(argp, u.xcrs, 3314 sizeof(struct kvm_xcrs))) 3315 break; 3316 r = 0; 3317 break; 3318 } 3319 case KVM_SET_XCRS: { 3320 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 3321 if (IS_ERR(u.xcrs)) 3322 return PTR_ERR(u.xcrs); 3323 3324 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 3325 break; 3326 } 3327 case KVM_SET_TSC_KHZ: { 3328 u32 user_tsc_khz; 3329 3330 r = -EINVAL; 3331 user_tsc_khz = (u32)arg; 3332 3333 if (user_tsc_khz >= kvm_max_guest_tsc_khz) 3334 goto out; 3335 3336 if (user_tsc_khz == 0) 3337 user_tsc_khz = tsc_khz; 3338 3339 kvm_set_tsc_khz(vcpu, user_tsc_khz); 3340 3341 r = 0; 3342 goto out; 3343 } 3344 case KVM_GET_TSC_KHZ: { 3345 r = vcpu->arch.virtual_tsc_khz; 3346 goto out; 3347 } 3348 case KVM_KVMCLOCK_CTRL: { 3349 r = kvm_set_guest_paused(vcpu); 3350 goto out; 3351 } 3352 default: 3353 r = -EINVAL; 3354 } 3355 out: 3356 kfree(u.buffer); 3357 return r; 3358 } 3359 3360 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 3361 { 3362 return VM_FAULT_SIGBUS; 3363 } 3364 3365 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 3366 { 3367 int ret; 3368 3369 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 3370 return -EINVAL; 3371 ret = kvm_x86_ops->set_tss_addr(kvm, addr); 3372 return ret; 3373 } 3374 3375 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 3376 u64 ident_addr) 3377 { 3378 
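	/* Nothing is validated or mapped at this point; the address is
	 * simply recorded for later use when the EPT identity-map table
	 * is set up. */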
kvm->arch.ept_identity_map_addr = ident_addr; 3379 return 0; 3380 } 3381 3382 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 3383 u32 kvm_nr_mmu_pages) 3384 { 3385 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 3386 return -EINVAL; 3387 3388 mutex_lock(&kvm->slots_lock); 3389 3390 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 3391 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 3392 3393 mutex_unlock(&kvm->slots_lock); 3394 return 0; 3395 } 3396 3397 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 3398 { 3399 return kvm->arch.n_max_mmu_pages; 3400 } 3401 3402 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3403 { 3404 int r; 3405 3406 r = 0; 3407 switch (chip->chip_id) { 3408 case KVM_IRQCHIP_PIC_MASTER: 3409 memcpy(&chip->chip.pic, 3410 &pic_irqchip(kvm)->pics[0], 3411 sizeof(struct kvm_pic_state)); 3412 break; 3413 case KVM_IRQCHIP_PIC_SLAVE: 3414 memcpy(&chip->chip.pic, 3415 &pic_irqchip(kvm)->pics[1], 3416 sizeof(struct kvm_pic_state)); 3417 break; 3418 case KVM_IRQCHIP_IOAPIC: 3419 r = kvm_get_ioapic(kvm, &chip->chip.ioapic); 3420 break; 3421 default: 3422 r = -EINVAL; 3423 break; 3424 } 3425 return r; 3426 } 3427 3428 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3429 { 3430 int r; 3431 3432 r = 0; 3433 switch (chip->chip_id) { 3434 case KVM_IRQCHIP_PIC_MASTER: 3435 spin_lock(&pic_irqchip(kvm)->lock); 3436 memcpy(&pic_irqchip(kvm)->pics[0], 3437 &chip->chip.pic, 3438 sizeof(struct kvm_pic_state)); 3439 spin_unlock(&pic_irqchip(kvm)->lock); 3440 break; 3441 case KVM_IRQCHIP_PIC_SLAVE: 3442 spin_lock(&pic_irqchip(kvm)->lock); 3443 memcpy(&pic_irqchip(kvm)->pics[1], 3444 &chip->chip.pic, 3445 sizeof(struct kvm_pic_state)); 3446 spin_unlock(&pic_irqchip(kvm)->lock); 3447 break; 3448 case KVM_IRQCHIP_IOAPIC: 3449 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); 3450 break; 3451 default: 3452 r = -EINVAL; 3453 break; 3454 } 3455 kvm_pic_update_irq(pic_irqchip(kvm)); 3456 return r; 3457 } 3458 3459 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3460 { 3461 int r = 0; 3462 3463 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3464 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); 3465 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3466 return r; 3467 } 3468 3469 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3470 { 3471 int r = 0; 3472 3473 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3474 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3475 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3476 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3477 return r; 3478 } 3479 3480 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3481 { 3482 int r = 0; 3483 3484 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3485 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 3486 sizeof(ps->channels)); 3487 ps->flags = kvm->arch.vpit->pit_state.flags; 3488 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3489 memset(&ps->reserved, 0, sizeof(ps->reserved)); 3490 return r; 3491 } 3492 3493 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3494 { 3495 int r = 0, start = 0; 3496 u32 prev_legacy, cur_legacy; 3497 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3498 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 3499 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 3500 if (!prev_legacy && cur_legacy) 3501 start = 1; 3502 
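	/* Tell kvm_pit_load_count() below, via 'start', that HPET legacy
	 * routing has just been switched on for this PIT. */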
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3503 sizeof(kvm->arch.vpit->pit_state.channels)); 3504 kvm->arch.vpit->pit_state.flags = ps->flags; 3505 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3506 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3507 return r; 3508 } 3509 3510 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 3511 struct kvm_reinject_control *control) 3512 { 3513 if (!kvm->arch.vpit) 3514 return -ENXIO; 3515 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3516 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; 3517 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3518 return 0; 3519 } 3520 3521 /** 3522 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 3523 * @kvm: kvm instance 3524 * @log: slot id and address to which we copy the log 3525 * 3526 * We need to keep it in mind that VCPU threads can write to the bitmap 3527 * concurrently. So, to avoid losing data, we keep the following order for 3528 * each bit: 3529 * 3530 * 1. Take a snapshot of the bit and clear it if needed. 3531 * 2. Write protect the corresponding page. 3532 * 3. Flush TLB's if needed. 3533 * 4. Copy the snapshot to the userspace. 3534 * 3535 * Between 2 and 3, the guest may write to the page using the remaining TLB 3536 * entry. This is not a problem because the page will be reported dirty at 3537 * step 4 using the snapshot taken before and step 3 ensures that successive 3538 * writes will be logged for the next call. 3539 */ 3540 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 3541 { 3542 int r; 3543 struct kvm_memory_slot *memslot; 3544 unsigned long n, i; 3545 unsigned long *dirty_bitmap; 3546 unsigned long *dirty_bitmap_buffer; 3547 bool is_dirty = false; 3548 3549 mutex_lock(&kvm->slots_lock); 3550 3551 r = -EINVAL; 3552 if (log->slot >= KVM_USER_MEM_SLOTS) 3553 goto out; 3554 3555 memslot = id_to_memslot(kvm->memslots, log->slot); 3556 3557 dirty_bitmap = memslot->dirty_bitmap; 3558 r = -ENOENT; 3559 if (!dirty_bitmap) 3560 goto out; 3561 3562 n = kvm_dirty_bitmap_bytes(memslot); 3563 3564 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); 3565 memset(dirty_bitmap_buffer, 0, n); 3566 3567 spin_lock(&kvm->mmu_lock); 3568 3569 for (i = 0; i < n / sizeof(long); i++) { 3570 unsigned long mask; 3571 gfn_t offset; 3572 3573 if (!dirty_bitmap[i]) 3574 continue; 3575 3576 is_dirty = true; 3577 3578 mask = xchg(&dirty_bitmap[i], 0); 3579 dirty_bitmap_buffer[i] = mask; 3580 3581 offset = i * BITS_PER_LONG; 3582 kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); 3583 } 3584 if (is_dirty) 3585 kvm_flush_remote_tlbs(kvm); 3586 3587 spin_unlock(&kvm->mmu_lock); 3588 3589 r = -EFAULT; 3590 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 3591 goto out; 3592 3593 r = 0; 3594 out: 3595 mutex_unlock(&kvm->slots_lock); 3596 return r; 3597 } 3598 3599 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 3600 bool line_status) 3601 { 3602 if (!irqchip_in_kernel(kvm)) 3603 return -ENXIO; 3604 3605 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 3606 irq_event->irq, irq_event->level, 3607 line_status); 3608 return 0; 3609 } 3610 3611 long kvm_arch_vm_ioctl(struct file *filp, 3612 unsigned int ioctl, unsigned long arg) 3613 { 3614 struct kvm *kvm = filp->private_data; 3615 void __user *argp = (void __user *)arg; 3616 int r = -ENOTTY; 3617 /* 3618 * This union makes it completely explicit to gcc-3.x 3619 * that these two variables' stack usage 
should be 3620 * combined, not added together. 3621 */ 3622 union { 3623 struct kvm_pit_state ps; 3624 struct kvm_pit_state2 ps2; 3625 struct kvm_pit_config pit_config; 3626 } u; 3627 3628 switch (ioctl) { 3629 case KVM_SET_TSS_ADDR: 3630 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 3631 break; 3632 case KVM_SET_IDENTITY_MAP_ADDR: { 3633 u64 ident_addr; 3634 3635 r = -EFAULT; 3636 if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) 3637 goto out; 3638 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 3639 break; 3640 } 3641 case KVM_SET_NR_MMU_PAGES: 3642 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 3643 break; 3644 case KVM_GET_NR_MMU_PAGES: 3645 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 3646 break; 3647 case KVM_CREATE_IRQCHIP: { 3648 struct kvm_pic *vpic; 3649 3650 mutex_lock(&kvm->lock); 3651 r = -EEXIST; 3652 if (kvm->arch.vpic) 3653 goto create_irqchip_unlock; 3654 r = -EINVAL; 3655 if (atomic_read(&kvm->online_vcpus)) 3656 goto create_irqchip_unlock; 3657 r = -ENOMEM; 3658 vpic = kvm_create_pic(kvm); 3659 if (vpic) { 3660 r = kvm_ioapic_init(kvm); 3661 if (r) { 3662 mutex_lock(&kvm->slots_lock); 3663 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3664 &vpic->dev_master); 3665 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3666 &vpic->dev_slave); 3667 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3668 &vpic->dev_eclr); 3669 mutex_unlock(&kvm->slots_lock); 3670 kfree(vpic); 3671 goto create_irqchip_unlock; 3672 } 3673 } else 3674 goto create_irqchip_unlock; 3675 smp_wmb(); 3676 kvm->arch.vpic = vpic; 3677 smp_wmb(); 3678 r = kvm_setup_default_irq_routing(kvm); 3679 if (r) { 3680 mutex_lock(&kvm->slots_lock); 3681 mutex_lock(&kvm->irq_lock); 3682 kvm_ioapic_destroy(kvm); 3683 kvm_destroy_pic(kvm); 3684 mutex_unlock(&kvm->irq_lock); 3685 mutex_unlock(&kvm->slots_lock); 3686 } 3687 create_irqchip_unlock: 3688 mutex_unlock(&kvm->lock); 3689 break; 3690 } 3691 case KVM_CREATE_PIT: 3692 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 3693 goto create_pit; 3694 case KVM_CREATE_PIT2: 3695 r = -EFAULT; 3696 if (copy_from_user(&u.pit_config, argp, 3697 sizeof(struct kvm_pit_config))) 3698 goto out; 3699 create_pit: 3700 mutex_lock(&kvm->slots_lock); 3701 r = -EEXIST; 3702 if (kvm->arch.vpit) 3703 goto create_pit_unlock; 3704 r = -ENOMEM; 3705 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 3706 if (kvm->arch.vpit) 3707 r = 0; 3708 create_pit_unlock: 3709 mutex_unlock(&kvm->slots_lock); 3710 break; 3711 case KVM_GET_IRQCHIP: { 3712 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3713 struct kvm_irqchip *chip; 3714 3715 chip = memdup_user(argp, sizeof(*chip)); 3716 if (IS_ERR(chip)) { 3717 r = PTR_ERR(chip); 3718 goto out; 3719 } 3720 3721 r = -ENXIO; 3722 if (!irqchip_in_kernel(kvm)) 3723 goto get_irqchip_out; 3724 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 3725 if (r) 3726 goto get_irqchip_out; 3727 r = -EFAULT; 3728 if (copy_to_user(argp, chip, sizeof *chip)) 3729 goto get_irqchip_out; 3730 r = 0; 3731 get_irqchip_out: 3732 kfree(chip); 3733 break; 3734 } 3735 case KVM_SET_IRQCHIP: { 3736 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3737 struct kvm_irqchip *chip; 3738 3739 chip = memdup_user(argp, sizeof(*chip)); 3740 if (IS_ERR(chip)) { 3741 r = PTR_ERR(chip); 3742 goto out; 3743 } 3744 3745 r = -ENXIO; 3746 if (!irqchip_in_kernel(kvm)) 3747 goto set_irqchip_out; 3748 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 3749 if (r) 3750 goto set_irqchip_out; 3751 r = 0; 3752 set_irqchip_out: 3753 kfree(chip); 3754 break; 3755 } 3756 case KVM_GET_PIT: { 3757 r = -EFAULT; 3758 if (copy_from_user(&u.ps, 
argp, sizeof(struct kvm_pit_state))) 3759 goto out; 3760 r = -ENXIO; 3761 if (!kvm->arch.vpit) 3762 goto out; 3763 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 3764 if (r) 3765 goto out; 3766 r = -EFAULT; 3767 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 3768 goto out; 3769 r = 0; 3770 break; 3771 } 3772 case KVM_SET_PIT: { 3773 r = -EFAULT; 3774 if (copy_from_user(&u.ps, argp, sizeof u.ps)) 3775 goto out; 3776 r = -ENXIO; 3777 if (!kvm->arch.vpit) 3778 goto out; 3779 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 3780 break; 3781 } 3782 case KVM_GET_PIT2: { 3783 r = -ENXIO; 3784 if (!kvm->arch.vpit) 3785 goto out; 3786 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 3787 if (r) 3788 goto out; 3789 r = -EFAULT; 3790 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 3791 goto out; 3792 r = 0; 3793 break; 3794 } 3795 case KVM_SET_PIT2: { 3796 r = -EFAULT; 3797 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 3798 goto out; 3799 r = -ENXIO; 3800 if (!kvm->arch.vpit) 3801 goto out; 3802 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 3803 break; 3804 } 3805 case KVM_REINJECT_CONTROL: { 3806 struct kvm_reinject_control control; 3807 r = -EFAULT; 3808 if (copy_from_user(&control, argp, sizeof(control))) 3809 goto out; 3810 r = kvm_vm_ioctl_reinject(kvm, &control); 3811 break; 3812 } 3813 case KVM_XEN_HVM_CONFIG: { 3814 r = -EFAULT; 3815 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, 3816 sizeof(struct kvm_xen_hvm_config))) 3817 goto out; 3818 r = -EINVAL; 3819 if (kvm->arch.xen_hvm_config.flags) 3820 goto out; 3821 r = 0; 3822 break; 3823 } 3824 case KVM_SET_CLOCK: { 3825 struct kvm_clock_data user_ns; 3826 u64 now_ns; 3827 s64 delta; 3828 3829 r = -EFAULT; 3830 if (copy_from_user(&user_ns, argp, sizeof(user_ns))) 3831 goto out; 3832 3833 r = -EINVAL; 3834 if (user_ns.flags) 3835 goto out; 3836 3837 r = 0; 3838 local_irq_disable(); 3839 now_ns = get_kernel_ns(); 3840 delta = user_ns.clock - now_ns; 3841 local_irq_enable(); 3842 kvm->arch.kvmclock_offset = delta; 3843 kvm_gen_update_masterclock(kvm); 3844 break; 3845 } 3846 case KVM_GET_CLOCK: { 3847 struct kvm_clock_data user_ns; 3848 u64 now_ns; 3849 3850 local_irq_disable(); 3851 now_ns = get_kernel_ns(); 3852 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3853 local_irq_enable(); 3854 user_ns.flags = 0; 3855 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 3856 3857 r = -EFAULT; 3858 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 3859 goto out; 3860 r = 0; 3861 break; 3862 } 3863 3864 default: 3865 ; 3866 } 3867 out: 3868 return r; 3869 } 3870 3871 static void kvm_init_msr_list(void) 3872 { 3873 u32 dummy[2]; 3874 unsigned i, j; 3875 3876 /* skip the first msrs in the list. 
KVM-specific */ 3877 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { 3878 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) 3879 continue; 3880 if (j < i) 3881 msrs_to_save[j] = msrs_to_save[i]; 3882 j++; 3883 } 3884 num_msrs_to_save = j; 3885 } 3886 3887 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 3888 const void *v) 3889 { 3890 int handled = 0; 3891 int n; 3892 3893 do { 3894 n = min(len, 8); 3895 if (!(vcpu->arch.apic && 3896 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) 3897 && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 3898 break; 3899 handled += n; 3900 addr += n; 3901 len -= n; 3902 v += n; 3903 } while (len); 3904 3905 return handled; 3906 } 3907 3908 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 3909 { 3910 int handled = 0; 3911 int n; 3912 3913 do { 3914 n = min(len, 8); 3915 if (!(vcpu->arch.apic && 3916 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) 3917 && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) 3918 break; 3919 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); 3920 handled += n; 3921 addr += n; 3922 len -= n; 3923 v += n; 3924 } while (len); 3925 3926 return handled; 3927 } 3928 3929 static void kvm_set_segment(struct kvm_vcpu *vcpu, 3930 struct kvm_segment *var, int seg) 3931 { 3932 kvm_x86_ops->set_segment(vcpu, var, seg); 3933 } 3934 3935 void kvm_get_segment(struct kvm_vcpu *vcpu, 3936 struct kvm_segment *var, int seg) 3937 { 3938 kvm_x86_ops->get_segment(vcpu, var, seg); 3939 } 3940 3941 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) 3942 { 3943 gpa_t t_gpa; 3944 struct x86_exception exception; 3945 3946 BUG_ON(!mmu_is_nested(vcpu)); 3947 3948 /* NPT walks are always user-walks */ 3949 access |= PFERR_USER_MASK; 3950 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); 3951 3952 return t_gpa; 3953 } 3954 3955 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 3956 struct x86_exception *exception) 3957 { 3958 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3959 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3960 } 3961 3962 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 3963 struct x86_exception *exception) 3964 { 3965 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3966 access |= PFERR_FETCH_MASK; 3967 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3968 } 3969 3970 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 3971 struct x86_exception *exception) 3972 { 3973 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 3974 access |= PFERR_WRITE_MASK; 3975 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 3976 } 3977 3978 /* uses this to access any guest's mapped memory without checking CPL */ 3979 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 3980 struct x86_exception *exception) 3981 { 3982 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); 3983 } 3984 3985 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 3986 struct kvm_vcpu *vcpu, u32 access, 3987 struct x86_exception *exception) 3988 { 3989 void *data = val; 3990 int r = X86EMUL_CONTINUE; 3991 3992 while (bytes) { 3993 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, 3994 exception); 3995 unsigned offset = addr & (PAGE_SIZE-1); 3996 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 3997 int ret; 3998 3999 if (gpa == UNMAPPED_GVA) 4000 return X86EMUL_PROPAGATE_FAULT; 4001 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); 4002 if (ret < 0) { 4003 r = X86EMUL_IO_NEEDED; 4004 goto out; 4005 } 4006 4007 bytes -= toread; 4008 data += toread; 4009 addr += toread; 4010 } 4011 out: 4012 return r; 4013 } 4014 4015 /* used for instruction fetching */ 4016 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 4017 gva_t addr, void *val, unsigned int bytes, 4018 struct x86_exception *exception) 4019 { 4020 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4021 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4022 4023 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 4024 access | PFERR_FETCH_MASK, 4025 exception); 4026 } 4027 4028 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 4029 gva_t addr, void *val, unsigned int bytes, 4030 struct x86_exception *exception) 4031 { 4032 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4033 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4034 4035 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 4036 exception); 4037 } 4038 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 4039 4040 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4041 gva_t addr, void *val, unsigned int bytes, 4042 struct x86_exception *exception) 4043 { 4044 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4045 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); 4046 } 4047 4048 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4049 gva_t addr, void *val, 4050 unsigned int bytes, 4051 struct x86_exception *exception) 4052 { 4053 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4054 void *data = val; 4055 int r = X86EMUL_CONTINUE; 4056 4057 while (bytes) { 4058 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 4059 PFERR_WRITE_MASK, 4060 exception); 4061 unsigned offset = addr & (PAGE_SIZE-1); 4062 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 4063 int ret; 4064 4065 if (gpa == UNMAPPED_GVA) 4066 return X86EMUL_PROPAGATE_FAULT; 4067 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); 4068 if (ret < 0) { 4069 r = X86EMUL_IO_NEEDED; 4070 goto out; 4071 } 4072 4073 bytes -= towrite; 4074 data += towrite; 4075 addr += towrite; 4076 } 4077 out: 4078 return r; 4079 } 4080 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 4081 4082 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 4083 gpa_t *gpa, struct x86_exception *exception, 4084 bool write) 4085 { 4086 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) 4087 | (write ? 
PFERR_WRITE_MASK : 0); 4088 4089 if (vcpu_match_mmio_gva(vcpu, gva) 4090 && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { 4091 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 4092 (gva & (PAGE_SIZE - 1)); 4093 trace_vcpu_match_mmio(gva, *gpa, write, false); 4094 return 1; 4095 } 4096 4097 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4098 4099 if (*gpa == UNMAPPED_GVA) 4100 return -1; 4101 4102 /* For APIC access vmexit */ 4103 if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4104 return 1; 4105 4106 if (vcpu_match_mmio_gpa(vcpu, *gpa)) { 4107 trace_vcpu_match_mmio(gva, *gpa, write, true); 4108 return 1; 4109 } 4110 4111 return 0; 4112 } 4113 4114 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 4115 const void *val, int bytes) 4116 { 4117 int ret; 4118 4119 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); 4120 if (ret < 0) 4121 return 0; 4122 kvm_mmu_pte_write(vcpu, gpa, val, bytes); 4123 return 1; 4124 } 4125 4126 struct read_write_emulator_ops { 4127 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 4128 int bytes); 4129 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 4130 void *val, int bytes); 4131 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4132 int bytes, void *val); 4133 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4134 void *val, int bytes); 4135 bool write; 4136 }; 4137 4138 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 4139 { 4140 if (vcpu->mmio_read_completed) { 4141 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 4142 vcpu->mmio_fragments[0].gpa, *(u64 *)val); 4143 vcpu->mmio_read_completed = 0; 4144 return 1; 4145 } 4146 4147 return 0; 4148 } 4149 4150 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 4151 void *val, int bytes) 4152 { 4153 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); 4154 } 4155 4156 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 4157 void *val, int bytes) 4158 { 4159 return emulator_write_phys(vcpu, gpa, val, bytes); 4160 } 4161 4162 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 4163 { 4164 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); 4165 return vcpu_mmio_write(vcpu, gpa, bytes, val); 4166 } 4167 4168 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4169 void *val, int bytes) 4170 { 4171 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); 4172 return X86EMUL_IO_NEEDED; 4173 } 4174 4175 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4176 void *val, int bytes) 4177 { 4178 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 4179 4180 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 4181 return X86EMUL_CONTINUE; 4182 } 4183 4184 static const struct read_write_emulator_ops read_emultor = { 4185 .read_write_prepare = read_prepare, 4186 .read_write_emulate = read_emulate, 4187 .read_write_mmio = vcpu_mmio_read, 4188 .read_write_exit_mmio = read_exit_mmio, 4189 }; 4190 4191 static const struct read_write_emulator_ops write_emultor = { 4192 .read_write_emulate = write_emulate, 4193 .read_write_mmio = write_mmio, 4194 .read_write_exit_mmio = write_exit_mmio, 4195 .write = true, 4196 }; 4197 4198 static int emulator_read_write_onepage(unsigned long addr, void *val, 4199 unsigned int bytes, 4200 struct x86_exception *exception, 4201 struct kvm_vcpu *vcpu, 4202 const struct read_write_emulator_ops *ops) 4203 { 4204 gpa_t gpa; 4205 int handled, ret; 4206 bool write = ops->write; 4207 struct kvm_mmio_fragment *frag; 
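/*
 * Flow for a single page worth of access: translate the GVA (a negative
 * result propagates the fault, a positive result means MMIO or the APIC
 * access page); otherwise try to satisfy the access against guest RAM via
 * ops->read_write_emulate(); anything not handled by an in-kernel MMIO
 * device is recorded as an mmio fragment so userspace can complete it.
 */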
4208 4209 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 4210 4211 if (ret < 0) 4212 return X86EMUL_PROPAGATE_FAULT; 4213 4214 /* For APIC access vmexit */ 4215 if (ret) 4216 goto mmio; 4217 4218 if (ops->read_write_emulate(vcpu, gpa, val, bytes)) 4219 return X86EMUL_CONTINUE; 4220 4221 mmio: 4222 /* 4223 * Is this MMIO handled locally? 4224 */ 4225 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 4226 if (handled == bytes) 4227 return X86EMUL_CONTINUE; 4228 4229 gpa += handled; 4230 bytes -= handled; 4231 val += handled; 4232 4233 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 4234 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 4235 frag->gpa = gpa; 4236 frag->data = val; 4237 frag->len = bytes; 4238 return X86EMUL_CONTINUE; 4239 } 4240 4241 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, 4242 void *val, unsigned int bytes, 4243 struct x86_exception *exception, 4244 const struct read_write_emulator_ops *ops) 4245 { 4246 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4247 gpa_t gpa; 4248 int rc; 4249 4250 if (ops->read_write_prepare && 4251 ops->read_write_prepare(vcpu, val, bytes)) 4252 return X86EMUL_CONTINUE; 4253 4254 vcpu->mmio_nr_fragments = 0; 4255 4256 /* Crossing a page boundary? */ 4257 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 4258 int now; 4259 4260 now = -addr & ~PAGE_MASK; 4261 rc = emulator_read_write_onepage(addr, val, now, exception, 4262 vcpu, ops); 4263 4264 if (rc != X86EMUL_CONTINUE) 4265 return rc; 4266 addr += now; 4267 val += now; 4268 bytes -= now; 4269 } 4270 4271 rc = emulator_read_write_onepage(addr, val, bytes, exception, 4272 vcpu, ops); 4273 if (rc != X86EMUL_CONTINUE) 4274 return rc; 4275 4276 if (!vcpu->mmio_nr_fragments) 4277 return rc; 4278 4279 gpa = vcpu->mmio_fragments[0].gpa; 4280 4281 vcpu->mmio_needed = 1; 4282 vcpu->mmio_cur_fragment = 0; 4283 4284 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 4285 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 4286 vcpu->run->exit_reason = KVM_EXIT_MMIO; 4287 vcpu->run->mmio.phys_addr = gpa; 4288 4289 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 4290 } 4291 4292 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 4293 unsigned long addr, 4294 void *val, 4295 unsigned int bytes, 4296 struct x86_exception *exception) 4297 { 4298 return emulator_read_write(ctxt, addr, val, bytes, 4299 exception, &read_emultor); 4300 } 4301 4302 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 4303 unsigned long addr, 4304 const void *val, 4305 unsigned int bytes, 4306 struct x86_exception *exception) 4307 { 4308 return emulator_read_write(ctxt, addr, (void *)val, bytes, 4309 exception, &write_emultor); 4310 } 4311 4312 #define CMPXCHG_TYPE(t, ptr, old, new) \ 4313 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) 4314 4315 #ifdef CONFIG_X86_64 4316 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) 4317 #else 4318 # define CMPXCHG64(ptr, old, new) \ 4319 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) 4320 #endif 4321 4322 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 4323 unsigned long addr, 4324 const void *old, 4325 const void *new, 4326 unsigned int bytes, 4327 struct x86_exception *exception) 4328 { 4329 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4330 gpa_t gpa; 4331 struct page *page; 4332 char *kaddr; 4333 bool exchanged; 4334 4335 /* guests cmpxchg8b have to be emulated atomically */ 4336 if (bytes > 8 || (bytes & 
(bytes - 1))) 4337 goto emul_write; 4338 4339 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 4340 4341 if (gpa == UNMAPPED_GVA || 4342 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4343 goto emul_write; 4344 4345 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) 4346 goto emul_write; 4347 4348 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 4349 if (is_error_page(page)) 4350 goto emul_write; 4351 4352 kaddr = kmap_atomic(page); 4353 kaddr += offset_in_page(gpa); 4354 switch (bytes) { 4355 case 1: 4356 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); 4357 break; 4358 case 2: 4359 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); 4360 break; 4361 case 4: 4362 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); 4363 break; 4364 case 8: 4365 exchanged = CMPXCHG64(kaddr, old, new); 4366 break; 4367 default: 4368 BUG(); 4369 } 4370 kunmap_atomic(kaddr); 4371 kvm_release_page_dirty(page); 4372 4373 if (!exchanged) 4374 return X86EMUL_CMPXCHG_FAILED; 4375 4376 kvm_mmu_pte_write(vcpu, gpa, new, bytes); 4377 4378 return X86EMUL_CONTINUE; 4379 4380 emul_write: 4381 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 4382 4383 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 4384 } 4385 4386 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4387 { 4388 /* TODO: String I/O for in kernel device */ 4389 int r; 4390 4391 if (vcpu->arch.pio.in) 4392 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, 4393 vcpu->arch.pio.size, pd); 4394 else 4395 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, 4396 vcpu->arch.pio.port, vcpu->arch.pio.size, 4397 pd); 4398 return r; 4399 } 4400 4401 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 4402 unsigned short port, void *val, 4403 unsigned int count, bool in) 4404 { 4405 trace_kvm_pio(!in, port, size, count); 4406 4407 vcpu->arch.pio.port = port; 4408 vcpu->arch.pio.in = in; 4409 vcpu->arch.pio.count = count; 4410 vcpu->arch.pio.size = size; 4411 4412 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 4413 vcpu->arch.pio.count = 0; 4414 return 1; 4415 } 4416 4417 vcpu->run->exit_reason = KVM_EXIT_IO; 4418 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 4419 vcpu->run->io.size = size; 4420 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 4421 vcpu->run->io.count = count; 4422 vcpu->run->io.port = port; 4423 4424 return 0; 4425 } 4426 4427 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 4428 int size, unsigned short port, void *val, 4429 unsigned int count) 4430 { 4431 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4432 int ret; 4433 4434 if (vcpu->arch.pio.count) 4435 goto data_avail; 4436 4437 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4438 if (ret) { 4439 data_avail: 4440 memcpy(val, vcpu->arch.pio_data, size * count); 4441 vcpu->arch.pio.count = 0; 4442 return 1; 4443 } 4444 4445 return 0; 4446 } 4447 4448 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 4449 int size, unsigned short port, 4450 const void *val, unsigned int count) 4451 { 4452 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4453 4454 memcpy(vcpu->arch.pio_data, val, size * count); 4455 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 4456 } 4457 4458 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 4459 { 4460 return kvm_x86_ops->get_segment_base(vcpu, seg); 4461 } 4462 4463 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 4464 { 4465 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 4466 } 4467 4468 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 4469 { 4470 if (!need_emulate_wbinvd(vcpu)) 4471 return X86EMUL_CONTINUE; 4472 4473 if (kvm_x86_ops->has_wbinvd_exit()) { 4474 int cpu = get_cpu(); 4475 4476 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4477 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 4478 wbinvd_ipi, NULL, 1); 4479 put_cpu(); 4480 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 4481 } else 4482 wbinvd(); 4483 return X86EMUL_CONTINUE; 4484 } 4485 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 4486 4487 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 4488 { 4489 kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); 4490 } 4491 4492 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) 4493 { 4494 return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 4495 } 4496 4497 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) 4498 { 4499 4500 return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 4501 } 4502 4503 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 4504 { 4505 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 4506 } 4507 4508 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 4509 { 4510 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4511 unsigned long value; 4512 4513 switch (cr) { 4514 case 0: 4515 value = kvm_read_cr0(vcpu); 4516 break; 4517 case 2: 4518 value = vcpu->arch.cr2; 4519 break; 4520 case 3: 4521 value = kvm_read_cr3(vcpu); 4522 break; 4523 case 4: 4524 value = kvm_read_cr4(vcpu); 4525 break; 4526 case 8: 4527 value = kvm_get_cr8(vcpu); 4528 break; 4529 default: 4530 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4531 return 0; 4532 } 4533 4534 return value; 4535 } 4536 4537 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 4538 { 4539 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4540 int res = 0; 4541 4542 switch (cr) { 4543 case 0: 4544 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 4545 break; 4546 case 2: 4547 vcpu->arch.cr2 = val; 4548 break; 4549 case 3: 4550 res = kvm_set_cr3(vcpu, val); 4551 break; 4552 case 4: 4553 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), 
val)); 4554 break; 4555 case 8: 4556 res = kvm_set_cr8(vcpu, val); 4557 break; 4558 default: 4559 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4560 res = -1; 4561 } 4562 4563 return res; 4564 } 4565 4566 static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val) 4567 { 4568 kvm_set_rflags(emul_to_vcpu(ctxt), val); 4569 } 4570 4571 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 4572 { 4573 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); 4574 } 4575 4576 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4577 { 4578 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); 4579 } 4580 4581 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4582 { 4583 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); 4584 } 4585 4586 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4587 { 4588 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); 4589 } 4590 4591 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4592 { 4593 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); 4594 } 4595 4596 static unsigned long emulator_get_cached_segment_base( 4597 struct x86_emulate_ctxt *ctxt, int seg) 4598 { 4599 return get_segment_base(emul_to_vcpu(ctxt), seg); 4600 } 4601 4602 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 4603 struct desc_struct *desc, u32 *base3, 4604 int seg) 4605 { 4606 struct kvm_segment var; 4607 4608 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 4609 *selector = var.selector; 4610 4611 if (var.unusable) { 4612 memset(desc, 0, sizeof(*desc)); 4613 return false; 4614 } 4615 4616 if (var.g) 4617 var.limit >>= 12; 4618 set_desc_limit(desc, var.limit); 4619 set_desc_base(desc, (unsigned long)var.base); 4620 #ifdef CONFIG_X86_64 4621 if (base3) 4622 *base3 = var.base >> 32; 4623 #endif 4624 desc->type = var.type; 4625 desc->s = var.s; 4626 desc->dpl = var.dpl; 4627 desc->p = var.present; 4628 desc->avl = var.avl; 4629 desc->l = var.l; 4630 desc->d = var.db; 4631 desc->g = var.g; 4632 4633 return true; 4634 } 4635 4636 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 4637 struct desc_struct *desc, u32 base3, 4638 int seg) 4639 { 4640 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4641 struct kvm_segment var; 4642 4643 var.selector = selector; 4644 var.base = get_desc_base(desc); 4645 #ifdef CONFIG_X86_64 4646 var.base |= ((u64)base3) << 32; 4647 #endif 4648 var.limit = get_desc_limit(desc); 4649 if (desc->g) 4650 var.limit = (var.limit << 12) | 0xfff; 4651 var.type = desc->type; 4652 var.present = desc->p; 4653 var.dpl = desc->dpl; 4654 var.db = desc->d; 4655 var.s = desc->s; 4656 var.l = desc->l; 4657 var.g = desc->g; 4658 var.avl = desc->avl; 4659 var.present = desc->p; 4660 var.unusable = !var.present; 4661 var.padding = 0; 4662 4663 kvm_set_segment(vcpu, &var, seg); 4664 return; 4665 } 4666 4667 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 4668 u32 msr_index, u64 *pdata) 4669 { 4670 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 4671 } 4672 4673 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 4674 u32 msr_index, u64 data) 4675 { 4676 struct msr_data msr; 4677 4678 msr.data = data; 4679 msr.index = msr_index; 4680 msr.host_initiated = false; 4681 return kvm_set_msr(emul_to_vcpu(ctxt), &msr); 4682 } 4683 4684 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 4685 u32 pmc, u64 *pdata) 4686 { 4687 return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); 4688 } 4689 4690 
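/*
 * The emulator_*() callbacks below, like those above, are thin adapters:
 * each recovers the vcpu with emul_to_vcpu(ctxt) and forwards to the
 * matching kvm_* helper or kvm_x86_ops hook.  They are wired into the
 * instruction emulator through the emulate_ops table further down.
 */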
static void emulator_halt(struct x86_emulate_ctxt *ctxt) 4691 { 4692 emul_to_vcpu(ctxt)->arch.halt_request = 1; 4693 } 4694 4695 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) 4696 { 4697 preempt_disable(); 4698 kvm_load_guest_fpu(emul_to_vcpu(ctxt)); 4699 /* 4700 * CR0.TS may reference the host fpu state, not the guest fpu state, 4701 * so it may be clear at this point. 4702 */ 4703 clts(); 4704 } 4705 4706 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) 4707 { 4708 preempt_enable(); 4709 } 4710 4711 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 4712 struct x86_instruction_info *info, 4713 enum x86_intercept_stage stage) 4714 { 4715 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); 4716 } 4717 4718 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 4719 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) 4720 { 4721 kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); 4722 } 4723 4724 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 4725 { 4726 return kvm_register_read(emul_to_vcpu(ctxt), reg); 4727 } 4728 4729 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 4730 { 4731 kvm_register_write(emul_to_vcpu(ctxt), reg, val); 4732 } 4733 4734 static const struct x86_emulate_ops emulate_ops = { 4735 .read_gpr = emulator_read_gpr, 4736 .write_gpr = emulator_write_gpr, 4737 .read_std = kvm_read_guest_virt_system, 4738 .write_std = kvm_write_guest_virt_system, 4739 .fetch = kvm_fetch_guest_virt, 4740 .read_emulated = emulator_read_emulated, 4741 .write_emulated = emulator_write_emulated, 4742 .cmpxchg_emulated = emulator_cmpxchg_emulated, 4743 .invlpg = emulator_invlpg, 4744 .pio_in_emulated = emulator_pio_in_emulated, 4745 .pio_out_emulated = emulator_pio_out_emulated, 4746 .get_segment = emulator_get_segment, 4747 .set_segment = emulator_set_segment, 4748 .get_cached_segment_base = emulator_get_cached_segment_base, 4749 .get_gdt = emulator_get_gdt, 4750 .get_idt = emulator_get_idt, 4751 .set_gdt = emulator_set_gdt, 4752 .set_idt = emulator_set_idt, 4753 .get_cr = emulator_get_cr, 4754 .set_cr = emulator_set_cr, 4755 .set_rflags = emulator_set_rflags, 4756 .cpl = emulator_get_cpl, 4757 .get_dr = emulator_get_dr, 4758 .set_dr = emulator_set_dr, 4759 .set_msr = emulator_set_msr, 4760 .get_msr = emulator_get_msr, 4761 .read_pmc = emulator_read_pmc, 4762 .halt = emulator_halt, 4763 .wbinvd = emulator_wbinvd, 4764 .fix_hypercall = emulator_fix_hypercall, 4765 .get_fpu = emulator_get_fpu, 4766 .put_fpu = emulator_put_fpu, 4767 .intercept = emulator_intercept, 4768 .get_cpuid = emulator_get_cpuid, 4769 }; 4770 4771 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 4772 { 4773 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); 4774 /* 4775 * an sti; sti; sequence only disable interrupts for the first 4776 * instruction. So, if the last instruction, be it emulated or 4777 * not, left the system with the INT_STI flag enabled, it 4778 * means that the last instruction is an sti. We should not 4779 * leave the flag on in this case. 
The same goes for mov ss 4780 */ 4781 if (!(int_shadow & mask)) 4782 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); 4783 } 4784 4785 static void inject_emulated_exception(struct kvm_vcpu *vcpu) 4786 { 4787 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4788 if (ctxt->exception.vector == PF_VECTOR) 4789 kvm_propagate_fault(vcpu, &ctxt->exception); 4790 else if (ctxt->exception.error_code_valid) 4791 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 4792 ctxt->exception.error_code); 4793 else 4794 kvm_queue_exception(vcpu, ctxt->exception.vector); 4795 } 4796 4797 static void init_decode_cache(struct x86_emulate_ctxt *ctxt) 4798 { 4799 memset(&ctxt->opcode_len, 0, 4800 (void *)&ctxt->_regs - (void *)&ctxt->opcode_len); 4801 4802 ctxt->fetch.start = 0; 4803 ctxt->fetch.end = 0; 4804 ctxt->io_read.pos = 0; 4805 ctxt->io_read.end = 0; 4806 ctxt->mem_read.pos = 0; 4807 ctxt->mem_read.end = 0; 4808 } 4809 4810 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 4811 { 4812 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4813 int cs_db, cs_l; 4814 4815 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 4816 4817 ctxt->eflags = kvm_get_rflags(vcpu); 4818 ctxt->eip = kvm_rip_read(vcpu); 4819 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 4820 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 4821 cs_l ? X86EMUL_MODE_PROT64 : 4822 cs_db ? X86EMUL_MODE_PROT32 : 4823 X86EMUL_MODE_PROT16; 4824 ctxt->guest_mode = is_guest_mode(vcpu); 4825 4826 init_decode_cache(ctxt); 4827 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 4828 } 4829 4830 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 4831 { 4832 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 4833 int ret; 4834 4835 init_emulate_ctxt(vcpu); 4836 4837 ctxt->op_bytes = 2; 4838 ctxt->ad_bytes = 2; 4839 ctxt->_eip = ctxt->eip + inc_eip; 4840 ret = emulate_int_real(ctxt, irq); 4841 4842 if (ret != X86EMUL_CONTINUE) 4843 return EMULATE_FAIL; 4844 4845 ctxt->eip = ctxt->_eip; 4846 kvm_rip_write(vcpu, ctxt->eip); 4847 kvm_set_rflags(vcpu, ctxt->eflags); 4848 4849 if (irq == NMI_VECTOR) 4850 vcpu->arch.nmi_pending = 0; 4851 else 4852 vcpu->arch.interrupt.pending = false; 4853 4854 return EMULATE_DONE; 4855 } 4856 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 4857 4858 static int handle_emulation_failure(struct kvm_vcpu *vcpu) 4859 { 4860 int r = EMULATE_DONE; 4861 4862 ++vcpu->stat.insn_emulation_fail; 4863 trace_kvm_emulate_insn_failed(vcpu); 4864 if (!is_guest_mode(vcpu)) { 4865 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4866 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 4867 vcpu->run->internal.ndata = 0; 4868 r = EMULATE_FAIL; 4869 } 4870 kvm_queue_exception(vcpu, UD_VECTOR); 4871 4872 return r; 4873 } 4874 4875 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, 4876 bool write_fault_to_shadow_pgtable, 4877 int emulation_type) 4878 { 4879 gpa_t gpa = cr2; 4880 pfn_t pfn; 4881 4882 if (emulation_type & EMULTYPE_NO_REEXECUTE) 4883 return false; 4884 4885 if (!vcpu->arch.mmu.direct_map) { 4886 /* 4887 * Write permission should be allowed since only 4888 * write access need to be emulated. 4889 */ 4890 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 4891 4892 /* 4893 * If the mapping is invalid in guest, let cpu retry 4894 * it to generate fault. 
4895 */ 4896 if (gpa == UNMAPPED_GVA) 4897 return true; 4898 } 4899 4900 /* 4901 * Do not retry the unhandleable instruction if it faults on the 4902 * readonly host memory, otherwise it will goto a infinite loop: 4903 * retry instruction -> write #PF -> emulation fail -> retry 4904 * instruction -> ... 4905 */ 4906 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 4907 4908 /* 4909 * If the instruction failed on the error pfn, it can not be fixed, 4910 * report the error to userspace. 4911 */ 4912 if (is_error_noslot_pfn(pfn)) 4913 return false; 4914 4915 kvm_release_pfn_clean(pfn); 4916 4917 /* The instructions are well-emulated on direct mmu. */ 4918 if (vcpu->arch.mmu.direct_map) { 4919 unsigned int indirect_shadow_pages; 4920 4921 spin_lock(&vcpu->kvm->mmu_lock); 4922 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 4923 spin_unlock(&vcpu->kvm->mmu_lock); 4924 4925 if (indirect_shadow_pages) 4926 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4927 4928 return true; 4929 } 4930 4931 /* 4932 * if emulation was due to access to shadowed page table 4933 * and it failed try to unshadow page and re-enter the 4934 * guest to let CPU execute the instruction. 4935 */ 4936 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4937 4938 /* 4939 * If the access faults on its page table, it can not 4940 * be fixed by unprotecting shadow page and it should 4941 * be reported to userspace. 4942 */ 4943 return !write_fault_to_shadow_pgtable; 4944 } 4945 4946 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 4947 unsigned long cr2, int emulation_type) 4948 { 4949 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4950 unsigned long last_retry_eip, last_retry_addr, gpa = cr2; 4951 4952 last_retry_eip = vcpu->arch.last_retry_eip; 4953 last_retry_addr = vcpu->arch.last_retry_addr; 4954 4955 /* 4956 * If the emulation is caused by #PF and it is non-page_table 4957 * writing instruction, it means the VM-EXIT is caused by shadow 4958 * page protected, we can zap the shadow page and retry this 4959 * instruction directly. 4960 * 4961 * Note: if the guest uses a non-page-table modifying instruction 4962 * on the PDE that points to the instruction, then we will unmap 4963 * the instruction and go to an infinite loop. So, we cache the 4964 * last retried eip and the last fault address, if we meet the eip 4965 * and the address again, we can break out of the potential infinite 4966 * loop. 
4967 */ 4968 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 4969 4970 if (!(emulation_type & EMULTYPE_RETRY)) 4971 return false; 4972 4973 if (x86_page_table_writing_insn(ctxt)) 4974 return false; 4975 4976 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) 4977 return false; 4978 4979 vcpu->arch.last_retry_eip = ctxt->eip; 4980 vcpu->arch.last_retry_addr = cr2; 4981 4982 if (!vcpu->arch.mmu.direct_map) 4983 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 4984 4985 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 4986 4987 return true; 4988 } 4989 4990 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 4991 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 4992 4993 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 4994 unsigned long *db) 4995 { 4996 u32 dr6 = 0; 4997 int i; 4998 u32 enable, rwlen; 4999 5000 enable = dr7; 5001 rwlen = dr7 >> 16; 5002 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 5003 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 5004 dr6 |= (1 << i); 5005 return dr6; 5006 } 5007 5008 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) 5009 { 5010 struct kvm_run *kvm_run = vcpu->run; 5011 5012 /* 5013 * Use the "raw" value to see if TF was passed to the processor. 5014 * Note that the new value of the flags has not been saved yet. 5015 * 5016 * This is correct even for TF set by the guest, because "the 5017 * processor will not generate this exception after the instruction 5018 * that sets the TF flag". 5019 */ 5020 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); 5021 5022 if (unlikely(rflags & X86_EFLAGS_TF)) { 5023 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5024 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; 5025 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5026 kvm_run->debug.arch.exception = DB_VECTOR; 5027 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5028 *r = EMULATE_USER_EXIT; 5029 } else { 5030 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; 5031 /* 5032 * "Certain debug exceptions may clear bit 0-3. The 5033 * remaining contents of the DR6 register are never 5034 * cleared by the processor". 
5035 */ 5036 vcpu->arch.dr6 &= ~15; 5037 vcpu->arch.dr6 |= DR6_BS; 5038 kvm_queue_exception(vcpu, DB_VECTOR); 5039 } 5040 } 5041 } 5042 5043 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) 5044 { 5045 struct kvm_run *kvm_run = vcpu->run; 5046 unsigned long eip = vcpu->arch.emulate_ctxt.eip; 5047 u32 dr6 = 0; 5048 5049 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 5050 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 5051 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 5052 vcpu->arch.guest_debug_dr7, 5053 vcpu->arch.eff_db); 5054 5055 if (dr6 != 0) { 5056 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; 5057 kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + 5058 get_segment_base(vcpu, VCPU_SREG_CS); 5059 5060 kvm_run->debug.arch.exception = DB_VECTOR; 5061 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5062 *r = EMULATE_USER_EXIT; 5063 return true; 5064 } 5065 } 5066 5067 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { 5068 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 5069 vcpu->arch.dr7, 5070 vcpu->arch.db); 5071 5072 if (dr6 != 0) { 5073 vcpu->arch.dr6 &= ~15; 5074 vcpu->arch.dr6 |= dr6; 5075 kvm_queue_exception(vcpu, DB_VECTOR); 5076 *r = EMULATE_DONE; 5077 return true; 5078 } 5079 } 5080 5081 return false; 5082 } 5083 5084 int x86_emulate_instruction(struct kvm_vcpu *vcpu, 5085 unsigned long cr2, 5086 int emulation_type, 5087 void *insn, 5088 int insn_len) 5089 { 5090 int r; 5091 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 5092 bool writeback = true; 5093 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 5094 5095 /* 5096 * Clear write_fault_to_shadow_pgtable here to ensure it is 5097 * never reused. 5098 */ 5099 vcpu->arch.write_fault_to_shadow_pgtable = false; 5100 kvm_clear_exception_queue(vcpu); 5101 5102 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 5103 init_emulate_ctxt(vcpu); 5104 5105 /* 5106 * We will reenter on the same instruction since 5107 * we do not set complete_userspace_io. This does not 5108 * handle watchpoints yet, those would be handled in 5109 * the emulate_ops. 
5110 */ 5111 if (kvm_vcpu_check_breakpoint(vcpu, &r)) 5112 return r; 5113 5114 ctxt->interruptibility = 0; 5115 ctxt->have_exception = false; 5116 ctxt->perm_ok = false; 5117 5118 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; 5119 5120 r = x86_decode_insn(ctxt, insn, insn_len); 5121 5122 trace_kvm_emulate_insn_start(vcpu); 5123 ++vcpu->stat.insn_emulation; 5124 if (r != EMULATION_OK) { 5125 if (emulation_type & EMULTYPE_TRAP_UD) 5126 return EMULATE_FAIL; 5127 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 5128 emulation_type)) 5129 return EMULATE_DONE; 5130 if (emulation_type & EMULTYPE_SKIP) 5131 return EMULATE_FAIL; 5132 return handle_emulation_failure(vcpu); 5133 } 5134 } 5135 5136 if (emulation_type & EMULTYPE_SKIP) { 5137 kvm_rip_write(vcpu, ctxt->_eip); 5138 return EMULATE_DONE; 5139 } 5140 5141 if (retry_instruction(ctxt, cr2, emulation_type)) 5142 return EMULATE_DONE; 5143 5144 /* this is needed for vmware backdoor interface to work since it 5145 changes registers values during IO operation */ 5146 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 5147 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 5148 emulator_invalidate_register_cache(ctxt); 5149 } 5150 5151 restart: 5152 r = x86_emulate_insn(ctxt); 5153 5154 if (r == EMULATION_INTERCEPTED) 5155 return EMULATE_DONE; 5156 5157 if (r == EMULATION_FAILED) { 5158 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 5159 emulation_type)) 5160 return EMULATE_DONE; 5161 5162 return handle_emulation_failure(vcpu); 5163 } 5164 5165 if (ctxt->have_exception) { 5166 inject_emulated_exception(vcpu); 5167 r = EMULATE_DONE; 5168 } else if (vcpu->arch.pio.count) { 5169 if (!vcpu->arch.pio.in) { 5170 /* FIXME: return into emulator if single-stepping. */ 5171 vcpu->arch.pio.count = 0; 5172 } else { 5173 writeback = false; 5174 vcpu->arch.complete_userspace_io = complete_emulated_pio; 5175 } 5176 r = EMULATE_USER_EXIT; 5177 } else if (vcpu->mmio_needed) { 5178 if (!vcpu->mmio_is_write) 5179 writeback = false; 5180 r = EMULATE_USER_EXIT; 5181 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5182 } else if (r == EMULATION_RESTART) 5183 goto restart; 5184 else 5185 r = EMULATE_DONE; 5186 5187 if (writeback) { 5188 toggle_interruptibility(vcpu, ctxt->interruptibility); 5189 kvm_make_request(KVM_REQ_EVENT, vcpu); 5190 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5191 kvm_rip_write(vcpu, ctxt->eip); 5192 if (r == EMULATE_DONE) 5193 kvm_vcpu_check_singlestep(vcpu, &r); 5194 kvm_set_rflags(vcpu, ctxt->eflags); 5195 } else 5196 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 5197 5198 return r; 5199 } 5200 EXPORT_SYMBOL_GPL(x86_emulate_instruction); 5201 5202 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) 5203 { 5204 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); 5205 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, 5206 size, port, &val, 1); 5207 /* do not return to emulator after return from userspace */ 5208 vcpu->arch.pio.count = 0; 5209 return ret; 5210 } 5211 EXPORT_SYMBOL_GPL(kvm_fast_pio_out); 5212 5213 static void tsc_bad(void *info) 5214 { 5215 __this_cpu_write(cpu_tsc_khz, 0); 5216 } 5217 5218 static void tsc_khz_changed(void *data) 5219 { 5220 struct cpufreq_freqs *freq = data; 5221 unsigned long khz = 0; 5222 5223 if (data) 5224 khz = freq->new; 5225 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5226 khz = cpufreq_quick_get(raw_smp_processor_id()); 5227 if (!khz) 5228 khz = tsc_khz; 5229 __this_cpu_write(cpu_tsc_khz, khz); 5230 } 5231 5232 static 
int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 5233 void *data) 5234 { 5235 struct cpufreq_freqs *freq = data; 5236 struct kvm *kvm; 5237 struct kvm_vcpu *vcpu; 5238 int i, send_ipi = 0; 5239 5240 /* 5241 * We allow guests to temporarily run on slowing clocks, 5242 * provided we notify them after, or to run on accelerating 5243 * clocks, provided we notify them before. Thus time never 5244 * goes backwards. 5245 * 5246 * However, we have a problem. We can't atomically update 5247 * the frequency of a given CPU from this function; it is 5248 * merely a notifier, which can be called from any CPU. 5249 * Changing the TSC frequency at arbitrary points in time 5250 * requires a recomputation of local variables related to 5251 * the TSC for each VCPU. We must flag these local variables 5252 * to be updated and be sure the update takes place with the 5253 * new frequency before any guests proceed. 5254 * 5255 * Unfortunately, the combination of hotplug CPU and frequency 5256 * change creates an intractable locking scenario; the order 5257 * of when these callouts happen is undefined with respect to 5258 * CPU hotplug, and they can race with each other. As such, 5259 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 5260 * undefined; you can actually have a CPU frequency change take 5261 * place in between the computation of X and the setting of the 5262 * variable. To protect against this problem, all updates of 5263 * the per_cpu tsc_khz variable are done in an interrupt 5264 * protected IPI, and all callers wishing to update the value 5265 * must wait for a synchronous IPI to complete (which is trivial 5266 * if the caller is on the CPU already). This establishes the 5267 * necessary total order on variable updates. 5268 * 5269 * Note that because a guest time update may take place 5270 * anytime after the setting of the VCPU's request bit, the 5271 * correct TSC value must be set before the request. However, 5272 * to ensure the update actually makes it to any guest which 5273 * starts running in hardware virtualization between the set 5274 * and the acquisition of the spinlock, we must also ping the 5275 * CPU after setting the request bit. 5276 * 5277 */ 5278 5279 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 5280 return 0; 5281 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 5282 return 0; 5283 5284 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5285 5286 spin_lock(&kvm_lock); 5287 list_for_each_entry(kvm, &vm_list, vm_list) { 5288 kvm_for_each_vcpu(i, vcpu, kvm) { 5289 if (vcpu->cpu != freq->cpu) 5290 continue; 5291 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5292 if (vcpu->cpu != smp_processor_id()) 5293 send_ipi = 1; 5294 } 5295 } 5296 spin_unlock(&kvm_lock); 5297 5298 if (freq->old < freq->new && send_ipi) { 5299 /* 5300 * We upscale the frequency. Must make the guest 5301 * doesn't see old kvmclock values while running with 5302 * the new frequency, otherwise we risk the guest sees 5303 * time go backwards. 5304 * 5305 * In case we update the frequency for another cpu 5306 * (which might be in guest context) send an interrupt 5307 * to kick the cpu out of guest context. Next time 5308 * guest context is entered kvmclock will be updated, 5309 * so the guest will not see stale values. 
5310 */ 5311 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5312 } 5313 return 0; 5314 } 5315 5316 static struct notifier_block kvmclock_cpufreq_notifier_block = { 5317 .notifier_call = kvmclock_cpufreq_notifier 5318 }; 5319 5320 static int kvmclock_cpu_notifier(struct notifier_block *nfb, 5321 unsigned long action, void *hcpu) 5322 { 5323 unsigned int cpu = (unsigned long)hcpu; 5324 5325 switch (action) { 5326 case CPU_ONLINE: 5327 case CPU_DOWN_FAILED: 5328 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5329 break; 5330 case CPU_DOWN_PREPARE: 5331 smp_call_function_single(cpu, tsc_bad, NULL, 1); 5332 break; 5333 } 5334 return NOTIFY_OK; 5335 } 5336 5337 static struct notifier_block kvmclock_cpu_notifier_block = { 5338 .notifier_call = kvmclock_cpu_notifier, 5339 .priority = -INT_MAX 5340 }; 5341 5342 static void kvm_timer_init(void) 5343 { 5344 int cpu; 5345 5346 max_tsc_khz = tsc_khz; 5347 register_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5348 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 5349 #ifdef CONFIG_CPU_FREQ 5350 struct cpufreq_policy policy; 5351 memset(&policy, 0, sizeof(policy)); 5352 cpu = get_cpu(); 5353 cpufreq_get_policy(&policy, cpu); 5354 if (policy.cpuinfo.max_freq) 5355 max_tsc_khz = policy.cpuinfo.max_freq; 5356 put_cpu(); 5357 #endif 5358 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 5359 CPUFREQ_TRANSITION_NOTIFIER); 5360 } 5361 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); 5362 for_each_online_cpu(cpu) 5363 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5364 } 5365 5366 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 5367 5368 int kvm_is_in_guest(void) 5369 { 5370 return __this_cpu_read(current_vcpu) != NULL; 5371 } 5372 5373 static int kvm_is_user_mode(void) 5374 { 5375 int user_mode = 3; 5376 5377 if (__this_cpu_read(current_vcpu)) 5378 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); 5379 5380 return user_mode != 0; 5381 } 5382 5383 static unsigned long kvm_get_guest_ip(void) 5384 { 5385 unsigned long ip = 0; 5386 5387 if (__this_cpu_read(current_vcpu)) 5388 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); 5389 5390 return ip; 5391 } 5392 5393 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5394 .is_in_guest = kvm_is_in_guest, 5395 .is_user_mode = kvm_is_user_mode, 5396 .get_guest_ip = kvm_get_guest_ip, 5397 }; 5398 5399 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) 5400 { 5401 __this_cpu_write(current_vcpu, vcpu); 5402 } 5403 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); 5404 5405 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) 5406 { 5407 __this_cpu_write(current_vcpu, NULL); 5408 } 5409 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); 5410 5411 static void kvm_set_mmio_spte_mask(void) 5412 { 5413 u64 mask; 5414 int maxphyaddr = boot_cpu_data.x86_phys_bits; 5415 5416 /* 5417 * Set the reserved bits and the present bit of an paging-structure 5418 * entry to generate page fault with PFER.RSV = 1. 5419 */ 5420 /* Mask the reserved physical address bits. */ 5421 mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr; 5422 5423 /* Bit 62 is always reserved for 32bit host. */ 5424 mask |= 0x3ull << 62; 5425 5426 /* Set the present bit. */ 5427 mask |= 1ull; 5428 5429 #ifdef CONFIG_X86_64 5430 /* 5431 * If reserved bit is not supported, clear the present bit to disable 5432 * mmio page fault. 
5433 */ 5434 if (maxphyaddr == 52) 5435 mask &= ~1ull; 5436 #endif 5437 5438 kvm_mmu_set_mmio_spte_mask(mask); 5439 } 5440 5441 #ifdef CONFIG_X86_64 5442 static void pvclock_gtod_update_fn(struct work_struct *work) 5443 { 5444 struct kvm *kvm; 5445 5446 struct kvm_vcpu *vcpu; 5447 int i; 5448 5449 spin_lock(&kvm_lock); 5450 list_for_each_entry(kvm, &vm_list, vm_list) 5451 kvm_for_each_vcpu(i, vcpu, kvm) 5452 set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); 5453 atomic_set(&kvm_guest_has_master_clock, 0); 5454 spin_unlock(&kvm_lock); 5455 } 5456 5457 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 5458 5459 /* 5460 * Notification about pvclock gtod data update. 5461 */ 5462 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 5463 void *priv) 5464 { 5465 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 5466 struct timekeeper *tk = priv; 5467 5468 update_pvclock_gtod(tk); 5469 5470 /* disable master clock if host does not trust, or does not 5471 * use, TSC clocksource 5472 */ 5473 if (gtod->clock.vclock_mode != VCLOCK_TSC && 5474 atomic_read(&kvm_guest_has_master_clock) != 0) 5475 queue_work(system_long_wq, &pvclock_gtod_work); 5476 5477 return 0; 5478 } 5479 5480 static struct notifier_block pvclock_gtod_notifier = { 5481 .notifier_call = pvclock_gtod_notify, 5482 }; 5483 #endif 5484 5485 int kvm_arch_init(void *opaque) 5486 { 5487 int r; 5488 struct kvm_x86_ops *ops = opaque; 5489 5490 if (kvm_x86_ops) { 5491 printk(KERN_ERR "kvm: already loaded the other module\n"); 5492 r = -EEXIST; 5493 goto out; 5494 } 5495 5496 if (!ops->cpu_has_kvm_support()) { 5497 printk(KERN_ERR "kvm: no hardware support\n"); 5498 r = -EOPNOTSUPP; 5499 goto out; 5500 } 5501 if (ops->disabled_by_bios()) { 5502 printk(KERN_ERR "kvm: disabled by bios\n"); 5503 r = -EOPNOTSUPP; 5504 goto out; 5505 } 5506 5507 r = -ENOMEM; 5508 shared_msrs = alloc_percpu(struct kvm_shared_msrs); 5509 if (!shared_msrs) { 5510 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); 5511 goto out; 5512 } 5513 5514 r = kvm_mmu_module_init(); 5515 if (r) 5516 goto out_free_percpu; 5517 5518 kvm_set_mmio_spte_mask(); 5519 kvm_init_msr_list(); 5520 5521 kvm_x86_ops = ops; 5522 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, 5523 PT_DIRTY_MASK, PT64_NX_MASK, 0); 5524 5525 kvm_timer_init(); 5526 5527 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5528 5529 if (cpu_has_xsave) 5530 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 5531 5532 kvm_lapic_init(); 5533 #ifdef CONFIG_X86_64 5534 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 5535 #endif 5536 5537 return 0; 5538 5539 out_free_percpu: 5540 free_percpu(shared_msrs); 5541 out: 5542 return r; 5543 } 5544 5545 void kvm_arch_exit(void) 5546 { 5547 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5548 5549 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5550 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 5551 CPUFREQ_TRANSITION_NOTIFIER); 5552 unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5553 #ifdef CONFIG_X86_64 5554 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 5555 #endif 5556 kvm_x86_ops = NULL; 5557 kvm_mmu_module_exit(); 5558 free_percpu(shared_msrs); 5559 } 5560 5561 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 5562 { 5563 ++vcpu->stat.halt_exits; 5564 if (irqchip_in_kernel(vcpu->kvm)) { 5565 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; 5566 return 1; 5567 } else { 5568 vcpu->run->exit_reason = KVM_EXIT_HLT; 5569 return 0; 5570 } 5571 } 5572 
EXPORT_SYMBOL_GPL(kvm_emulate_halt); 5573 5574 int kvm_hv_hypercall(struct kvm_vcpu *vcpu) 5575 { 5576 u64 param, ingpa, outgpa, ret; 5577 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; 5578 bool fast, longmode; 5579 int cs_db, cs_l; 5580 5581 /* 5582 * hypercall generates UD from non zero cpl and real mode 5583 * per HYPER-V spec 5584 */ 5585 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { 5586 kvm_queue_exception(vcpu, UD_VECTOR); 5587 return 0; 5588 } 5589 5590 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5591 longmode = is_long_mode(vcpu) && cs_l == 1; 5592 5593 if (!longmode) { 5594 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | 5595 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); 5596 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | 5597 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); 5598 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | 5599 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); 5600 } 5601 #ifdef CONFIG_X86_64 5602 else { 5603 param = kvm_register_read(vcpu, VCPU_REGS_RCX); 5604 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); 5605 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); 5606 } 5607 #endif 5608 5609 code = param & 0xffff; 5610 fast = (param >> 16) & 0x1; 5611 rep_cnt = (param >> 32) & 0xfff; 5612 rep_idx = (param >> 48) & 0xfff; 5613 5614 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); 5615 5616 switch (code) { 5617 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: 5618 kvm_vcpu_on_spin(vcpu); 5619 break; 5620 default: 5621 res = HV_STATUS_INVALID_HYPERCALL_CODE; 5622 break; 5623 } 5624 5625 ret = res | (((u64)rep_done & 0xfff) << 32); 5626 if (longmode) { 5627 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 5628 } else { 5629 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); 5630 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); 5631 } 5632 5633 return 1; 5634 } 5635 5636 /* 5637 * kvm_pv_kick_cpu_op: Kick a vcpu. 5638 * 5639 * @apicid - apicid of vcpu to be kicked. 
5640 */ 5641 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) 5642 { 5643 struct kvm_lapic_irq lapic_irq; 5644 5645 lapic_irq.shorthand = 0; 5646 lapic_irq.dest_mode = 0; 5647 lapic_irq.dest_id = apicid; 5648 5649 lapic_irq.delivery_mode = APIC_DM_REMRD; 5650 kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); 5651 } 5652 5653 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 5654 { 5655 unsigned long nr, a0, a1, a2, a3, ret; 5656 int r = 1; 5657 5658 if (kvm_hv_hypercall_enabled(vcpu->kvm)) 5659 return kvm_hv_hypercall(vcpu); 5660 5661 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 5662 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 5663 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); 5664 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); 5665 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); 5666 5667 trace_kvm_hypercall(nr, a0, a1, a2, a3); 5668 5669 if (!is_long_mode(vcpu)) { 5670 nr &= 0xFFFFFFFF; 5671 a0 &= 0xFFFFFFFF; 5672 a1 &= 0xFFFFFFFF; 5673 a2 &= 0xFFFFFFFF; 5674 a3 &= 0xFFFFFFFF; 5675 } 5676 5677 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 5678 ret = -KVM_EPERM; 5679 goto out; 5680 } 5681 5682 switch (nr) { 5683 case KVM_HC_VAPIC_POLL_IRQ: 5684 ret = 0; 5685 break; 5686 case KVM_HC_KICK_CPU: 5687 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); 5688 ret = 0; 5689 break; 5690 default: 5691 ret = -KVM_ENOSYS; 5692 break; 5693 } 5694 out: 5695 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 5696 ++vcpu->stat.hypercalls; 5697 return r; 5698 } 5699 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 5700 5701 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 5702 { 5703 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 5704 char instruction[3]; 5705 unsigned long rip = kvm_rip_read(vcpu); 5706 5707 kvm_x86_ops->patch_hypercall(vcpu, instruction); 5708 5709 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 5710 } 5711 5712 /* 5713 * Check if userspace requested an interrupt window, and that the 5714 * interrupt window is open. 5715 * 5716 * No need to exit to userspace if we already have an interrupt queued. 
5717 */ 5718 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 5719 { 5720 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && 5721 vcpu->run->request_interrupt_window && 5722 kvm_arch_interrupt_allowed(vcpu)); 5723 } 5724 5725 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 5726 { 5727 struct kvm_run *kvm_run = vcpu->run; 5728 5729 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 5730 kvm_run->cr8 = kvm_get_cr8(vcpu); 5731 kvm_run->apic_base = kvm_get_apic_base(vcpu); 5732 if (irqchip_in_kernel(vcpu->kvm)) 5733 kvm_run->ready_for_interrupt_injection = 1; 5734 else 5735 kvm_run->ready_for_interrupt_injection = 5736 kvm_arch_interrupt_allowed(vcpu) && 5737 !kvm_cpu_has_interrupt(vcpu) && 5738 !kvm_event_needs_reinjection(vcpu); 5739 } 5740 5741 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5742 { 5743 int max_irr, tpr; 5744 5745 if (!kvm_x86_ops->update_cr8_intercept) 5746 return; 5747 5748 if (!vcpu->arch.apic) 5749 return; 5750 5751 if (!vcpu->arch.apic->vapic_addr) 5752 max_irr = kvm_lapic_find_highest_irr(vcpu); 5753 else 5754 max_irr = -1; 5755 5756 if (max_irr != -1) 5757 max_irr >>= 4; 5758 5759 tpr = kvm_lapic_get_cr8(vcpu); 5760 5761 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); 5762 } 5763 5764 static void inject_pending_event(struct kvm_vcpu *vcpu) 5765 { 5766 /* try to reinject previous events if any */ 5767 if (vcpu->arch.exception.pending) { 5768 trace_kvm_inj_exception(vcpu->arch.exception.nr, 5769 vcpu->arch.exception.has_error_code, 5770 vcpu->arch.exception.error_code); 5771 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, 5772 vcpu->arch.exception.has_error_code, 5773 vcpu->arch.exception.error_code, 5774 vcpu->arch.exception.reinject); 5775 return; 5776 } 5777 5778 if (vcpu->arch.nmi_injected) { 5779 kvm_x86_ops->set_nmi(vcpu); 5780 return; 5781 } 5782 5783 if (vcpu->arch.interrupt.pending) { 5784 kvm_x86_ops->set_irq(vcpu); 5785 return; 5786 } 5787 5788 /* try to inject new event if pending */ 5789 if (vcpu->arch.nmi_pending) { 5790 if (kvm_x86_ops->nmi_allowed(vcpu)) { 5791 --vcpu->arch.nmi_pending; 5792 vcpu->arch.nmi_injected = true; 5793 kvm_x86_ops->set_nmi(vcpu); 5794 } 5795 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 5796 if (kvm_x86_ops->interrupt_allowed(vcpu)) { 5797 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), 5798 false); 5799 kvm_x86_ops->set_irq(vcpu); 5800 } 5801 } 5802 } 5803 5804 static void process_nmi(struct kvm_vcpu *vcpu) 5805 { 5806 unsigned limit = 2; 5807 5808 /* 5809 * x86 is limited to one NMI running, and one NMI pending after it. 5810 * If an NMI is already in progress, limit further NMIs to just one. 5811 * Otherwise, allow two (and we'll inject the first one immediately). 
5812 */ 5813 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) 5814 limit = 1; 5815 5816 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 5817 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 5818 kvm_make_request(KVM_REQ_EVENT, vcpu); 5819 } 5820 5821 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 5822 { 5823 u64 eoi_exit_bitmap[4]; 5824 u32 tmr[8]; 5825 5826 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 5827 return; 5828 5829 memset(eoi_exit_bitmap, 0, 32); 5830 memset(tmr, 0, 32); 5831 5832 kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); 5833 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); 5834 kvm_apic_update_tmr(vcpu, tmr); 5835 } 5836 5837 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 5838 { 5839 int r; 5840 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && 5841 vcpu->run->request_interrupt_window; 5842 bool req_immediate_exit = false; 5843 5844 if (vcpu->requests) { 5845 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 5846 kvm_mmu_unload(vcpu); 5847 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 5848 __kvm_migrate_timers(vcpu); 5849 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 5850 kvm_gen_update_masterclock(vcpu->kvm); 5851 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 5852 kvm_gen_kvmclock_update(vcpu); 5853 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 5854 r = kvm_guest_time_update(vcpu); 5855 if (unlikely(r)) 5856 goto out; 5857 } 5858 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 5859 kvm_mmu_sync_roots(vcpu); 5860 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 5861 kvm_x86_ops->tlb_flush(vcpu); 5862 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 5863 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 5864 r = 0; 5865 goto out; 5866 } 5867 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 5868 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 5869 r = 0; 5870 goto out; 5871 } 5872 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { 5873 vcpu->fpu_active = 0; 5874 kvm_x86_ops->fpu_deactivate(vcpu); 5875 } 5876 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 5877 /* Page is swapped out. Do synthetic halt */ 5878 vcpu->arch.apf.halted = true; 5879 r = 1; 5880 goto out; 5881 } 5882 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 5883 record_steal_time(vcpu); 5884 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 5885 process_nmi(vcpu); 5886 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 5887 kvm_handle_pmu_event(vcpu); 5888 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 5889 kvm_deliver_pmi(vcpu); 5890 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 5891 vcpu_scan_ioapic(vcpu); 5892 } 5893 5894 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { 5895 kvm_apic_accept_events(vcpu); 5896 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 5897 r = 1; 5898 goto out; 5899 } 5900 5901 inject_pending_event(vcpu); 5902 5903 /* enable NMI/IRQ window open exits if needed */ 5904 if (vcpu->arch.nmi_pending) 5905 req_immediate_exit = 5906 kvm_x86_ops->enable_nmi_window(vcpu) != 0; 5907 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 5908 req_immediate_exit = 5909 kvm_x86_ops->enable_irq_window(vcpu) != 0; 5910 5911 if (kvm_lapic_enabled(vcpu)) { 5912 /* 5913 * Update architecture specific hints for APIC 5914 * virtual interrupt delivery. 
5915 */ 5916 if (kvm_x86_ops->hwapic_irr_update) 5917 kvm_x86_ops->hwapic_irr_update(vcpu, 5918 kvm_lapic_find_highest_irr(vcpu)); 5919 update_cr8_intercept(vcpu); 5920 kvm_lapic_sync_to_vapic(vcpu); 5921 } 5922 } 5923 5924 r = kvm_mmu_reload(vcpu); 5925 if (unlikely(r)) { 5926 goto cancel_injection; 5927 } 5928 5929 preempt_disable(); 5930 5931 kvm_x86_ops->prepare_guest_switch(vcpu); 5932 if (vcpu->fpu_active) 5933 kvm_load_guest_fpu(vcpu); 5934 kvm_load_guest_xcr0(vcpu); 5935 5936 vcpu->mode = IN_GUEST_MODE; 5937 5938 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5939 5940 /* We should set ->mode before check ->requests, 5941 * see the comment in make_all_cpus_request. 5942 */ 5943 smp_mb__after_srcu_read_unlock(); 5944 5945 local_irq_disable(); 5946 5947 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests 5948 || need_resched() || signal_pending(current)) { 5949 vcpu->mode = OUTSIDE_GUEST_MODE; 5950 smp_wmb(); 5951 local_irq_enable(); 5952 preempt_enable(); 5953 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5954 r = 1; 5955 goto cancel_injection; 5956 } 5957 5958 if (req_immediate_exit) 5959 smp_send_reschedule(vcpu->cpu); 5960 5961 kvm_guest_enter(); 5962 5963 if (unlikely(vcpu->arch.switch_db_regs)) { 5964 set_debugreg(0, 7); 5965 set_debugreg(vcpu->arch.eff_db[0], 0); 5966 set_debugreg(vcpu->arch.eff_db[1], 1); 5967 set_debugreg(vcpu->arch.eff_db[2], 2); 5968 set_debugreg(vcpu->arch.eff_db[3], 3); 5969 } 5970 5971 trace_kvm_entry(vcpu->vcpu_id); 5972 kvm_x86_ops->run(vcpu); 5973 5974 /* 5975 * If the guest has used debug registers, at least dr7 5976 * will be disabled while returning to the host. 5977 * If we don't have active breakpoints in the host, we don't 5978 * care about the messed up debug address registers. But if 5979 * we have some of them active, restore the old state. 5980 */ 5981 if (hw_breakpoint_active()) 5982 hw_breakpoint_restore(); 5983 5984 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, 5985 native_read_tsc()); 5986 5987 vcpu->mode = OUTSIDE_GUEST_MODE; 5988 smp_wmb(); 5989 5990 /* Interrupt is enabled by handle_external_intr() */ 5991 kvm_x86_ops->handle_external_intr(vcpu); 5992 5993 ++vcpu->stat.exits; 5994 5995 /* 5996 * We must have an instruction between local_irq_enable() and 5997 * kvm_guest_exit(), so the timer interrupt isn't delayed by 5998 * the interrupt shadow. The stat.exits increment will do nicely. 
5999 * But we need to prevent reordering, hence this barrier(): 6000 */ 6001 barrier(); 6002 6003 kvm_guest_exit(); 6004 6005 preempt_enable(); 6006 6007 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 6008 6009 /* 6010 * Profile KVM exit RIPs: 6011 */ 6012 if (unlikely(prof_on == KVM_PROFILING)) { 6013 unsigned long rip = kvm_rip_read(vcpu); 6014 profile_hit(KVM_PROFILING, (void *)rip); 6015 } 6016 6017 if (unlikely(vcpu->arch.tsc_always_catchup)) 6018 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 6019 6020 if (vcpu->arch.apic_attention) 6021 kvm_lapic_sync_from_vapic(vcpu); 6022 6023 r = kvm_x86_ops->handle_exit(vcpu); 6024 return r; 6025 6026 cancel_injection: 6027 kvm_x86_ops->cancel_injection(vcpu); 6028 if (unlikely(vcpu->arch.apic_attention)) 6029 kvm_lapic_sync_from_vapic(vcpu); 6030 out: 6031 return r; 6032 } 6033 6034 6035 static int __vcpu_run(struct kvm_vcpu *vcpu) 6036 { 6037 int r; 6038 struct kvm *kvm = vcpu->kvm; 6039 6040 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6041 6042 r = 1; 6043 while (r > 0) { 6044 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 6045 !vcpu->arch.apf.halted) 6046 r = vcpu_enter_guest(vcpu); 6047 else { 6048 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6049 kvm_vcpu_block(vcpu); 6050 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6051 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { 6052 kvm_apic_accept_events(vcpu); 6053 switch(vcpu->arch.mp_state) { 6054 case KVM_MP_STATE_HALTED: 6055 vcpu->arch.pv.pv_unhalted = false; 6056 vcpu->arch.mp_state = 6057 KVM_MP_STATE_RUNNABLE; 6058 case KVM_MP_STATE_RUNNABLE: 6059 vcpu->arch.apf.halted = false; 6060 break; 6061 case KVM_MP_STATE_INIT_RECEIVED: 6062 break; 6063 default: 6064 r = -EINTR; 6065 break; 6066 } 6067 } 6068 } 6069 6070 if (r <= 0) 6071 break; 6072 6073 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); 6074 if (kvm_cpu_has_pending_timer(vcpu)) 6075 kvm_inject_pending_timer_irqs(vcpu); 6076 6077 if (dm_request_for_irq_injection(vcpu)) { 6078 r = -EINTR; 6079 vcpu->run->exit_reason = KVM_EXIT_INTR; 6080 ++vcpu->stat.request_irq_exits; 6081 } 6082 6083 kvm_check_async_pf_completion(vcpu); 6084 6085 if (signal_pending(current)) { 6086 r = -EINTR; 6087 vcpu->run->exit_reason = KVM_EXIT_INTR; 6088 ++vcpu->stat.signal_exits; 6089 } 6090 if (need_resched()) { 6091 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6092 kvm_resched(vcpu); 6093 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6094 } 6095 } 6096 6097 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6098 6099 return r; 6100 } 6101 6102 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 6103 { 6104 int r; 6105 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 6106 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 6107 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 6108 if (r != EMULATE_DONE) 6109 return 0; 6110 return 1; 6111 } 6112 6113 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 6114 { 6115 BUG_ON(!vcpu->arch.pio.count); 6116 6117 return complete_emulated_io(vcpu); 6118 } 6119 6120 /* 6121 * Implements the following, as a state machine: 6122 * 6123 * read: 6124 * for each fragment 6125 * for each mmio piece in the fragment 6126 * write gpa, len 6127 * exit 6128 * copy data 6129 * execute insn 6130 * 6131 * write: 6132 * for each fragment 6133 * for each mmio piece in the fragment 6134 * write gpa, len 6135 * copy data 6136 * exit 6137 */ 6138 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 6139 { 6140 struct kvm_run *run = vcpu->run; 6141 struct kvm_mmio_fragment *frag; 6142 unsigned len; 6143 6144 
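/*
 * For instance, a single 16-byte read fragment takes two KVM_EXIT_MMIO
 * round trips: the first pass here copies the 8 bytes userspace just
 * provided, advances data/gpa, shrinks len to 8 and re-exits for the
 * remainder; the second pass copies the last 8 bytes and moves on to the
 * next fragment.  Once every fragment is consumed, the data is fed back
 * into the emulator via complete_emulated_io().
 */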
BUG_ON(!vcpu->mmio_needed); 6145 6146 /* Complete previous fragment */ 6147 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 6148 len = min(8u, frag->len); 6149 if (!vcpu->mmio_is_write) 6150 memcpy(frag->data, run->mmio.data, len); 6151 6152 if (frag->len <= 8) { 6153 /* Switch to the next fragment. */ 6154 frag++; 6155 vcpu->mmio_cur_fragment++; 6156 } else { 6157 /* Go forward to the next mmio piece. */ 6158 frag->data += len; 6159 frag->gpa += len; 6160 frag->len -= len; 6161 } 6162 6163 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 6164 vcpu->mmio_needed = 0; 6165 6166 /* FIXME: return into emulator if single-stepping. */ 6167 if (vcpu->mmio_is_write) 6168 return 1; 6169 vcpu->mmio_read_completed = 1; 6170 return complete_emulated_io(vcpu); 6171 } 6172 6173 run->exit_reason = KVM_EXIT_MMIO; 6174 run->mmio.phys_addr = frag->gpa; 6175 if (vcpu->mmio_is_write) 6176 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 6177 run->mmio.len = min(8u, frag->len); 6178 run->mmio.is_write = vcpu->mmio_is_write; 6179 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 6180 return 0; 6181 } 6182 6183 6184 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 6185 { 6186 int r; 6187 sigset_t sigsaved; 6188 6189 if (!tsk_used_math(current) && init_fpu(current)) 6190 return -ENOMEM; 6191 6192 if (vcpu->sigset_active) 6193 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 6194 6195 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 6196 kvm_vcpu_block(vcpu); 6197 kvm_apic_accept_events(vcpu); 6198 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 6199 r = -EAGAIN; 6200 goto out; 6201 } 6202 6203 /* re-sync apic's tpr */ 6204 if (!irqchip_in_kernel(vcpu->kvm)) { 6205 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 6206 r = -EINVAL; 6207 goto out; 6208 } 6209 } 6210 6211 if (unlikely(vcpu->arch.complete_userspace_io)) { 6212 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 6213 vcpu->arch.complete_userspace_io = NULL; 6214 r = cui(vcpu); 6215 if (r <= 0) 6216 goto out; 6217 } else 6218 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 6219 6220 r = __vcpu_run(vcpu); 6221 6222 out: 6223 post_kvm_run_save(vcpu); 6224 if (vcpu->sigset_active) 6225 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 6226 6227 return r; 6228 } 6229 6230 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6231 { 6232 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 6233 /* 6234 * We are here if userspace calls get_regs() in the middle of 6235 * instruction emulation. Registers state needs to be copied 6236 * back from emulation context to vcpu. 
Userspace shouldn't do 6237 * that usually, but some badly designed PV devices (vmware 6238 * backdoor interface) need this to work. 6239 */ 6240 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); 6241 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6242 } 6243 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); 6244 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); 6245 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); 6246 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); 6247 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); 6248 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); 6249 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 6250 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); 6251 #ifdef CONFIG_X86_64 6252 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); 6253 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); 6254 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); 6255 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); 6256 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); 6257 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); 6258 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); 6259 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); 6260 #endif 6261 6262 regs->rip = kvm_rip_read(vcpu); 6263 regs->rflags = kvm_get_rflags(vcpu); 6264 6265 return 0; 6266 } 6267 6268 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6269 { 6270 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 6271 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6272 6273 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); 6274 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); 6275 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); 6276 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); 6277 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); 6278 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); 6279 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); 6280 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); 6281 #ifdef CONFIG_X86_64 6282 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); 6283 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 6284 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); 6285 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); 6286 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); 6287 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); 6288 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); 6289 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); 6290 #endif 6291 6292 kvm_rip_write(vcpu, regs->rip); 6293 kvm_set_rflags(vcpu, regs->rflags); 6294 6295 vcpu->arch.exception.pending = false; 6296 6297 kvm_make_request(KVM_REQ_EVENT, vcpu); 6298 6299 return 0; 6300 } 6301 6302 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 6303 { 6304 struct kvm_segment cs; 6305 6306 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 6307 *db = cs.db; 6308 *l = cs.l; 6309 } 6310 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); 6311 6312 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 6313 struct kvm_sregs *sregs) 6314 { 6315 struct desc_ptr dt; 6316 6317 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 6318 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 6319 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 6320 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 6321 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 6322 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 6323 6324 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 6325 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 6326 6327 kvm_x86_ops->get_idt(vcpu,
&dt); 6328 sregs->idt.limit = dt.size; 6329 sregs->idt.base = dt.address; 6330 kvm_x86_ops->get_gdt(vcpu, &dt); 6331 sregs->gdt.limit = dt.size; 6332 sregs->gdt.base = dt.address; 6333 6334 sregs->cr0 = kvm_read_cr0(vcpu); 6335 sregs->cr2 = vcpu->arch.cr2; 6336 sregs->cr3 = kvm_read_cr3(vcpu); 6337 sregs->cr4 = kvm_read_cr4(vcpu); 6338 sregs->cr8 = kvm_get_cr8(vcpu); 6339 sregs->efer = vcpu->arch.efer; 6340 sregs->apic_base = kvm_get_apic_base(vcpu); 6341 6342 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 6343 6344 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) 6345 set_bit(vcpu->arch.interrupt.nr, 6346 (unsigned long *)sregs->interrupt_bitmap); 6347 6348 return 0; 6349 } 6350 6351 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 6352 struct kvm_mp_state *mp_state) 6353 { 6354 kvm_apic_accept_events(vcpu); 6355 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && 6356 vcpu->arch.pv.pv_unhalted) 6357 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 6358 else 6359 mp_state->mp_state = vcpu->arch.mp_state; 6360 6361 return 0; 6362 } 6363 6364 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 6365 struct kvm_mp_state *mp_state) 6366 { 6367 if (!kvm_vcpu_has_lapic(vcpu) && 6368 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 6369 return -EINVAL; 6370 6371 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 6372 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 6373 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 6374 } else 6375 vcpu->arch.mp_state = mp_state->mp_state; 6376 kvm_make_request(KVM_REQ_EVENT, vcpu); 6377 return 0; 6378 } 6379 6380 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 6381 int reason, bool has_error_code, u32 error_code) 6382 { 6383 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 6384 int ret; 6385 6386 init_emulate_ctxt(vcpu); 6387 6388 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 6389 has_error_code, error_code); 6390 6391 if (ret) 6392 return EMULATE_FAIL; 6393 6394 kvm_rip_write(vcpu, ctxt->eip); 6395 kvm_set_rflags(vcpu, ctxt->eflags); 6396 kvm_make_request(KVM_REQ_EVENT, vcpu); 6397 return EMULATE_DONE; 6398 } 6399 EXPORT_SYMBOL_GPL(kvm_task_switch); 6400 6401 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 6402 struct kvm_sregs *sregs) 6403 { 6404 int mmu_reset_needed = 0; 6405 int pending_vec, max_bits, idx; 6406 struct desc_ptr dt; 6407 6408 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) 6409 return -EINVAL; 6410 6411 dt.size = sregs->idt.limit; 6412 dt.address = sregs->idt.base; 6413 kvm_x86_ops->set_idt(vcpu, &dt); 6414 dt.size = sregs->gdt.limit; 6415 dt.address = sregs->gdt.base; 6416 kvm_x86_ops->set_gdt(vcpu, &dt); 6417 6418 vcpu->arch.cr2 = sregs->cr2; 6419 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 6420 vcpu->arch.cr3 = sregs->cr3; 6421 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 6422 6423 kvm_set_cr8(vcpu, sregs->cr8); 6424 6425 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 6426 kvm_x86_ops->set_efer(vcpu, sregs->efer); 6427 kvm_set_apic_base(vcpu, sregs->apic_base); 6428 6429 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 6430 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 6431 vcpu->arch.cr0 = sregs->cr0; 6432 6433 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 6434 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 6435 if (sregs->cr4 & X86_CR4_OSXSAVE) 6436 kvm_update_cpuid(vcpu); 6437 6438 idx = srcu_read_lock(&vcpu->kvm->srcu); 6439 if (!is_long_mode(vcpu) && 
is_pae(vcpu)) { 6440 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 6441 mmu_reset_needed = 1; 6442 } 6443 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6444 6445 if (mmu_reset_needed) 6446 kvm_mmu_reset_context(vcpu); 6447 6448 max_bits = KVM_NR_INTERRUPTS; 6449 pending_vec = find_first_bit( 6450 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 6451 if (pending_vec < max_bits) { 6452 kvm_queue_interrupt(vcpu, pending_vec, false); 6453 pr_debug("Set back pending irq %d\n", pending_vec); 6454 } 6455 6456 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 6457 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 6458 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 6459 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 6460 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 6461 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 6462 6463 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 6464 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 6465 6466 update_cr8_intercept(vcpu); 6467 6468 /* Older userspace won't unhalt the vcpu on reset. */ 6469 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 6470 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 6471 !is_protmode(vcpu)) 6472 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 6473 6474 kvm_make_request(KVM_REQ_EVENT, vcpu); 6475 6476 return 0; 6477 } 6478 6479 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 6480 struct kvm_guest_debug *dbg) 6481 { 6482 unsigned long rflags; 6483 int i, r; 6484 6485 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 6486 r = -EBUSY; 6487 if (vcpu->arch.exception.pending) 6488 goto out; 6489 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 6490 kvm_queue_exception(vcpu, DB_VECTOR); 6491 else 6492 kvm_queue_exception(vcpu, BP_VECTOR); 6493 } 6494 6495 /* 6496 * Read rflags as long as potentially injected trace flags are still 6497 * filtered out. 6498 */ 6499 rflags = kvm_get_rflags(vcpu); 6500 6501 vcpu->guest_debug = dbg->control; 6502 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 6503 vcpu->guest_debug = 0; 6504 6505 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 6506 for (i = 0; i < KVM_NR_DB_REGS; ++i) 6507 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 6508 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 6509 } else { 6510 for (i = 0; i < KVM_NR_DB_REGS; i++) 6511 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 6512 } 6513 kvm_update_dr7(vcpu); 6514 6515 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6516 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + 6517 get_segment_base(vcpu, VCPU_SREG_CS); 6518 6519 /* 6520 * Trigger an rflags update that will inject or remove the trace 6521 * flags. 6522 */ 6523 kvm_set_rflags(vcpu, rflags); 6524 6525 kvm_x86_ops->update_db_bp_intercept(vcpu); 6526 6527 r = 0; 6528 6529 out: 6530 6531 return r; 6532 } 6533 6534 /* 6535 * Translate a guest virtual address to a guest physical address. 
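 *
 * Illustration (not kernel code): this helper backs the KVM_TRANSLATE vcpu
 * ioctl and translates through the vcpu's current paging mode. A hedged
 * userspace sketch, with vcpu_fd, gva and use_gpa() as assumed names:
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		use_gpa(tr.physical_address);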
6536 */ 6537 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 6538 struct kvm_translation *tr) 6539 { 6540 unsigned long vaddr = tr->linear_address; 6541 gpa_t gpa; 6542 int idx; 6543 6544 idx = srcu_read_lock(&vcpu->kvm->srcu); 6545 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 6546 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6547 tr->physical_address = gpa; 6548 tr->valid = gpa != UNMAPPED_GVA; 6549 tr->writeable = 1; 6550 tr->usermode = 0; 6551 6552 return 0; 6553 } 6554 6555 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 6556 { 6557 struct i387_fxsave_struct *fxsave = 6558 &vcpu->arch.guest_fpu.state->fxsave; 6559 6560 memcpy(fpu->fpr, fxsave->st_space, 128); 6561 fpu->fcw = fxsave->cwd; 6562 fpu->fsw = fxsave->swd; 6563 fpu->ftwx = fxsave->twd; 6564 fpu->last_opcode = fxsave->fop; 6565 fpu->last_ip = fxsave->rip; 6566 fpu->last_dp = fxsave->rdp; 6567 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); 6568 6569 return 0; 6570 } 6571 6572 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 6573 { 6574 struct i387_fxsave_struct *fxsave = 6575 &vcpu->arch.guest_fpu.state->fxsave; 6576 6577 memcpy(fxsave->st_space, fpu->fpr, 128); 6578 fxsave->cwd = fpu->fcw; 6579 fxsave->swd = fpu->fsw; 6580 fxsave->twd = fpu->ftwx; 6581 fxsave->fop = fpu->last_opcode; 6582 fxsave->rip = fpu->last_ip; 6583 fxsave->rdp = fpu->last_dp; 6584 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); 6585 6586 return 0; 6587 } 6588 6589 int fx_init(struct kvm_vcpu *vcpu) 6590 { 6591 int err; 6592 6593 err = fpu_alloc(&vcpu->arch.guest_fpu); 6594 if (err) 6595 return err; 6596 6597 fpu_finit(&vcpu->arch.guest_fpu); 6598 6599 /* 6600 * Ensure guest xcr0 is valid for loading 6601 */ 6602 vcpu->arch.xcr0 = XSTATE_FP; 6603 6604 vcpu->arch.cr0 |= X86_CR0_ET; 6605 6606 return 0; 6607 } 6608 EXPORT_SYMBOL_GPL(fx_init); 6609 6610 static void fx_free(struct kvm_vcpu *vcpu) 6611 { 6612 fpu_free(&vcpu->arch.guest_fpu); 6613 } 6614 6615 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 6616 { 6617 if (vcpu->guest_fpu_loaded) 6618 return; 6619 6620 /* 6621 * Restore all possible states in the guest, 6622 * and assume host would use all available bits. 6623 * Guest xcr0 would be loaded later. 
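 * (kvm_put_guest_xcr0() below switches back to the host xcr0, if the guest
 * value is still loaded, before the guest FPU image is restored; the guest
 * xcr0 is re-established by kvm_load_guest_xcr0() on the next guest entry.)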
6624 */ 6625 kvm_put_guest_xcr0(vcpu); 6626 vcpu->guest_fpu_loaded = 1; 6627 __kernel_fpu_begin(); 6628 fpu_restore_checking(&vcpu->arch.guest_fpu); 6629 trace_kvm_fpu(1); 6630 } 6631 6632 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 6633 { 6634 kvm_put_guest_xcr0(vcpu); 6635 6636 if (!vcpu->guest_fpu_loaded) 6637 return; 6638 6639 vcpu->guest_fpu_loaded = 0; 6640 fpu_save_init(&vcpu->arch.guest_fpu); 6641 __kernel_fpu_end(); 6642 ++vcpu->stat.fpu_reload; 6643 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 6644 trace_kvm_fpu(0); 6645 } 6646 6647 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 6648 { 6649 kvmclock_reset(vcpu); 6650 6651 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 6652 fx_free(vcpu); 6653 kvm_x86_ops->vcpu_free(vcpu); 6654 } 6655 6656 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 6657 unsigned int id) 6658 { 6659 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) 6660 printk_once(KERN_WARNING 6661 "kvm: SMP vm created on host with unstable TSC; " 6662 "guest TSC will not be reliable\n"); 6663 return kvm_x86_ops->vcpu_create(kvm, id); 6664 } 6665 6666 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 6667 { 6668 int r; 6669 6670 vcpu->arch.mtrr_state.have_fixed = 1; 6671 r = vcpu_load(vcpu); 6672 if (r) 6673 return r; 6674 kvm_vcpu_reset(vcpu); 6675 kvm_mmu_setup(vcpu); 6676 vcpu_put(vcpu); 6677 6678 return r; 6679 } 6680 6681 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 6682 { 6683 int r; 6684 struct msr_data msr; 6685 6686 r = vcpu_load(vcpu); 6687 if (r) 6688 return r; 6689 msr.data = 0x0; 6690 msr.index = MSR_IA32_TSC; 6691 msr.host_initiated = true; 6692 kvm_write_tsc(vcpu, &msr); 6693 vcpu_put(vcpu); 6694 6695 return r; 6696 } 6697 6698 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 6699 { 6700 int r; 6701 vcpu->arch.apf.msr_val = 0; 6702 6703 r = vcpu_load(vcpu); 6704 BUG_ON(r); 6705 kvm_mmu_unload(vcpu); 6706 vcpu_put(vcpu); 6707 6708 fx_free(vcpu); 6709 kvm_x86_ops->vcpu_free(vcpu); 6710 } 6711 6712 void kvm_vcpu_reset(struct kvm_vcpu *vcpu) 6713 { 6714 atomic_set(&vcpu->arch.nmi_queued, 0); 6715 vcpu->arch.nmi_pending = 0; 6716 vcpu->arch.nmi_injected = false; 6717 6718 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 6719 vcpu->arch.dr6 = DR6_FIXED_1; 6720 vcpu->arch.dr7 = DR7_FIXED_1; 6721 kvm_update_dr7(vcpu); 6722 6723 kvm_make_request(KVM_REQ_EVENT, vcpu); 6724 vcpu->arch.apf.msr_val = 0; 6725 vcpu->arch.st.msr_val = 0; 6726 6727 kvmclock_reset(vcpu); 6728 6729 kvm_clear_async_pf_completion_queue(vcpu); 6730 kvm_async_pf_hash_reset(vcpu); 6731 vcpu->arch.apf.halted = false; 6732 6733 kvm_pmu_reset(vcpu); 6734 6735 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 6736 vcpu->arch.regs_avail = ~0; 6737 vcpu->arch.regs_dirty = ~0; 6738 6739 kvm_x86_ops->vcpu_reset(vcpu); 6740 } 6741 6742 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) 6743 { 6744 struct kvm_segment cs; 6745 6746 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 6747 cs.selector = vector << 8; 6748 cs.base = vector << 12; 6749 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 6750 kvm_rip_write(vcpu, 0); 6751 } 6752 6753 int kvm_arch_hardware_enable(void *garbage) 6754 { 6755 struct kvm *kvm; 6756 struct kvm_vcpu *vcpu; 6757 int i; 6758 int ret; 6759 u64 local_tsc; 6760 u64 max_tsc = 0; 6761 bool stable, backwards_tsc = false; 6762 6763 kvm_shared_msr_cpu_online(); 6764 ret = kvm_x86_ops->hardware_enable(garbage); 6765 if (ret != 0) 6766 return ret; 6767 6768 local_tsc = native_read_tsc(); 6769 stable = !check_tsc_unstable(); 6770 
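/*
 * Worked example (illustrative numbers only) for the scan below: if a VCPU
 * last ran when this host's TSC read 10,000,000 cycles (last_host_tsc) and
 * the TSC, reset across a suspend cycle, now reads 1,000,000 (local_tsc),
 * the loop records max_tsc = 10,000,000 and the backwards_tsc path further
 * down adds delta_cyc = 9,000,000 cycles to that VCPU's
 * tsc_offset_adjustment, so the guest never observes its TSC going
 * backwards. The full rationale is in the comment that follows the scan.
 */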
list_for_each_entry(kvm, &vm_list, vm_list) { 6771 kvm_for_each_vcpu(i, vcpu, kvm) { 6772 if (!stable && vcpu->cpu == smp_processor_id()) 6773 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); 6774 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 6775 backwards_tsc = true; 6776 if (vcpu->arch.last_host_tsc > max_tsc) 6777 max_tsc = vcpu->arch.last_host_tsc; 6778 } 6779 } 6780 } 6781 6782 /* 6783 * Sometimes, even reliable TSCs go backwards. This happens on 6784 * platforms that reset TSC during suspend or hibernate actions, but 6785 * maintain synchronization. We must compensate. Fortunately, we can 6786 * detect that condition here, which happens early in CPU bringup, 6787 * before any KVM threads can be running. Unfortunately, we can't 6788 * bring the TSCs fully up to date with real time, as we aren't yet far 6789 * enough into CPU bringup that we know how much real time has actually 6790 * elapsed; our helper function, get_kernel_ns(), will be using boot 6791 * variables that haven't been updated yet. 6792 * 6793 * So we simply find the maximum observed TSC above, then record the 6794 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 6795 * the adjustment will be applied. Note that we accumulate 6796 * adjustments, in case multiple suspend cycles happen before some VCPU 6797 * gets a chance to run again. In the event that no KVM threads get a 6798 * chance to run, we will miss the entire elapsed period, as we'll have 6799 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 6800 * lose cycle time. This isn't too big a deal, since the loss will be 6801 * uniform across all VCPUs (not to mention the scenario is extremely 6802 * unlikely). It is possible that a second hibernate recovery happens 6803 * much faster than a first, causing the observed TSC here to be 6804 * smaller; this would require additional padding adjustment, which is 6805 * why we set last_host_tsc to the local tsc observed here. 6806 * 6807 * N.B. - this code below runs only on platforms with reliable TSC, 6808 * as that is the only way backwards_tsc is set above. Also note 6809 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 6810 * have the same delta_cyc adjustment applied if backwards_tsc 6811 * is detected. Note further, this adjustment is only done once, 6812 * as we reset last_host_tsc on all VCPUs to stop this from being 6813 * called multiple times (one for each physical CPU bringup). 6814 * 6815 * Platforms with unreliable TSCs don't have to deal with this, they 6816 * will be compensated by the logic in vcpu_load, which sets the TSC to 6817 * catchup mode. This will catch up all VCPUs to real time, but cannot 6818 * guarantee that they stay in perfect synchronization. 6819 */ 6820 if (backwards_tsc) { 6821 u64 delta_cyc = max_tsc - local_tsc; 6822 list_for_each_entry(kvm, &vm_list, vm_list) { 6823 kvm_for_each_vcpu(i, vcpu, kvm) { 6824 vcpu->arch.tsc_offset_adjustment += delta_cyc; 6825 vcpu->arch.last_host_tsc = local_tsc; 6826 set_bit(KVM_REQ_MASTERCLOCK_UPDATE, 6827 &vcpu->requests); 6828 } 6829 6830 /* 6831 * We have to disable TSC offset matching; if you were 6832 * booting a VM while issuing an S4 host suspend, 6833 * you may have some problems. Solving this issue is 6834 * left as an exercise to the reader.
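 * (Zeroing last_tsc_nsec/last_tsc_write below simply forces the next
 * kvm_write_tsc() to open a new sync generation instead of "matching"
 * against stale, pre-suspend values.)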
6835 */ 6836 kvm->arch.last_tsc_nsec = 0; 6837 kvm->arch.last_tsc_write = 0; 6838 } 6839 6840 } 6841 return 0; 6842 } 6843 6844 void kvm_arch_hardware_disable(void *garbage) 6845 { 6846 kvm_x86_ops->hardware_disable(garbage); 6847 drop_user_return_notifiers(garbage); 6848 } 6849 6850 int kvm_arch_hardware_setup(void) 6851 { 6852 return kvm_x86_ops->hardware_setup(); 6853 } 6854 6855 void kvm_arch_hardware_unsetup(void) 6856 { 6857 kvm_x86_ops->hardware_unsetup(); 6858 } 6859 6860 void kvm_arch_check_processor_compat(void *rtn) 6861 { 6862 kvm_x86_ops->check_processor_compatibility(rtn); 6863 } 6864 6865 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) 6866 { 6867 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); 6868 } 6869 6870 struct static_key kvm_no_apic_vcpu __read_mostly; 6871 6872 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 6873 { 6874 struct page *page; 6875 struct kvm *kvm; 6876 int r; 6877 6878 BUG_ON(vcpu->kvm == NULL); 6879 kvm = vcpu->kvm; 6880 6881 vcpu->arch.pv.pv_unhalted = false; 6882 vcpu->arch.emulate_ctxt.ops = &emulate_ops; 6883 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) 6884 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 6885 else 6886 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 6887 6888 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 6889 if (!page) { 6890 r = -ENOMEM; 6891 goto fail; 6892 } 6893 vcpu->arch.pio_data = page_address(page); 6894 6895 kvm_set_tsc_khz(vcpu, max_tsc_khz); 6896 6897 r = kvm_mmu_create(vcpu); 6898 if (r < 0) 6899 goto fail_free_pio_data; 6900 6901 if (irqchip_in_kernel(kvm)) { 6902 r = kvm_create_lapic(vcpu); 6903 if (r < 0) 6904 goto fail_mmu_destroy; 6905 } else 6906 static_key_slow_inc(&kvm_no_apic_vcpu); 6907 6908 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, 6909 GFP_KERNEL); 6910 if (!vcpu->arch.mce_banks) { 6911 r = -ENOMEM; 6912 goto fail_free_lapic; 6913 } 6914 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 6915 6916 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { 6917 r = -ENOMEM; 6918 goto fail_free_mce_banks; 6919 } 6920 6921 r = fx_init(vcpu); 6922 if (r) 6923 goto fail_free_wbinvd_dirty_mask; 6924 6925 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 6926 vcpu->arch.pv_time_enabled = false; 6927 6928 vcpu->arch.guest_supported_xcr0 = 0; 6929 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 6930 6931 kvm_async_pf_hash_reset(vcpu); 6932 kvm_pmu_init(vcpu); 6933 6934 return 0; 6935 fail_free_wbinvd_dirty_mask: 6936 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 6937 fail_free_mce_banks: 6938 kfree(vcpu->arch.mce_banks); 6939 fail_free_lapic: 6940 kvm_free_lapic(vcpu); 6941 fail_mmu_destroy: 6942 kvm_mmu_destroy(vcpu); 6943 fail_free_pio_data: 6944 free_page((unsigned long)vcpu->arch.pio_data); 6945 fail: 6946 return r; 6947 } 6948 6949 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 6950 { 6951 int idx; 6952 6953 kvm_pmu_destroy(vcpu); 6954 kfree(vcpu->arch.mce_banks); 6955 kvm_free_lapic(vcpu); 6956 idx = srcu_read_lock(&vcpu->kvm->srcu); 6957 kvm_mmu_destroy(vcpu); 6958 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6959 free_page((unsigned long)vcpu->arch.pio_data); 6960 if (!irqchip_in_kernel(vcpu->kvm)) 6961 static_key_slow_dec(&kvm_no_apic_vcpu); 6962 } 6963 6964 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 6965 { 6966 if (type) 6967 return -EINVAL; 6968 6969 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 6970 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); 6971 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 6972 
atomic_set(&kvm->arch.noncoherent_dma_count, 0); 6973 6974 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 6975 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 6976 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 6977 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 6978 &kvm->arch.irq_sources_bitmap); 6979 6980 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 6981 mutex_init(&kvm->arch.apic_map_lock); 6982 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); 6983 6984 pvclock_update_vm_gtod_copy(kvm); 6985 6986 return 0; 6987 } 6988 6989 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 6990 { 6991 int r; 6992 r = vcpu_load(vcpu); 6993 BUG_ON(r); 6994 kvm_mmu_unload(vcpu); 6995 vcpu_put(vcpu); 6996 } 6997 6998 static void kvm_free_vcpus(struct kvm *kvm) 6999 { 7000 unsigned int i; 7001 struct kvm_vcpu *vcpu; 7002 7003 /* 7004 * Unpin any mmu pages first. 7005 */ 7006 kvm_for_each_vcpu(i, vcpu, kvm) { 7007 kvm_clear_async_pf_completion_queue(vcpu); 7008 kvm_unload_vcpu_mmu(vcpu); 7009 } 7010 kvm_for_each_vcpu(i, vcpu, kvm) 7011 kvm_arch_vcpu_free(vcpu); 7012 7013 mutex_lock(&kvm->lock); 7014 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 7015 kvm->vcpus[i] = NULL; 7016 7017 atomic_set(&kvm->online_vcpus, 0); 7018 mutex_unlock(&kvm->lock); 7019 } 7020 7021 void kvm_arch_sync_events(struct kvm *kvm) 7022 { 7023 kvm_free_all_assigned_devices(kvm); 7024 kvm_free_pit(kvm); 7025 } 7026 7027 void kvm_arch_destroy_vm(struct kvm *kvm) 7028 { 7029 if (current->mm == kvm->mm) { 7030 /* 7031 * Free memory regions allocated on behalf of userspace, 7032 * unless the memory map has changed due to process exit 7033 * or fd copying. 7034 */ 7035 struct kvm_userspace_memory_region mem; 7036 memset(&mem, 0, sizeof(mem)); 7037 mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; 7038 kvm_set_memory_region(kvm, &mem); 7039 7040 mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 7041 kvm_set_memory_region(kvm, &mem); 7042 7043 mem.slot = TSS_PRIVATE_MEMSLOT; 7044 kvm_set_memory_region(kvm, &mem); 7045 } 7046 kvm_iommu_unmap_guest(kvm); 7047 kfree(kvm->arch.vpic); 7048 kfree(kvm->arch.vioapic); 7049 kvm_free_vcpus(kvm); 7050 if (kvm->arch.apic_access_page) 7051 put_page(kvm->arch.apic_access_page); 7052 if (kvm->arch.ept_identity_pagetable) 7053 put_page(kvm->arch.ept_identity_pagetable); 7054 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 7055 } 7056 7057 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 7058 struct kvm_memory_slot *dont) 7059 { 7060 int i; 7061 7062 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7063 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { 7064 kvm_kvfree(free->arch.rmap[i]); 7065 free->arch.rmap[i] = NULL; 7066 } 7067 if (i == 0) 7068 continue; 7069 7070 if (!dont || free->arch.lpage_info[i - 1] != 7071 dont->arch.lpage_info[i - 1]) { 7072 kvm_kvfree(free->arch.lpage_info[i - 1]); 7073 free->arch.lpage_info[i - 1] = NULL; 7074 } 7075 } 7076 } 7077 7078 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 7079 unsigned long npages) 7080 { 7081 int i; 7082 7083 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7084 unsigned long ugfn; 7085 int lpages; 7086 int level = i + 1; 7087 7088 lpages = gfn_to_index(slot->base_gfn + npages - 1, 7089 slot->base_gfn, level) + 1; 7090 7091 slot->arch.rmap[i] = 7092 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); 7093 if (!slot->arch.rmap[i]) 7094 goto out_free; 7095 if (i == 0) 7096 continue; 7097 7098 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages
* 7099 sizeof(*slot->arch.lpage_info[i - 1])); 7100 if (!slot->arch.lpage_info[i - 1]) 7101 goto out_free; 7102 7103 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 7104 slot->arch.lpage_info[i - 1][0].write_count = 1; 7105 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 7106 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; 7107 ugfn = slot->userspace_addr >> PAGE_SHIFT; 7108 /* 7109 * If the gfn and userspace address are not aligned wrt each 7110 * other, or if explicitly asked to, disable large page 7111 * support for this slot 7112 */ 7113 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || 7114 !kvm_largepages_enabled()) { 7115 unsigned long j; 7116 7117 for (j = 0; j < lpages; ++j) 7118 slot->arch.lpage_info[i - 1][j].write_count = 1; 7119 } 7120 } 7121 7122 return 0; 7123 7124 out_free: 7125 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7126 kvm_kvfree(slot->arch.rmap[i]); 7127 slot->arch.rmap[i] = NULL; 7128 if (i == 0) 7129 continue; 7130 7131 kvm_kvfree(slot->arch.lpage_info[i - 1]); 7132 slot->arch.lpage_info[i - 1] = NULL; 7133 } 7134 return -ENOMEM; 7135 } 7136 7137 void kvm_arch_memslots_updated(struct kvm *kvm) 7138 { 7139 /* 7140 * memslots->generation has been incremented. 7141 * mmio generation may have reached its maximum value. 7142 */ 7143 kvm_mmu_invalidate_mmio_sptes(kvm); 7144 } 7145 7146 int kvm_arch_prepare_memory_region(struct kvm *kvm, 7147 struct kvm_memory_slot *memslot, 7148 struct kvm_userspace_memory_region *mem, 7149 enum kvm_mr_change change) 7150 { 7151 /* 7152 * Only private memory slots need to be mapped here since 7153 * KVM_SET_MEMORY_REGION ioctl is no longer supported. 7154 */ 7155 if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { 7156 unsigned long userspace_addr; 7157 7158 /* 7159 * MAP_SHARED to prevent internal slot pages from being moved 7160 * by fork()/COW. 7161 */ 7162 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, 7163 PROT_READ | PROT_WRITE, 7164 MAP_SHARED | MAP_ANONYMOUS, 0); 7165 7166 if (IS_ERR((void *)userspace_addr)) 7167 return PTR_ERR((void *)userspace_addr); 7168 7169 memslot->userspace_addr = userspace_addr; 7170 } 7171 7172 return 0; 7173 } 7174 7175 void kvm_arch_commit_memory_region(struct kvm *kvm, 7176 struct kvm_userspace_memory_region *mem, 7177 const struct kvm_memory_slot *old, 7178 enum kvm_mr_change change) 7179 { 7180 7181 int nr_mmu_pages = 0; 7182 7183 if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) { 7184 int ret; 7185 7186 ret = vm_munmap(old->userspace_addr, 7187 old->npages * PAGE_SIZE); 7188 if (ret < 0) 7189 printk(KERN_WARNING 7190 "kvm_vm_ioctl_set_memory_region: " 7191 "failed to munmap memory\n"); 7192 } 7193 7194 if (!kvm->arch.n_requested_mmu_pages) 7195 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 7196 7197 if (nr_mmu_pages) 7198 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 7199 /* 7200 * Write protect all pages for dirty logging. 7201 * Existing largepage mappings are destroyed here and new ones will 7202 * not be created until the end of the logging. 
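 *
 * Illustration (not kernel code): the userspace flow that reaches this path,
 * with vm_fd, region and bitmap as assumed names. Dirty logging is enabled
 * per slot and the accumulated bitmap is then harvested:
 *
 *	region.flags |= KVM_MEM_LOG_DIRTY_PAGES;
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *	...
 *	struct kvm_dirty_log log = { .slot = region.slot,
 *				     .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * which returns one bit per page dirtied since the previous call.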
7203 */ 7204 if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) 7205 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 7206 } 7207 7208 void kvm_arch_flush_shadow_all(struct kvm *kvm) 7209 { 7210 kvm_mmu_invalidate_zap_all_pages(kvm); 7211 } 7212 7213 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 7214 struct kvm_memory_slot *slot) 7215 { 7216 kvm_mmu_invalidate_zap_all_pages(kvm); 7217 } 7218 7219 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 7220 { 7221 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 7222 !vcpu->arch.apf.halted) 7223 || !list_empty_careful(&vcpu->async_pf.done) 7224 || kvm_apic_has_events(vcpu) 7225 || vcpu->arch.pv.pv_unhalted 7226 || atomic_read(&vcpu->arch.nmi_queued) || 7227 (kvm_arch_interrupt_allowed(vcpu) && 7228 kvm_cpu_has_interrupt(vcpu)); 7229 } 7230 7231 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 7232 { 7233 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 7234 } 7235 7236 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 7237 { 7238 return kvm_x86_ops->interrupt_allowed(vcpu); 7239 } 7240 7241 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 7242 { 7243 unsigned long current_rip = kvm_rip_read(vcpu) + 7244 get_segment_base(vcpu, VCPU_SREG_CS); 7245 7246 return current_rip == linear_rip; 7247 } 7248 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 7249 7250 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 7251 { 7252 unsigned long rflags; 7253 7254 rflags = kvm_x86_ops->get_rflags(vcpu); 7255 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 7256 rflags &= ~X86_EFLAGS_TF; 7257 return rflags; 7258 } 7259 EXPORT_SYMBOL_GPL(kvm_get_rflags); 7260 7261 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 7262 { 7263 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 7264 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 7265 rflags |= X86_EFLAGS_TF; 7266 kvm_x86_ops->set_rflags(vcpu, rflags); 7267 kvm_make_request(KVM_REQ_EVENT, vcpu); 7268 } 7269 EXPORT_SYMBOL_GPL(kvm_set_rflags); 7270 7271 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) 7272 { 7273 int r; 7274 7275 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || 7276 work->wakeup_all) 7277 return; 7278 7279 r = kvm_mmu_reload(vcpu); 7280 if (unlikely(r)) 7281 return; 7282 7283 if (!vcpu->arch.mmu.direct_map && 7284 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) 7285 return; 7286 7287 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); 7288 } 7289 7290 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 7291 { 7292 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 7293 } 7294 7295 static inline u32 kvm_async_pf_next_probe(u32 key) 7296 { 7297 return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); 7298 } 7299 7300 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7301 { 7302 u32 key = kvm_async_pf_hash_fn(gfn); 7303 7304 while (vcpu->arch.apf.gfns[key] != ~0) 7305 key = kvm_async_pf_next_probe(key); 7306 7307 vcpu->arch.apf.gfns[key] = gfn; 7308 } 7309 7310 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 7311 { 7312 int i; 7313 u32 key = kvm_async_pf_hash_fn(gfn); 7314 7315 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && 7316 (vcpu->arch.apf.gfns[key] != gfn && 7317 vcpu->arch.apf.gfns[key] != ~0); i++) 7318 key = kvm_async_pf_next_probe(key); 7319 7320 return key; 7321 } 7322 7323 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7324 { 7325 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, 
gfn)] == gfn; 7326 } 7327 7328 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 7329 { 7330 u32 i, j, k; 7331 7332 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 7333 while (true) { 7334 vcpu->arch.apf.gfns[i] = ~0; 7335 do { 7336 j = kvm_async_pf_next_probe(j); 7337 if (vcpu->arch.apf.gfns[j] == ~0) 7338 return; 7339 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 7340 /* 7341 * k lies cyclically in ]i,j] 7342 * | i.k.j | 7343 * |....j i.k.| or |.k..j i...| 7344 */ 7345 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); 7346 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 7347 i = j; 7348 } 7349 } 7350 7351 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) 7352 { 7353 7354 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, 7355 sizeof(val)); 7356 } 7357 7358 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 7359 struct kvm_async_pf *work) 7360 { 7361 struct x86_exception fault; 7362 7363 trace_kvm_async_pf_not_present(work->arch.token, work->gva); 7364 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 7365 7366 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || 7367 (vcpu->arch.apf.send_user_only && 7368 kvm_x86_ops->get_cpl(vcpu) == 0)) 7369 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 7370 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { 7371 fault.vector = PF_VECTOR; 7372 fault.error_code_valid = true; 7373 fault.error_code = 0; 7374 fault.nested_page_fault = false; 7375 fault.address = work->arch.token; 7376 kvm_inject_page_fault(vcpu, &fault); 7377 } 7378 } 7379 7380 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 7381 struct kvm_async_pf *work) 7382 { 7383 struct x86_exception fault; 7384 7385 trace_kvm_async_pf_ready(work->arch.token, work->gva); 7386 if (work->wakeup_all) 7387 work->arch.token = ~0; /* broadcast wakeup */ 7388 else 7389 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 7390 7391 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 7392 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { 7393 fault.vector = PF_VECTOR; 7394 fault.error_code_valid = true; 7395 fault.error_code = 0; 7396 fault.nested_page_fault = false; 7397 fault.address = work->arch.token; 7398 kvm_inject_page_fault(vcpu, &fault); 7399 } 7400 vcpu->arch.apf.halted = false; 7401 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 7402 } 7403 7404 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 7405 { 7406 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) 7407 return true; 7408 else 7409 return !kvm_event_needs_reinjection(vcpu) && 7410 kvm_x86_ops->interrupt_allowed(vcpu); 7411 } 7412 7413 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 7414 { 7415 atomic_inc(&kvm->arch.noncoherent_dma_count); 7416 } 7417 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 7418 7419 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 7420 { 7421 atomic_dec(&kvm->arch.noncoherent_dma_count); 7422 } 7423 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 7424 7425 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 7426 { 7427 return atomic_read(&kvm->arch.noncoherent_dma_count); 7428 } 7429 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 7430 7431 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); 7432 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); 7433 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); 7434 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); 7435 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); 7436 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); 7437 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); 7438 
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); 7439 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); 7440 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); 7441 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); 7442 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); 7443 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); 7444