/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
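		/*
		 * Set the request bit for every vcpu; only vcpus currently
		 * running on another cpu are collected in @cpus and IPIed
		 * below so they notice the request promptly.
		 */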
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);


	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * 	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return gfn_to_hva_memslot(slot, gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: a NULL argp must clear the sigmask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = kvm_vm_compat_ioctl,
#endif
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

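/*
 * kvm_usage_count tracks the number of VMs: virtualization is enabled on
 * every online cpu when the first VM is created and disabled again once
 * the last VM goes away.
 */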
static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}


asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* Caller must hold slots_lock. */
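/*
 * Readers walk kvm->buses[] under SRCU; writers publish a copied and
 * modified bus with rcu_assign_pointer() and only free the old one after
 * synchronize_srcu_expedited(), so in-flight readers never see a
 * dangling pointer.
 */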
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);