/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}
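/*
 * make_all_cpus_request() marks @req pending on every vcpu of @kvm and then
 * IPIs (with the empty ack_flush handler) the physical CPUs currently
 * running one of those vcpus, so the request is noticed promptly.  If the
 * temporary cpumask cannot be allocated, it falls back to kicking all
 * online CPUs.  It returns true when at least one CPU was kicked.
 */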
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0;
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto out_err;
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			goto out_err;
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
out_err:
	hardware_disable_all();
#endif
out_err_nodisable:
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
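/*
 * gfn_to_pfn() pins the page backing a guest frame via
 * get_user_pages_fast(); if that fails it falls back to walking the VMA,
 * which handles VM_PFNMAP mappings (e.g. device memory) that have no
 * struct page.  On any other failure the reference-counted bad_page is
 * returned, which callers detect with is_error_page()/is_error_pfn().
 */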
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
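/*
 * kvm_read_guest(), kvm_write_guest() and kvm_clear_guest() below walk a
 * guest-physical range one page at a time: next_segment() caps each chunk
 * at the end of the current page, and the per-page helpers translate
 * gfn -> hva and copy through the user mapping of the memslot.
 */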
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
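/*
 * Note on ordering in kvm_vm_ioctl_create_vcpu() below: the new vcpu is
 * stored in kvm->vcpus[] first and online_vcpus is incremented only
 * afterwards (with an smp_wmb() in between), so readers that iterate up to
 * online_vcpus never see a slot that has not been fully published.
 */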
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass NULL, not an uninitialized sigset, when clearing */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
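/*
 * File operations for the per-VM fd returned by KVM_CREATE_VM: ioctls for
 * memory slots, vcpu creation, dirty logging and eventfds, plus an mmap
 * handler that faults in guest pages by gfn (the fault path above).
 */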
static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = kvm_vm_compat_ioctl,
#endif
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE; /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}
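/*
 * Virtualization extensions are enabled lazily and reference-counted:
 * hardware_enable_all() turns them on across all online CPUs when the first
 * VM is created (kvm_usage_count 0 -> 1), and hardware_disable_all() turns
 * them off again when the last VM goes away.
 */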
static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}
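/*
 * An io bus is a fixed-size array of NR_IOBUS_DEVS devices; registration
 * simply appends to it (failing with -ENOSPC when full) and unregistration
 * swaps the last entry into the freed slot.
 */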
/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM] = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}
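/*
 * Preempt notifier hooks registered in vcpu_load(): kvm_sched_in() and
 * kvm_sched_out() call the arch load/put handlers so per-CPU guest state
 * follows the vcpu thread when the scheduler moves or preempts it.
 */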
static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);