/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static unsigned int halt_poll_ns;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm *kvm,
				    struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_reserved_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}
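
/*
 * Counterpart of vcpu_load(): saves the architecture state, unregisters the
 * preempt notifier and drops the vcpu mutex taken by vcpu_load().
 */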
void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.
	 * So if we increase the sequence here, the kvm page fault will
	 * notice if the spte can't be established because the page is
	 * going to be freed.  If instead the kvm page fault establishes
	 * the spte before ->invalidate_page runs, kvm_unmap_hva will
	 * release it before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static void kvm_init_memslots_id(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!kvm->memslots)
		goto out_err_no_srcu;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code of handling generation number wrap-around.
	 */
	kvm->memslots->generation = -150;

	kvm_init_memslots_id(kvm);
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kvfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_physmem_slot(kvm, memslot, NULL);

	kvfree(kvm->memslots);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

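/*
 * Drop a reference taken with kvm_get_kvm(); the VM is destroyed when the
 * last user goes away.
 */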
void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sorting algorithm takes advantage of having an initially
 * sorted array and the known position of the changed memslot.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		new->base_gfn = 0;
		new->flags = 0;
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}

static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = kvm->memslots;

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

	kvm_arch_memslots_updated(kvm);

	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (mem->slot >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(kvm->memslots, mem->slot);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *slot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else if (old.npages) {
		change = KVM_MR_DELETE;
	} else /* Modify a non-existent slot: disallowed. */
		goto out;

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, kvm->memslots) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == mem->slot))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, mem->slot);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, slots);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, change);

	kvm_free_physmem_slot(kvm, &old, &new);
	kvfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_physmem_slot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if (mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty write protect them for next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to userspace.
 *    4. Upon return, the caller flushes the TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before, and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

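/*
 * Translate a guest frame number into the host virtual address of its
 * backing memory; returns an error hva if the gfn has no valid slot.
 */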
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether we are in atomic context and therefore must not sleep
 * @async: whether this function needs to wait for IO to complete if the
 *	host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether to allow mapping a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
		     bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	struct kvm_memory_slot *slot;

	if (async)
		*async = false;

	slot = gfn_to_memslot(kvm, gfn);

	return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
				    writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_reserved_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_prot(kvm, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva_prot(kvm, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

static void mark_page_dirty_in_slot(struct kvm *kvm,
				    struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		return -EINTR;
	}
	if (kvm_cpu_has_pending_timer(vcpu))
		return -EINTR;
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	ktime_t start, cur;
	DEFINE_WAIT(wait);
	bool waited = false;

	start = cur = ktime_get();
	if (halt_poll_ns) {
		ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);

		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
			 */
			if (kvm_vcpu_check_block(vcpu) < 0) {
				++vcpu->stat.halt_successful_poll;
				goto out;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));
	}

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_vcpu_check_block(vcpu) < 0)
			break;

		waited = true;
		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
	cur = ktime_get();

out:
	trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	int ret = 0;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is chosen by the following heuristics:
 *
 * (a) A VCPU which has not done a PL-exit or had cpu relax intercepted
 * recently (a preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of interception/PLE handler.
 *
 * (b) A VCPU which has done a PL-exit/cpu relax intercept but did not get
 * a chance last time (it has mostly become eligible now since we probably
 * yielded to the lock holder in the last iteration).  This is done by toggling
 * @dy_eligible each time a VCPU is checked for eligibility.
 *
 * Yielding to a recently PL-exited/cpu relax intercepted VCPU before yielding
 * to a preempted lock-holder could result in wrong VCPU selection and CPU
 * burning.  Giving priority to a potential lock-holder increases lock
 * progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's data
 * without locking does not harm.  It may result in trying to yield to the same
 * VCPU, failing, and continuing with the next VCPU, and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
		    vcpu->spin_loop.dy_eligible;

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (!ACCESS_ONCE(vcpu->preempted))
				continue;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
				kvm->last_boosted_vcpu = i;
				break;
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return kvm_arch_vcpu_fault(vcpu, vmf);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto unlock_vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_postcreate(vcpu);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	r = vcpu_load(vcpu);
	if (r)
		return r;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
			/* The thread running this VCPU changed. */
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break them.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	r = vcpu_load(vcpu);
	if (r)
		return r;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
			/* The thread running this VCPU changed. */
			struct pid *oldpid = vcpu->pid;
			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);

			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			kvm_sregs = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof(tr)))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof(tr)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(sigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof(sigset)))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
			fpu = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(csigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
					   sizeof(csigset)))
				goto out;
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{
	struct kvm_device_attr attr;

	if (!accessor)
		return -EPERM;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}

static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{
	struct kvm_device *dev = filp->private_data;

	switch (ioctl) {
	case KVM_SET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
	case KVM_GET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
	case KVM_HAS_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
	default:
		if (dev->ops->ioctl)
			return dev->ops->ioctl(dev, ioctl, arg);

		return -ENOTTY;
	}
}

static int kvm_device_release(struct inode *inode, struct file *filp)
{
	struct kvm_device *dev = filp->private_data;
	struct kvm *kvm = dev->kvm;

	kvm_put_kvm(kvm);
	return 0;
}

static const struct file_operations kvm_device_fops = {
	.unlocked_ioctl = kvm_device_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl = kvm_device_ioctl,
#endif
	.release = kvm_device_release,
};

struct kvm_device *kvm_device_from_filp(struct file *filp)
{
	if (filp->f_op != &kvm_device_fops)
		return NULL;

	return filp->private_data;
}
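
/*
 * Illustrative userspace sketch (not part of this file): driving the device
 * fd ioctls handled by kvm_device_ioctl() above.  The device type and the
 * group/attr values are placeholders; real values are defined by the
 * individual device implementations.  Error handling is omitted.
 *
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_FSL_MPIC_20,	// example type
 *		.flags = 0,				// or KVM_CREATE_DEVICE_TEST
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// cd.fd is the device fd
 *
 *	__u64 value = 0;				// userspace buffer
 *	struct kvm_device_attr attr = {
 *		.group = 0,				// placeholder group
 *		.attr  = 0,				// placeholder attribute
 *		.addr  = (__u64)(unsigned long)&value,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */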
static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_MPIC
	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
#endif

#ifdef CONFIG_KVM_XICS
	[KVM_DEV_TYPE_XICS]		= &kvm_xics_ops,
#endif
};

int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
{
	if (type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENOSPC;

	if (kvm_device_ops_table[type] != NULL)
		return -EEXIST;

	kvm_device_ops_table[type] = ops;
	return 0;
}

void kvm_unregister_device_ops(u32 type)
{
	if (kvm_device_ops_table[type] != NULL)
		kvm_device_ops_table[type] = NULL;
}

static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{
	struct kvm_device_ops *ops = NULL;
	struct kvm_device *dev;
	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	int ret;

	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENODEV;

	ops = kvm_device_ops_table[cd->type];
	if (ops == NULL)
		return -ENODEV;

	if (test)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ops = ops;
	dev->kvm = kvm;

	ret = ops->create(dev, cd->type);
	if (ret < 0) {
		kfree(dev);
		return ret;
	}

	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
	if (ret < 0) {
		ops->destroy(dev);
		return ret;
	}

	list_add(&dev->vm_node, &kvm->devices);
	kvm_get_kvm(kvm);
	cd->fd = ret;
	return 0;
}

static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD:
	case KVM_CAP_IRQFD_RESAMPLE:
#endif
	case KVM_CAP_CHECK_EXTENSION_VM:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}
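
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * capabilities reported by kvm_vm_ioctl_check_extension_generic() above.
 * KVM_CHECK_EXTENSION works on the /dev/kvm fd and, where
 * KVM_CAP_CHECK_EXTENSION_VM is reported, on a VM fd as well.
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) <= 0)
 *		exit(1);	// KVM_CAP_USER_MEMORY is required
 *
 *	int routes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQ_ROUTING);
 *	// > 0 means supported; here it is the maximum number of routes.
 */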
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof(kvm_userspace_mem)))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof(log)))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_irqfd(kvm, &data);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_SIGNAL_MSI: {
		struct kvm_msi msi;

		r = -EFAULT;
		if (copy_from_user(&msi, argp, sizeof(msi)))
			goto out;
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
			goto out;

		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
					  ioctl == KVM_IRQ_LINE_STATUS);
		if (r)
			goto out;

		r = -EFAULT;
		if (ioctl == KVM_IRQ_LINE_STATUS) {
			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
				goto out;
		}

		r = 0;
		break;
	}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
	case KVM_CREATE_DEVICE: {
		struct kvm_create_device cd;

		r = -EFAULT;
		if (copy_from_user(&cd, argp, sizeof(cd)))
			goto out;

		r = kvm_ioctl_create_device(kvm, &cd);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(argp, &cd, sizeof(cd)))
			goto out;

		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
		break;
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
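
/*
 * Illustrative userspace sketch (not part of this file): the
 * KVM_SET_USER_MEMORY_REGION path of kvm_vm_ioctl() above.  A page-aligned
 * anonymous mapping is handed to the kernel as guest physical memory; the
 * slot number and guest physical address are example values.
 *
 *	void *mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,			// or KVM_MEM_LOG_DIRTY_PAGES
 *		.guest_phys_addr = 0x100000,		// example GPA
 *		.memory_size     = mem_size,
 *		.userspace_addr  = (__u64)(unsigned long)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */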
#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.llseek		= noop_llseek,
};

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm(type);
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = kvm_dev_ioctl_create_vm(arg);
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
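
/*
 * Illustrative userspace sketch (not part of this file): the typical
 * bootstrap against the /dev/kvm miscdevice defined above (registered in
 * kvm_init() below).  Error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);				// unexpected ABI
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	// type 0: default VM
 */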
static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable();

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_enable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable();
}

static void hardware_disable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_count_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_count_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_count_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_count_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		pr_info("kvm: disabling virtualization on CPU%d\n",
			cpu);
		hardware_disable();
		break;
	case CPU_STARTING:
		pr_info("kvm: enabling virtualization on CPU%d\n",
			cpu);
		hardware_enable();
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	pr_info("kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{
	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
	return kvm_io_bus_cmp(p1, p2);
}

static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
				 gpa_t addr, int len)
{
	bus->range[bus->dev_count++] = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
	     kvm_io_bus_sort_cmp, NULL);

	return 0;
}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
				    gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}
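
/*
 * Worked example for kvm_io_bus_cmp() above (an illustration, not extra
 * functionality): the comparison treats an access range that is fully
 * contained in a registered range as equal, which is what lets
 * kvm_io_bus_get_first_dev() step backwards to the first of several
 * overlapping matches.
 *
 *	struct kvm_io_range reg = { .addr = 0x100, .len = 8 };	// registered
 *	struct kvm_io_range key = { .addr = 0x104, .len = 2 };	// guest access
 *
 *	kvm_io_bus_cmp(&key, &reg) == 0;	// 0x104 >= 0x100 and
 *						// 0x104 + 2 <= 0x100 + 8
 */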
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
	return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
				       range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

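
/*
 * Minimal in-kernel sketch (an illustration, assuming the kvm_io_device_ops
 * layout from kvm/iodev.h in this tree): how a device would plug into the
 * bus lookup implemented by the read/write helpers above and the
 * registration helpers that follow.  The names my_mmio_dev/my_mmio_write
 * are hypothetical.
 *
 *	static int my_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 *				 gpa_t addr, int len, const void *val)
 *	{
 *		return 0;	// consume the access; 0 means handled
 *	}
 *
 *	static const struct kvm_io_device_ops my_mmio_ops = {
 *		.write = my_mmio_write,
 *	};
 *
 *	kvm_iodevice_init(&my_mmio_dev, &my_mmio_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &my_mmio_dev);
 *	mutex_unlock(&kvm->slots_lock);
 */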
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
	       sizeof(struct kvm_io_range)));
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	r = -ENOENT;
	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
			r = 0;
			break;
		}

	if (r)
		return r;

	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static int kvm_init_debug(void)
{
	int r = -EEXIST;
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	if (kvm_debugfs_dir == NULL)
		goto out;

	for (p = debugfs_entries; p->name; ++p) {
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
		if (p->dentry == NULL)
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
}
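
/*
 * Illustrative userspace sketch (not part of this file): the files created
 * by kvm_init_debug() above show up under debugfs, normally mounted at
 * /sys/kernel/debug.  The entry names come from the arch-specific
 * debugfs_entries[] table, so "exits" below is only an example.
 *
 *	FILE *f = fopen("/sys/kernel/debug/kvm/exits", "r");
 *	unsigned long long count;
 *	if (f && fscanf(f, "%llu", &count) == 1)
 *		printf("exits: %llu\n", count);
 */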
static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (vcpu->preempted)
		vcpu->preempted = false;

	kvm_arch_sched_in(vcpu, cpu);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
	kvm_arch_vcpu_put(vcpu);
}
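
/*
 * Minimal sketch (an illustration with hypothetical names): how an
 * architecture module is expected to drive kvm_init()/kvm_exit() below.
 * The opaque pointer is handed to kvm_arch_init(), and the size/alignment
 * of the arch vcpu structure size the kvm_vcpu kmem cache.
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		return kvm_init(&my_arch_opaque, sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(my_arch_kvm_init);
 *	module_exit(my_arch_kvm_exit);
 */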
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like intel and amd on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already setup for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	r = kvm_init_debug();
	if (r) {
		pr_err("kvm: create debugfs files failed\n");
		goto out_undebugfs;
	}

	r = kvm_vfio_ops_init();
	WARN_ON(r);

	return 0;

out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
	misc_deregister(&kvm_dev);
out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);