/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new, u64 last_generation);

static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm *kvm,
				    struct kvm_memory_slot *memslot, gfn_t gfn);

bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
		/* The thread running this VCPU changed. */
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
		rcu_assign_pointer(vcpu->pid, newpid);
		synchronize_rcu();
		put_pid(oldpid);
	}
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, address);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static void kvm_init_memslots_id(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_nodisable;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err_nosrcu;
	kvm_init_memslots_id(kvm);
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_nosrcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
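	/*
	 * The VM starts with a single reference; it is dropped by
	 * kvm_put_kvm() when the last user (typically the VM file
	 * descriptor) goes away, which triggers kvm_destroy_vm().
	 */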
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

void kvm_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvm_kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_physmem_slot(kvm, memslot, NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
#ifndef CONFIG_S390
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

#endif /* !CONFIG_S390 */
	return 0;
}

static int cmp_memslot(const void *slot1, const void *slot2)
{
	struct kvm_memory_slot *s1, *s2;

	s1 = (struct kvm_memory_slot *)slot1;
	s2 = (struct kvm_memory_slot *)slot2;

	if (s1->npages < s2->npages)
		return 1;
	if (s1->npages > s2->npages)
		return -1;

	return 0;
}

/*
 * Sort the memslots based on their size, so that the larger slots
 * will get a better fit.
 */
static void sort_memslots(struct kvm_memslots *slots)
{
	int i;

	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
	     sizeof(struct kvm_memory_slot), cmp_memslot, NULL);

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[slots->memslots[i].id] = i;
}

static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new,
			    u64 last_generation)
{
	if (new) {
		int id = new->id;
		struct kvm_memory_slot *old = id_to_memslot(slots, id);
		unsigned long npages = old->npages;

		*old = *new;
		if (new->npages != npages)
			sort_memslots(slots);
	}

	slots->generation = last_generation + 1;
}

static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef KVM_CAP_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
	struct kvm_memslots *old_memslots = kvm->memslots;

	update_memslots(slots, new, kvm->memslots->generation);
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_memslots_updated(kvm);

	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (mem->slot >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(kvm->memslots, mem->slot);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *slot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	r = -EINVAL;
	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else if (old.npages) {
		change = KVM_MR_DELETE;
	} else /* Modify a non-existent slot: disallowed. */
		goto out;

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, kvm->memslots) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == mem->slot))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		r = -ENOMEM;
		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
				GFP_KERNEL);
		if (!slots)
			goto out_free;
		slot = id_to_memslot(slots, mem->slot);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, slots, NULL);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * - kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	r = -ENOMEM;
	/*
	 * We can re-use the old_memslots from above, the only difference
	 * from the currently installed memslots is the invalid flag.  This
	 * will get overwritten by update_memslots anyway.
	 */
	if (!slots) {
		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
				GFP_KERNEL);
		if (!slots)
			goto out_free;
	}

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	old_memslots = install_new_memslots(kvm, slots, &new);

	kvm_arch_commit_memory_region(kvm, mem, &old, change);

	kvm_free_physmem_slot(kvm, &old, &new);
	kfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kfree(slots);
out_free:
	kvm_free_physmem_slot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if (mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
				 gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

static int kvm_read_hva(void *data, void __user *hva, int len)
{
	return __copy_from_user(data, hva, len);
}

static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
{
	return __copy_from_user_inatomic(data, hva, len);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = get_user_pages_fast(addr, 1, write_fault,
					     page);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function needs to wait for IO to complete if the
 *	   host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
		     bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	struct kvm_memory_slot *slot;

	if (async)
		*async = false;

	slot = gfn_to_memslot(kvm, gfn);

	return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
				    writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

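	/* gfn_to_hva_many() stores in @entry how many pages remain in the
	 * memslot starting at @gfn; bail out if fewer than @nr_pages. */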
	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_mmio_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_prot(kvm, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = kvm_read_hva(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva_prot(kvm, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = kvm_read_hva_atomic(data,
				(void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

static void mark_page_dirty_in_slot(struct kvm *kvm,
				    struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	bool ret = false;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(target->pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	if (task->flags & PF_VCPU) {
		put_task_struct(task);
		return ret;
	}
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following heuristics:
 *
 * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted recently
 * (a preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of interception/PLE handler.
 *
 * (b) A VCPU which has done a pl-exit/cpu relax intercepted but did not get
 * a chance last time (mostly it has become eligible now since we have probably
 * yielded to the lockholder in the last iteration. This is done by toggling
 * @dy_eligible each time a VCPU is checked for eligibility.)
 *
 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
 * to a preempted lock-holder could result in wrong VCPU selection and CPU
 * burning. Giving priority to a potential lock-holder increases lock
 * progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's data
 * without locking does not harm. It may result in trying to yield to the same
 * VCPU, failing, and continuing with the next VCPU, and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
			(vcpu->spin_loop.in_spin_loop &&
			 vcpu->spin_loop.dy_eligible);

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (!ACCESS_ONCE(vcpu->preempted))
				continue;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
				kvm->last_boosted_vcpu = i;
				break;
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return kvm_arch_vcpu_fault(vcpu, vmf);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto unlock_vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_postcreate(vcpu);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	r = vcpu_load(vcpu);
	if (r)
		return r;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			kvm_sregs = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
2122 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2123 if (r) 2124 goto out; 2125 r = -EFAULT; 2126 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2127 goto out; 2128 r = 0; 2129 break; 2130 } 2131 case KVM_SET_FPU: { 2132 fpu = memdup_user(argp, sizeof(*fpu)); 2133 if (IS_ERR(fpu)) { 2134 r = PTR_ERR(fpu); 2135 fpu = NULL; 2136 goto out; 2137 } 2138 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2139 break; 2140 } 2141 default: 2142 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2143 } 2144 out: 2145 vcpu_put(vcpu); 2146 kfree(fpu); 2147 kfree(kvm_sregs); 2148 return r; 2149 } 2150 2151 #ifdef CONFIG_COMPAT 2152 static long kvm_vcpu_compat_ioctl(struct file *filp, 2153 unsigned int ioctl, unsigned long arg) 2154 { 2155 struct kvm_vcpu *vcpu = filp->private_data; 2156 void __user *argp = compat_ptr(arg); 2157 int r; 2158 2159 if (vcpu->kvm->mm != current->mm) 2160 return -EIO; 2161 2162 switch (ioctl) { 2163 case KVM_SET_SIGNAL_MASK: { 2164 struct kvm_signal_mask __user *sigmask_arg = argp; 2165 struct kvm_signal_mask kvm_sigmask; 2166 compat_sigset_t csigset; 2167 sigset_t sigset; 2168 2169 if (argp) { 2170 r = -EFAULT; 2171 if (copy_from_user(&kvm_sigmask, argp, 2172 sizeof kvm_sigmask)) 2173 goto out; 2174 r = -EINVAL; 2175 if (kvm_sigmask.len != sizeof csigset) 2176 goto out; 2177 r = -EFAULT; 2178 if (copy_from_user(&csigset, sigmask_arg->sigset, 2179 sizeof csigset)) 2180 goto out; 2181 sigset_from_compat(&sigset, &csigset); 2182 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2183 } else 2184 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2185 break; 2186 } 2187 default: 2188 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2189 } 2190 2191 out: 2192 return r; 2193 } 2194 #endif 2195 2196 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2197 int (*accessor)(struct kvm_device *dev, 2198 struct kvm_device_attr *attr), 2199 unsigned long arg) 2200 { 2201 struct kvm_device_attr attr; 2202 2203 if (!accessor) 2204 return -EPERM; 2205 2206 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2207 return -EFAULT; 2208 2209 return accessor(dev, &attr); 2210 } 2211 2212 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2213 unsigned long arg) 2214 { 2215 struct kvm_device *dev = filp->private_data; 2216 2217 switch (ioctl) { 2218 case KVM_SET_DEVICE_ATTR: 2219 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2220 case KVM_GET_DEVICE_ATTR: 2221 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2222 case KVM_HAS_DEVICE_ATTR: 2223 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2224 default: 2225 if (dev->ops->ioctl) 2226 return dev->ops->ioctl(dev, ioctl, arg); 2227 2228 return -ENOTTY; 2229 } 2230 } 2231 2232 static int kvm_device_release(struct inode *inode, struct file *filp) 2233 { 2234 struct kvm_device *dev = filp->private_data; 2235 struct kvm *kvm = dev->kvm; 2236 2237 kvm_put_kvm(kvm); 2238 return 0; 2239 } 2240 2241 static const struct file_operations kvm_device_fops = { 2242 .unlocked_ioctl = kvm_device_ioctl, 2243 #ifdef CONFIG_COMPAT 2244 .compat_ioctl = kvm_device_ioctl, 2245 #endif 2246 .release = kvm_device_release, 2247 }; 2248 2249 struct kvm_device *kvm_device_from_filp(struct file *filp) 2250 { 2251 if (filp->f_op != &kvm_device_fops) 2252 return NULL; 2253 2254 return filp->private_data; 2255 } 2256 2257 static int kvm_ioctl_create_device(struct kvm *kvm, 2258 struct kvm_create_device *cd) 2259 { 2260 struct kvm_device_ops *ops = NULL; 2261 struct kvm_device *dev; 2262 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2263 
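	/*
	 * KVM_CREATE_DEVICE_TEST means userspace is only probing whether
	 * this device type is supported: the switch below still selects an
	 * ops table, but we return before allocating or instantiating the
	 * device.
	 */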
int ret; 2264 2265 switch (cd->type) { 2266 #ifdef CONFIG_KVM_MPIC 2267 case KVM_DEV_TYPE_FSL_MPIC_20: 2268 case KVM_DEV_TYPE_FSL_MPIC_42: 2269 ops = &kvm_mpic_ops; 2270 break; 2271 #endif 2272 #ifdef CONFIG_KVM_XICS 2273 case KVM_DEV_TYPE_XICS: 2274 ops = &kvm_xics_ops; 2275 break; 2276 #endif 2277 #ifdef CONFIG_KVM_VFIO 2278 case KVM_DEV_TYPE_VFIO: 2279 ops = &kvm_vfio_ops; 2280 break; 2281 #endif 2282 #ifdef CONFIG_KVM_ARM_VGIC 2283 case KVM_DEV_TYPE_ARM_VGIC_V2: 2284 ops = &kvm_arm_vgic_v2_ops; 2285 break; 2286 #endif 2287 default: 2288 return -ENODEV; 2289 } 2290 2291 if (test) 2292 return 0; 2293 2294 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2295 if (!dev) 2296 return -ENOMEM; 2297 2298 dev->ops = ops; 2299 dev->kvm = kvm; 2300 2301 ret = ops->create(dev, cd->type); 2302 if (ret < 0) { 2303 kfree(dev); 2304 return ret; 2305 } 2306 2307 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2308 if (ret < 0) { 2309 ops->destroy(dev); 2310 return ret; 2311 } 2312 2313 list_add(&dev->vm_node, &kvm->devices); 2314 kvm_get_kvm(kvm); 2315 cd->fd = ret; 2316 return 0; 2317 } 2318 2319 static long kvm_vm_ioctl(struct file *filp, 2320 unsigned int ioctl, unsigned long arg) 2321 { 2322 struct kvm *kvm = filp->private_data; 2323 void __user *argp = (void __user *)arg; 2324 int r; 2325 2326 if (kvm->mm != current->mm) 2327 return -EIO; 2328 switch (ioctl) { 2329 case KVM_CREATE_VCPU: 2330 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2331 break; 2332 case KVM_SET_USER_MEMORY_REGION: { 2333 struct kvm_userspace_memory_region kvm_userspace_mem; 2334 2335 r = -EFAULT; 2336 if (copy_from_user(&kvm_userspace_mem, argp, 2337 sizeof kvm_userspace_mem)) 2338 goto out; 2339 2340 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2341 break; 2342 } 2343 case KVM_GET_DIRTY_LOG: { 2344 struct kvm_dirty_log log; 2345 2346 r = -EFAULT; 2347 if (copy_from_user(&log, argp, sizeof log)) 2348 goto out; 2349 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2350 break; 2351 } 2352 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2353 case KVM_REGISTER_COALESCED_MMIO: { 2354 struct kvm_coalesced_mmio_zone zone; 2355 r = -EFAULT; 2356 if (copy_from_user(&zone, argp, sizeof zone)) 2357 goto out; 2358 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2359 break; 2360 } 2361 case KVM_UNREGISTER_COALESCED_MMIO: { 2362 struct kvm_coalesced_mmio_zone zone; 2363 r = -EFAULT; 2364 if (copy_from_user(&zone, argp, sizeof zone)) 2365 goto out; 2366 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2367 break; 2368 } 2369 #endif 2370 case KVM_IRQFD: { 2371 struct kvm_irqfd data; 2372 2373 r = -EFAULT; 2374 if (copy_from_user(&data, argp, sizeof data)) 2375 goto out; 2376 r = kvm_irqfd(kvm, &data); 2377 break; 2378 } 2379 case KVM_IOEVENTFD: { 2380 struct kvm_ioeventfd data; 2381 2382 r = -EFAULT; 2383 if (copy_from_user(&data, argp, sizeof data)) 2384 goto out; 2385 r = kvm_ioeventfd(kvm, &data); 2386 break; 2387 } 2388 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2389 case KVM_SET_BOOT_CPU_ID: 2390 r = 0; 2391 mutex_lock(&kvm->lock); 2392 if (atomic_read(&kvm->online_vcpus) != 0) 2393 r = -EBUSY; 2394 else 2395 kvm->bsp_vcpu_id = arg; 2396 mutex_unlock(&kvm->lock); 2397 break; 2398 #endif 2399 #ifdef CONFIG_HAVE_KVM_MSI 2400 case KVM_SIGNAL_MSI: { 2401 struct kvm_msi msi; 2402 2403 r = -EFAULT; 2404 if (copy_from_user(&msi, argp, sizeof msi)) 2405 goto out; 2406 r = kvm_send_userspace_msi(kvm, &msi); 2407 break; 2408 } 2409 #endif 2410 #ifdef __KVM_HAVE_IRQ_LINE 2411 case KVM_IRQ_LINE_STATUS: 2412 case KVM_IRQ_LINE: 
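	/*
	 * KVM_IRQ_LINE_STATUS shares this handler with KVM_IRQ_LINE; the
	 * only difference is that the (possibly updated) kvm_irq_level is
	 * copied back so the caller can observe the resulting line status.
	 */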
{ 2413 struct kvm_irq_level irq_event; 2414 2415 r = -EFAULT; 2416 if (copy_from_user(&irq_event, argp, sizeof irq_event)) 2417 goto out; 2418 2419 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2420 ioctl == KVM_IRQ_LINE_STATUS); 2421 if (r) 2422 goto out; 2423 2424 r = -EFAULT; 2425 if (ioctl == KVM_IRQ_LINE_STATUS) { 2426 if (copy_to_user(argp, &irq_event, sizeof irq_event)) 2427 goto out; 2428 } 2429 2430 r = 0; 2431 break; 2432 } 2433 #endif 2434 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2435 case KVM_SET_GSI_ROUTING: { 2436 struct kvm_irq_routing routing; 2437 struct kvm_irq_routing __user *urouting; 2438 struct kvm_irq_routing_entry *entries; 2439 2440 r = -EFAULT; 2441 if (copy_from_user(&routing, argp, sizeof(routing))) 2442 goto out; 2443 r = -EINVAL; 2444 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2445 goto out; 2446 if (routing.flags) 2447 goto out; 2448 r = -ENOMEM; 2449 entries = vmalloc(routing.nr * sizeof(*entries)); 2450 if (!entries) 2451 goto out; 2452 r = -EFAULT; 2453 urouting = argp; 2454 if (copy_from_user(entries, urouting->entries, 2455 routing.nr * sizeof(*entries))) 2456 goto out_free_irq_routing; 2457 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2458 routing.flags); 2459 out_free_irq_routing: 2460 vfree(entries); 2461 break; 2462 } 2463 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2464 case KVM_CREATE_DEVICE: { 2465 struct kvm_create_device cd; 2466 2467 r = -EFAULT; 2468 if (copy_from_user(&cd, argp, sizeof(cd))) 2469 goto out; 2470 2471 r = kvm_ioctl_create_device(kvm, &cd); 2472 if (r) 2473 goto out; 2474 2475 r = -EFAULT; 2476 if (copy_to_user(argp, &cd, sizeof(cd))) 2477 goto out; 2478 2479 r = 0; 2480 break; 2481 } 2482 default: 2483 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2484 if (r == -ENOTTY) 2485 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); 2486 } 2487 out: 2488 return r; 2489 } 2490 2491 #ifdef CONFIG_COMPAT 2492 struct compat_kvm_dirty_log { 2493 __u32 slot; 2494 __u32 padding1; 2495 union { 2496 compat_uptr_t dirty_bitmap; /* one bit per page */ 2497 __u64 padding2; 2498 }; 2499 }; 2500 2501 static long kvm_vm_compat_ioctl(struct file *filp, 2502 unsigned int ioctl, unsigned long arg) 2503 { 2504 struct kvm *kvm = filp->private_data; 2505 int r; 2506 2507 if (kvm->mm != current->mm) 2508 return -EIO; 2509 switch (ioctl) { 2510 case KVM_GET_DIRTY_LOG: { 2511 struct compat_kvm_dirty_log compat_log; 2512 struct kvm_dirty_log log; 2513 2514 r = -EFAULT; 2515 if (copy_from_user(&compat_log, (void __user *)arg, 2516 sizeof(compat_log))) 2517 goto out; 2518 log.slot = compat_log.slot; 2519 log.padding1 = compat_log.padding1; 2520 log.padding2 = compat_log.padding2; 2521 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2522 2523 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2524 break; 2525 } 2526 default: 2527 r = kvm_vm_ioctl(filp, ioctl, arg); 2528 } 2529 2530 out: 2531 return r; 2532 } 2533 #endif 2534 2535 static struct file_operations kvm_vm_fops = { 2536 .release = kvm_vm_release, 2537 .unlocked_ioctl = kvm_vm_ioctl, 2538 #ifdef CONFIG_COMPAT 2539 .compat_ioctl = kvm_vm_compat_ioctl, 2540 #endif 2541 .llseek = noop_llseek, 2542 }; 2543 2544 static int kvm_dev_ioctl_create_vm(unsigned long type) 2545 { 2546 int r; 2547 struct kvm *kvm; 2548 2549 kvm = kvm_create_vm(type); 2550 if (IS_ERR(kvm)) 2551 return PTR_ERR(kvm); 2552 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2553 r = kvm_coalesced_mmio_init(kvm); 2554 if (r < 0) { 2555 kvm_put_kvm(kvm); 2556 return r; 2557 } 2558 #endif 2559 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2560 if 
(r < 0) 2561 kvm_put_kvm(kvm); 2562 2563 return r; 2564 } 2565 2566 static long kvm_dev_ioctl_check_extension_generic(long arg) 2567 { 2568 switch (arg) { 2569 case KVM_CAP_USER_MEMORY: 2570 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2571 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2572 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2573 case KVM_CAP_SET_BOOT_CPU_ID: 2574 #endif 2575 case KVM_CAP_INTERNAL_ERROR_DATA: 2576 #ifdef CONFIG_HAVE_KVM_MSI 2577 case KVM_CAP_SIGNAL_MSI: 2578 #endif 2579 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2580 case KVM_CAP_IRQFD_RESAMPLE: 2581 #endif 2582 return 1; 2583 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2584 case KVM_CAP_IRQ_ROUTING: 2585 return KVM_MAX_IRQ_ROUTES; 2586 #endif 2587 default: 2588 break; 2589 } 2590 return kvm_dev_ioctl_check_extension(arg); 2591 } 2592 2593 static long kvm_dev_ioctl(struct file *filp, 2594 unsigned int ioctl, unsigned long arg) 2595 { 2596 long r = -EINVAL; 2597 2598 switch (ioctl) { 2599 case KVM_GET_API_VERSION: 2600 r = -EINVAL; 2601 if (arg) 2602 goto out; 2603 r = KVM_API_VERSION; 2604 break; 2605 case KVM_CREATE_VM: 2606 r = kvm_dev_ioctl_create_vm(arg); 2607 break; 2608 case KVM_CHECK_EXTENSION: 2609 r = kvm_dev_ioctl_check_extension_generic(arg); 2610 break; 2611 case KVM_GET_VCPU_MMAP_SIZE: 2612 r = -EINVAL; 2613 if (arg) 2614 goto out; 2615 r = PAGE_SIZE; /* struct kvm_run */ 2616 #ifdef CONFIG_X86 2617 r += PAGE_SIZE; /* pio data page */ 2618 #endif 2619 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2620 r += PAGE_SIZE; /* coalesced mmio ring page */ 2621 #endif 2622 break; 2623 case KVM_TRACE_ENABLE: 2624 case KVM_TRACE_PAUSE: 2625 case KVM_TRACE_DISABLE: 2626 r = -EOPNOTSUPP; 2627 break; 2628 default: 2629 return kvm_arch_dev_ioctl(filp, ioctl, arg); 2630 } 2631 out: 2632 return r; 2633 } 2634 2635 static struct file_operations kvm_chardev_ops = { 2636 .unlocked_ioctl = kvm_dev_ioctl, 2637 .compat_ioctl = kvm_dev_ioctl, 2638 .llseek = noop_llseek, 2639 }; 2640 2641 static struct miscdevice kvm_dev = { 2642 KVM_MINOR, 2643 "kvm", 2644 &kvm_chardev_ops, 2645 }; 2646 2647 static void hardware_enable_nolock(void *junk) 2648 { 2649 int cpu = raw_smp_processor_id(); 2650 int r; 2651 2652 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2653 return; 2654 2655 cpumask_set_cpu(cpu, cpus_hardware_enabled); 2656 2657 r = kvm_arch_hardware_enable(NULL); 2658 2659 if (r) { 2660 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2661 atomic_inc(&hardware_enable_failed); 2662 printk(KERN_INFO "kvm: enabling virtualization on " 2663 "CPU%d failed\n", cpu); 2664 } 2665 } 2666 2667 static void hardware_enable(void) 2668 { 2669 raw_spin_lock(&kvm_count_lock); 2670 if (kvm_usage_count) 2671 hardware_enable_nolock(NULL); 2672 raw_spin_unlock(&kvm_count_lock); 2673 } 2674 2675 static void hardware_disable_nolock(void *junk) 2676 { 2677 int cpu = raw_smp_processor_id(); 2678 2679 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2680 return; 2681 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2682 kvm_arch_hardware_disable(NULL); 2683 } 2684 2685 static void hardware_disable(void) 2686 { 2687 raw_spin_lock(&kvm_count_lock); 2688 if (kvm_usage_count) 2689 hardware_disable_nolock(NULL); 2690 raw_spin_unlock(&kvm_count_lock); 2691 } 2692 2693 static void hardware_disable_all_nolock(void) 2694 { 2695 BUG_ON(!kvm_usage_count); 2696 2697 kvm_usage_count--; 2698 if (!kvm_usage_count) 2699 on_each_cpu(hardware_disable_nolock, NULL, 1); 2700 } 2701 2702 static void hardware_disable_all(void) 2703 { 2704 raw_spin_lock(&kvm_count_lock); 2705 hardware_disable_all_nolock(); 2706 
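	/*
	 * hardware_disable_all_nolock() has dropped kvm_usage_count; if the
	 * count reached zero, virtualization was just switched off on every
	 * online CPU before we release kvm_count_lock.
	 */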
raw_spin_unlock(&kvm_count_lock); 2707 } 2708 2709 static int hardware_enable_all(void) 2710 { 2711 int r = 0; 2712 2713 raw_spin_lock(&kvm_count_lock); 2714 2715 kvm_usage_count++; 2716 if (kvm_usage_count == 1) { 2717 atomic_set(&hardware_enable_failed, 0); 2718 on_each_cpu(hardware_enable_nolock, NULL, 1); 2719 2720 if (atomic_read(&hardware_enable_failed)) { 2721 hardware_disable_all_nolock(); 2722 r = -EBUSY; 2723 } 2724 } 2725 2726 raw_spin_unlock(&kvm_count_lock); 2727 2728 return r; 2729 } 2730 2731 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 2732 void *v) 2733 { 2734 int cpu = (long)v; 2735 2736 val &= ~CPU_TASKS_FROZEN; 2737 switch (val) { 2738 case CPU_DYING: 2739 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2740 cpu); 2741 hardware_disable(); 2742 break; 2743 case CPU_STARTING: 2744 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2745 cpu); 2746 hardware_enable(); 2747 break; 2748 } 2749 return NOTIFY_OK; 2750 } 2751 2752 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 2753 void *v) 2754 { 2755 /* 2756 * Some (well, at least mine) BIOSes hang on reboot if 2757 * in vmx root mode. 2758 * 2759 * And Intel TXT requires VMX to be off on all CPUs at shutdown. 2760 */ 2761 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2762 kvm_rebooting = true; 2763 on_each_cpu(hardware_disable_nolock, NULL, 1); 2764 return NOTIFY_OK; 2765 } 2766 2767 static struct notifier_block kvm_reboot_notifier = { 2768 .notifier_call = kvm_reboot, 2769 .priority = 0, 2770 }; 2771 2772 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 2773 { 2774 int i; 2775 2776 for (i = 0; i < bus->dev_count; i++) { 2777 struct kvm_io_device *pos = bus->range[i].dev; 2778 2779 kvm_iodevice_destructor(pos); 2780 } 2781 kfree(bus); 2782 } 2783 2784 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 2785 const struct kvm_io_range *r2) 2786 { 2787 if (r1->addr < r2->addr) 2788 return -1; 2789 if (r1->addr + r1->len > r2->addr + r2->len) 2790 return 1; 2791 return 0; 2792 } 2793 2794 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 2795 { 2796 return kvm_io_bus_cmp(p1, p2); 2797 } 2798 2799 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 2800 gpa_t addr, int len) 2801 { 2802 bus->range[bus->dev_count++] = (struct kvm_io_range) { 2803 .addr = addr, 2804 .len = len, 2805 .dev = dev, 2806 }; 2807 2808 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 2809 kvm_io_bus_sort_cmp, NULL); 2810 2811 return 0; 2812 } 2813 2814 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 2815 gpa_t addr, int len) 2816 { 2817 struct kvm_io_range *range, key; 2818 int off; 2819 2820 key = (struct kvm_io_range) { 2821 .addr = addr, 2822 .len = len, 2823 }; 2824 2825 range = bsearch(&key, bus->range, bus->dev_count, 2826 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 2827 if (range == NULL) 2828 return -ENOENT; 2829 2830 off = range - bus->range; 2831 2832 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 2833 off--; 2834 2835 return off; 2836 } 2837 2838 static int __kvm_io_bus_write(struct kvm_io_bus *bus, 2839 struct kvm_io_range *range, const void *val) 2840 { 2841 int idx; 2842 2843 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2844 if (idx < 0) 2845 return -EOPNOTSUPP; 2846 2847 while (idx < bus->dev_count && 2848 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 2849 if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
2850 range->len, val)) 2851 return idx; 2852 idx++; 2853 } 2854 2855 return -EOPNOTSUPP; 2856 } 2857 2858 /* kvm_io_bus_write - called under kvm->slots_lock */ 2859 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2860 int len, const void *val) 2861 { 2862 struct kvm_io_bus *bus; 2863 struct kvm_io_range range; 2864 int r; 2865 2866 range = (struct kvm_io_range) { 2867 .addr = addr, 2868 .len = len, 2869 }; 2870 2871 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2872 r = __kvm_io_bus_write(bus, &range, val); 2873 return r < 0 ? r : 0; 2874 } 2875 2876 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 2877 int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2878 int len, const void *val, long cookie) 2879 { 2880 struct kvm_io_bus *bus; 2881 struct kvm_io_range range; 2882 2883 range = (struct kvm_io_range) { 2884 .addr = addr, 2885 .len = len, 2886 }; 2887 2888 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2889 2890 /* First try the device referenced by cookie. */ 2891 if ((cookie >= 0) && (cookie < bus->dev_count) && 2892 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 2893 if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len, 2894 val)) 2895 return cookie; 2896 2897 /* 2898 * cookie contained garbage; fall back to search and return the 2899 * correct cookie value. 2900 */ 2901 return __kvm_io_bus_write(bus, &range, val); 2902 } 2903 2904 static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, 2905 void *val) 2906 { 2907 int idx; 2908 2909 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2910 if (idx < 0) 2911 return -EOPNOTSUPP; 2912 2913 while (idx < bus->dev_count && 2914 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 2915 if (!kvm_iodevice_read(bus->range[idx].dev, range->addr, 2916 range->len, val)) 2917 return idx; 2918 idx++; 2919 } 2920 2921 return -EOPNOTSUPP; 2922 } 2923 2924 /* kvm_io_bus_read - called under kvm->slots_lock */ 2925 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2926 int len, void *val) 2927 { 2928 struct kvm_io_bus *bus; 2929 struct kvm_io_range range; 2930 int r; 2931 2932 range = (struct kvm_io_range) { 2933 .addr = addr, 2934 .len = len, 2935 }; 2936 2937 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2938 r = __kvm_io_bus_read(bus, &range, val); 2939 return r < 0 ? r : 0; 2940 } 2941 2942 2943 /* Caller must hold slots_lock. */ 2944 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2945 int len, struct kvm_io_device *dev) 2946 { 2947 struct kvm_io_bus *new_bus, *bus; 2948 2949 bus = kvm->buses[bus_idx]; 2950 /* exclude ioeventfd which is limited by maximum fd */ 2951 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 2952 return -ENOSPC; 2953 2954 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * 2955 sizeof(struct kvm_io_range)), GFP_KERNEL); 2956 if (!new_bus) 2957 return -ENOMEM; 2958 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 2959 sizeof(struct kvm_io_range))); 2960 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 2961 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2962 synchronize_srcu_expedited(&kvm->srcu); 2963 kfree(bus); 2964 2965 return 0; 2966 } 2967 2968 /* Caller must hold slots_lock. 
*/ 2969 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 2970 struct kvm_io_device *dev) 2971 { 2972 int i, r; 2973 struct kvm_io_bus *new_bus, *bus; 2974 2975 bus = kvm->buses[bus_idx]; 2976 r = -ENOENT; 2977 for (i = 0; i < bus->dev_count; i++) 2978 if (bus->range[i].dev == dev) { 2979 r = 0; 2980 break; 2981 } 2982 2983 if (r) 2984 return r; 2985 2986 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) * 2987 sizeof(struct kvm_io_range)), GFP_KERNEL); 2988 if (!new_bus) 2989 return -ENOMEM; 2990 2991 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 2992 new_bus->dev_count--; 2993 memcpy(new_bus->range + i, bus->range + i + 1, 2994 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 2995 2996 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2997 synchronize_srcu_expedited(&kvm->srcu); 2998 kfree(bus); 2999 return r; 3000 } 3001 3002 static struct notifier_block kvm_cpu_notifier = { 3003 .notifier_call = kvm_cpu_hotplug, 3004 }; 3005 3006 static int vm_stat_get(void *_offset, u64 *val) 3007 { 3008 unsigned offset = (long)_offset; 3009 struct kvm *kvm; 3010 3011 *val = 0; 3012 spin_lock(&kvm_lock); 3013 list_for_each_entry(kvm, &vm_list, vm_list) 3014 *val += *(u32 *)((void *)kvm + offset); 3015 spin_unlock(&kvm_lock); 3016 return 0; 3017 } 3018 3019 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3020 3021 static int vcpu_stat_get(void *_offset, u64 *val) 3022 { 3023 unsigned offset = (long)_offset; 3024 struct kvm *kvm; 3025 struct kvm_vcpu *vcpu; 3026 int i; 3027 3028 *val = 0; 3029 spin_lock(&kvm_lock); 3030 list_for_each_entry(kvm, &vm_list, vm_list) 3031 kvm_for_each_vcpu(i, vcpu, kvm) 3032 *val += *(u32 *)((void *)vcpu + offset); 3033 3034 spin_unlock(&kvm_lock); 3035 return 0; 3036 } 3037 3038 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3039 3040 static const struct file_operations *stat_fops[] = { 3041 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3042 [KVM_STAT_VM] = &vm_stat_fops, 3043 }; 3044 3045 static int kvm_init_debug(void) 3046 { 3047 int r = -EEXIST; 3048 struct kvm_stats_debugfs_item *p; 3049 3050 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3051 if (kvm_debugfs_dir == NULL) 3052 goto out; 3053 3054 for (p = debugfs_entries; p->name; ++p) { 3055 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3056 (void *)(long)p->offset, 3057 stat_fops[p->kind]); 3058 if (p->dentry == NULL) 3059 goto out_dir; 3060 } 3061 3062 return 0; 3063 3064 out_dir: 3065 debugfs_remove_recursive(kvm_debugfs_dir); 3066 out: 3067 return r; 3068 } 3069 3070 static void kvm_exit_debug(void) 3071 { 3072 struct kvm_stats_debugfs_item *p; 3073 3074 for (p = debugfs_entries; p->name; ++p) 3075 debugfs_remove(p->dentry); 3076 debugfs_remove(kvm_debugfs_dir); 3077 } 3078 3079 static int kvm_suspend(void) 3080 { 3081 if (kvm_usage_count) 3082 hardware_disable_nolock(NULL); 3083 return 0; 3084 } 3085 3086 static void kvm_resume(void) 3087 { 3088 if (kvm_usage_count) { 3089 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3090 hardware_enable_nolock(NULL); 3091 } 3092 } 3093 3094 static struct syscore_ops kvm_syscore_ops = { 3095 .suspend = kvm_suspend, 3096 .resume = kvm_resume, 3097 }; 3098 3099 static inline 3100 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3101 { 3102 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3103 } 3104 3105 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3106 { 3107 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3108 
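	/*
	 * The vCPU thread is being scheduled back in: clear the preempted
	 * hint and reload the guest state that kvm_sched_out() /
	 * kvm_arch_vcpu_put() saved when the thread was switched out.
	 */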
if (vcpu->preempted) 3109 vcpu->preempted = false; 3110 3111 kvm_arch_vcpu_load(vcpu, cpu); 3112 } 3113 3114 static void kvm_sched_out(struct preempt_notifier *pn, 3115 struct task_struct *next) 3116 { 3117 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3118 3119 if (current->state == TASK_RUNNING) 3120 vcpu->preempted = true; 3121 kvm_arch_vcpu_put(vcpu); 3122 } 3123 3124 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3125 struct module *module) 3126 { 3127 int r; 3128 int cpu; 3129 3130 r = kvm_arch_init(opaque); 3131 if (r) 3132 goto out_fail; 3133 3134 /* 3135 * kvm_arch_init makes sure there's at most one caller 3136 * for architectures that support multiple implementations, 3137 * like intel and amd on x86. 3138 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3139 * conflicts in case kvm is already setup for another implementation. 3140 */ 3141 r = kvm_irqfd_init(); 3142 if (r) 3143 goto out_irqfd; 3144 3145 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3146 r = -ENOMEM; 3147 goto out_free_0; 3148 } 3149 3150 r = kvm_arch_hardware_setup(); 3151 if (r < 0) 3152 goto out_free_0a; 3153 3154 for_each_online_cpu(cpu) { 3155 smp_call_function_single(cpu, 3156 kvm_arch_check_processor_compat, 3157 &r, 1); 3158 if (r < 0) 3159 goto out_free_1; 3160 } 3161 3162 r = register_cpu_notifier(&kvm_cpu_notifier); 3163 if (r) 3164 goto out_free_2; 3165 register_reboot_notifier(&kvm_reboot_notifier); 3166 3167 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 3168 if (!vcpu_align) 3169 vcpu_align = __alignof__(struct kvm_vcpu); 3170 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3171 0, NULL); 3172 if (!kvm_vcpu_cache) { 3173 r = -ENOMEM; 3174 goto out_free_3; 3175 } 3176 3177 r = kvm_async_pf_init(); 3178 if (r) 3179 goto out_free; 3180 3181 kvm_chardev_ops.owner = module; 3182 kvm_vm_fops.owner = module; 3183 kvm_vcpu_fops.owner = module; 3184 3185 r = misc_register(&kvm_dev); 3186 if (r) { 3187 printk(KERN_ERR "kvm: misc device register failed\n"); 3188 goto out_unreg; 3189 } 3190 3191 register_syscore_ops(&kvm_syscore_ops); 3192 3193 kvm_preempt_ops.sched_in = kvm_sched_in; 3194 kvm_preempt_ops.sched_out = kvm_sched_out; 3195 3196 r = kvm_init_debug(); 3197 if (r) { 3198 printk(KERN_ERR "kvm: create debugfs files failed\n"); 3199 goto out_undebugfs; 3200 } 3201 3202 return 0; 3203 3204 out_undebugfs: 3205 unregister_syscore_ops(&kvm_syscore_ops); 3206 misc_deregister(&kvm_dev); 3207 out_unreg: 3208 kvm_async_pf_deinit(); 3209 out_free: 3210 kmem_cache_destroy(kvm_vcpu_cache); 3211 out_free_3: 3212 unregister_reboot_notifier(&kvm_reboot_notifier); 3213 unregister_cpu_notifier(&kvm_cpu_notifier); 3214 out_free_2: 3215 out_free_1: 3216 kvm_arch_hardware_unsetup(); 3217 out_free_0a: 3218 free_cpumask_var(cpus_hardware_enabled); 3219 out_free_0: 3220 kvm_irqfd_exit(); 3221 out_irqfd: 3222 kvm_arch_exit(); 3223 out_fail: 3224 return r; 3225 } 3226 EXPORT_SYMBOL_GPL(kvm_init); 3227 3228 void kvm_exit(void) 3229 { 3230 kvm_exit_debug(); 3231 misc_deregister(&kvm_dev); 3232 kmem_cache_destroy(kvm_vcpu_cache); 3233 kvm_async_pf_deinit(); 3234 unregister_syscore_ops(&kvm_syscore_ops); 3235 unregister_reboot_notifier(&kvm_reboot_notifier); 3236 unregister_cpu_notifier(&kvm_cpu_notifier); 3237 on_each_cpu(hardware_disable_nolock, NULL, 1); 3238 kvm_arch_hardware_unsetup(); 3239 kvm_arch_exit(); 3240 kvm_irqfd_exit(); 3241 free_cpumask_var(cpus_hardware_enabled); 3242 } 3243 
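/*
 * A minimal sketch (an assumption, not taken from this file) of how an
 * architecture module is expected to drive kvm_init()/kvm_exit() from its
 * module hooks.  The names my_arch_ops, struct my_vcpu and the two module
 * functions are hypothetical placeholders; the opaque pointer is simply
 * forwarded to kvm_arch_init() as shown above.
 */
#if 0
static int __init my_arch_module_init(void)
{
	return kvm_init(&my_arch_ops, sizeof(struct my_vcpu),
			__alignof__(struct my_vcpu), THIS_MODULE);
}

static void __exit my_arch_module_exit(void)
{
	kvm_exit();
}

module_init(my_arch_module_init);
module_exit(my_arch_module_exit);
#endif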
EXPORT_SYMBOL_GPL(kvm_exit); 3244
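/*
 * An illustrative userspace sketch (not part of this module) of the ioctl
 * flow implemented above: open /dev/kvm, check the API version, create a VM
 * and a vCPU, mmap the shared kvm_run area using the size reported by
 * KVM_GET_VCPU_MMAP_SIZE, and enter the KVM_RUN loop.  run_vcpu_example is a
 * hypothetical name; error handling and guest memory/register setup are
 * deliberately omitted, so this shows the call sequence rather than a
 * complete launcher.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int run_vcpu_example(void)
{
	int kvm_fd, vm_fd, vcpu_fd, mmap_size;
	struct kvm_run *run;

	kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;

	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* kvm_dev_ioctl_create_vm() */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* kvm_vm_ioctl_create_vcpu() */

	/* The vCPU fd exposes struct kvm_run via mmap at offset 0. */
	mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);	/* kvm_arch_vcpu_ioctl_run() */
		switch (run->exit_reason) {
		case KVM_EXIT_HLT:
			return 0;
		default:
			/* handle MMIO, PIO and other exits here */
			break;
		}
	}
}
#endif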