1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include "iodev.h" 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched.h> 36 #include <linux/cpumask.h> 37 #include <linux/smp.h> 38 #include <linux/anon_inodes.h> 39 #include <linux/profile.h> 40 #include <linux/kvm_para.h> 41 #include <linux/pagemap.h> 42 #include <linux/mman.h> 43 #include <linux/swap.h> 44 #include <linux/bitops.h> 45 #include <linux/spinlock.h> 46 #include <linux/compat.h> 47 #include <linux/srcu.h> 48 #include <linux/hugetlb.h> 49 #include <linux/slab.h> 50 #include <linux/sort.h> 51 #include <linux/bsearch.h> 52 53 #include <asm/processor.h> 54 #include <asm/io.h> 55 #include <asm/ioctl.h> 56 #include <asm/uaccess.h> 57 #include <asm/pgtable.h> 58 59 #include "coalesced_mmio.h" 60 #include "async_pf.h" 61 #include "vfio.h" 62 63 #define CREATE_TRACE_POINTS 64 #include <trace/events/kvm.h> 65 66 MODULE_AUTHOR("Qumranet"); 67 MODULE_LICENSE("GPL"); 68 69 /* 70 * Ordering of locks: 71 * 72 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 73 */ 74 75 DEFINE_SPINLOCK(kvm_lock); 76 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 77 LIST_HEAD(vm_list); 78 79 static cpumask_var_t cpus_hardware_enabled; 80 static int kvm_usage_count = 0; 81 static atomic_t hardware_enable_failed; 82 83 struct kmem_cache *kvm_vcpu_cache; 84 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 85 86 static __read_mostly struct preempt_ops kvm_preempt_ops; 87 88 struct dentry *kvm_debugfs_dir; 89 90 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 91 unsigned long arg); 92 #ifdef CONFIG_COMPAT 93 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 94 unsigned long arg); 95 #endif 96 static int hardware_enable_all(void); 97 static void hardware_disable_all(void); 98 99 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 100 101 static void kvm_release_pfn_dirty(pfn_t pfn); 102 static void mark_page_dirty_in_slot(struct kvm *kvm, 103 struct kvm_memory_slot *memslot, gfn_t gfn); 104 105 __visible bool kvm_rebooting; 106 EXPORT_SYMBOL_GPL(kvm_rebooting); 107 108 static bool largepages_enabled = true; 109 110 bool kvm_is_reserved_pfn(pfn_t pfn) 111 { 112 if (pfn_valid(pfn)) 113 return PageReserved(pfn_to_page(pfn)); 114 115 return true; 116 } 117 118 /* 119 * Switches to specified vcpu, until a matching vcpu_put() 120 */ 121 int vcpu_load(struct kvm_vcpu *vcpu) 122 { 123 int cpu; 124 125 if (mutex_lock_killable(&vcpu->mutex)) 126 return -EINTR; 127 cpu = get_cpu(); 128 preempt_notifier_register(&vcpu->preempt_notifier); 129 kvm_arch_vcpu_load(vcpu, cpu); 130 put_cpu(); 131 return 0; 132 } 133 134 void vcpu_put(struct kvm_vcpu *vcpu) 135 { 136 preempt_disable(); 137 kvm_arch_vcpu_put(vcpu); 
138 preempt_notifier_unregister(&vcpu->preempt_notifier); 139 preempt_enable(); 140 mutex_unlock(&vcpu->mutex); 141 } 142 143 static void ack_flush(void *_completed) 144 { 145 } 146 147 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 148 { 149 int i, cpu, me; 150 cpumask_var_t cpus; 151 bool called = true; 152 struct kvm_vcpu *vcpu; 153 154 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 155 156 me = get_cpu(); 157 kvm_for_each_vcpu(i, vcpu, kvm) { 158 kvm_make_request(req, vcpu); 159 cpu = vcpu->cpu; 160 161 /* Set ->requests bit before we read ->mode */ 162 smp_mb(); 163 164 if (cpus != NULL && cpu != -1 && cpu != me && 165 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 166 cpumask_set_cpu(cpu, cpus); 167 } 168 if (unlikely(cpus == NULL)) 169 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 170 else if (!cpumask_empty(cpus)) 171 smp_call_function_many(cpus, ack_flush, NULL, 1); 172 else 173 called = false; 174 put_cpu(); 175 free_cpumask_var(cpus); 176 return called; 177 } 178 179 void kvm_flush_remote_tlbs(struct kvm *kvm) 180 { 181 long dirty_count = kvm->tlbs_dirty; 182 183 smp_mb(); 184 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 185 ++kvm->stat.remote_tlb_flush; 186 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 187 } 188 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 189 190 void kvm_reload_remote_mmus(struct kvm *kvm) 191 { 192 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 193 } 194 195 void kvm_make_mclock_inprogress_request(struct kvm *kvm) 196 { 197 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 198 } 199 200 void kvm_make_scan_ioapic_request(struct kvm *kvm) 201 { 202 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 203 } 204 205 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 206 { 207 struct page *page; 208 int r; 209 210 mutex_init(&vcpu->mutex); 211 vcpu->cpu = -1; 212 vcpu->kvm = kvm; 213 vcpu->vcpu_id = id; 214 vcpu->pid = NULL; 215 init_waitqueue_head(&vcpu->wq); 216 kvm_async_pf_vcpu_init(vcpu); 217 218 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 219 if (!page) { 220 r = -ENOMEM; 221 goto fail; 222 } 223 vcpu->run = page_address(page); 224 225 kvm_vcpu_set_in_spin_loop(vcpu, false); 226 kvm_vcpu_set_dy_eligible(vcpu, false); 227 vcpu->preempted = false; 228 229 r = kvm_arch_vcpu_init(vcpu); 230 if (r < 0) 231 goto fail_free_run; 232 return 0; 233 234 fail_free_run: 235 free_page((unsigned long)vcpu->run); 236 fail: 237 return r; 238 } 239 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 240 241 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 242 { 243 put_pid(vcpu->pid); 244 kvm_arch_vcpu_uninit(vcpu); 245 free_page((unsigned long)vcpu->run); 246 } 247 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 248 249 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 250 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 251 { 252 return container_of(mn, struct kvm, mmu_notifier); 253 } 254 255 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 256 struct mm_struct *mm, 257 unsigned long address) 258 { 259 struct kvm *kvm = mmu_notifier_to_kvm(mn); 260 int need_tlb_flush, idx; 261 262 /* 263 * When ->invalidate_page runs, the linux pte has been zapped 264 * already but the page is still allocated until 265 * ->invalidate_page returns. So if we increase the sequence 266 * here the kvm page fault will notice if the spte can't be 267 * established because the page is going to be freed. 
If 268 * instead the kvm page fault establishes the spte before 269 * ->invalidate_page runs, kvm_unmap_hva will release it 270 * before returning. 271 * 272 * The sequence increase only need to be seen at spin_unlock 273 * time, and not at spin_lock time. 274 * 275 * Increasing the sequence after the spin_unlock would be 276 * unsafe because the kvm page fault could then establish the 277 * pte after kvm_unmap_hva returned, without noticing the page 278 * is going to be freed. 279 */ 280 idx = srcu_read_lock(&kvm->srcu); 281 spin_lock(&kvm->mmu_lock); 282 283 kvm->mmu_notifier_seq++; 284 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 285 /* we've to flush the tlb before the pages can be freed */ 286 if (need_tlb_flush) 287 kvm_flush_remote_tlbs(kvm); 288 289 spin_unlock(&kvm->mmu_lock); 290 291 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 292 293 srcu_read_unlock(&kvm->srcu, idx); 294 } 295 296 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 297 struct mm_struct *mm, 298 unsigned long address, 299 pte_t pte) 300 { 301 struct kvm *kvm = mmu_notifier_to_kvm(mn); 302 int idx; 303 304 idx = srcu_read_lock(&kvm->srcu); 305 spin_lock(&kvm->mmu_lock); 306 kvm->mmu_notifier_seq++; 307 kvm_set_spte_hva(kvm, address, pte); 308 spin_unlock(&kvm->mmu_lock); 309 srcu_read_unlock(&kvm->srcu, idx); 310 } 311 312 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 313 struct mm_struct *mm, 314 unsigned long start, 315 unsigned long end) 316 { 317 struct kvm *kvm = mmu_notifier_to_kvm(mn); 318 int need_tlb_flush = 0, idx; 319 320 idx = srcu_read_lock(&kvm->srcu); 321 spin_lock(&kvm->mmu_lock); 322 /* 323 * The count increase must become visible at unlock time as no 324 * spte can be established without taking the mmu_lock and 325 * count is also read inside the mmu_lock critical section. 326 */ 327 kvm->mmu_notifier_count++; 328 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 329 need_tlb_flush |= kvm->tlbs_dirty; 330 /* we've to flush the tlb before the pages can be freed */ 331 if (need_tlb_flush) 332 kvm_flush_remote_tlbs(kvm); 333 334 spin_unlock(&kvm->mmu_lock); 335 srcu_read_unlock(&kvm->srcu, idx); 336 } 337 338 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 339 struct mm_struct *mm, 340 unsigned long start, 341 unsigned long end) 342 { 343 struct kvm *kvm = mmu_notifier_to_kvm(mn); 344 345 spin_lock(&kvm->mmu_lock); 346 /* 347 * This sequence increase will notify the kvm page fault that 348 * the page that is going to be mapped in the spte could have 349 * been freed. 350 */ 351 kvm->mmu_notifier_seq++; 352 smp_wmb(); 353 /* 354 * The above sequence increase must be visible before the 355 * below count decrease, which is ensured by the smp_wmb above 356 * in conjunction with the smp_rmb in mmu_notifier_retry(). 
357 */ 358 kvm->mmu_notifier_count--; 359 spin_unlock(&kvm->mmu_lock); 360 361 BUG_ON(kvm->mmu_notifier_count < 0); 362 } 363 364 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 365 struct mm_struct *mm, 366 unsigned long start, 367 unsigned long end) 368 { 369 struct kvm *kvm = mmu_notifier_to_kvm(mn); 370 int young, idx; 371 372 idx = srcu_read_lock(&kvm->srcu); 373 spin_lock(&kvm->mmu_lock); 374 375 young = kvm_age_hva(kvm, start, end); 376 if (young) 377 kvm_flush_remote_tlbs(kvm); 378 379 spin_unlock(&kvm->mmu_lock); 380 srcu_read_unlock(&kvm->srcu, idx); 381 382 return young; 383 } 384 385 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 386 struct mm_struct *mm, 387 unsigned long address) 388 { 389 struct kvm *kvm = mmu_notifier_to_kvm(mn); 390 int young, idx; 391 392 idx = srcu_read_lock(&kvm->srcu); 393 spin_lock(&kvm->mmu_lock); 394 young = kvm_test_age_hva(kvm, address); 395 spin_unlock(&kvm->mmu_lock); 396 srcu_read_unlock(&kvm->srcu, idx); 397 398 return young; 399 } 400 401 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 402 struct mm_struct *mm) 403 { 404 struct kvm *kvm = mmu_notifier_to_kvm(mn); 405 int idx; 406 407 idx = srcu_read_lock(&kvm->srcu); 408 kvm_arch_flush_shadow_all(kvm); 409 srcu_read_unlock(&kvm->srcu, idx); 410 } 411 412 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 413 .invalidate_page = kvm_mmu_notifier_invalidate_page, 414 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 415 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 416 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 417 .test_young = kvm_mmu_notifier_test_young, 418 .change_pte = kvm_mmu_notifier_change_pte, 419 .release = kvm_mmu_notifier_release, 420 }; 421 422 static int kvm_init_mmu_notifier(struct kvm *kvm) 423 { 424 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 425 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 426 } 427 428 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 429 430 static int kvm_init_mmu_notifier(struct kvm *kvm) 431 { 432 return 0; 433 } 434 435 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 436 437 static void kvm_init_memslots_id(struct kvm *kvm) 438 { 439 int i; 440 struct kvm_memslots *slots = kvm->memslots; 441 442 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 443 slots->id_to_index[i] = slots->memslots[i].id = i; 444 } 445 446 static struct kvm *kvm_create_vm(unsigned long type) 447 { 448 int r, i; 449 struct kvm *kvm = kvm_arch_alloc_vm(); 450 451 if (!kvm) 452 return ERR_PTR(-ENOMEM); 453 454 r = kvm_arch_init_vm(kvm, type); 455 if (r) 456 goto out_err_no_disable; 457 458 r = hardware_enable_all(); 459 if (r) 460 goto out_err_no_disable; 461 462 #ifdef CONFIG_HAVE_KVM_IRQFD 463 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 464 #endif 465 466 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 467 468 r = -ENOMEM; 469 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 470 if (!kvm->memslots) 471 goto out_err_no_srcu; 472 473 /* 474 * Init kvm generation close to the maximum to easily test the 475 * code of handling generation number wrap-around. 
476 */ 477 kvm->memslots->generation = -150; 478 479 kvm_init_memslots_id(kvm); 480 if (init_srcu_struct(&kvm->srcu)) 481 goto out_err_no_srcu; 482 if (init_srcu_struct(&kvm->irq_srcu)) 483 goto out_err_no_irq_srcu; 484 for (i = 0; i < KVM_NR_BUSES; i++) { 485 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 486 GFP_KERNEL); 487 if (!kvm->buses[i]) 488 goto out_err; 489 } 490 491 spin_lock_init(&kvm->mmu_lock); 492 kvm->mm = current->mm; 493 atomic_inc(&kvm->mm->mm_count); 494 kvm_eventfd_init(kvm); 495 mutex_init(&kvm->lock); 496 mutex_init(&kvm->irq_lock); 497 mutex_init(&kvm->slots_lock); 498 atomic_set(&kvm->users_count, 1); 499 INIT_LIST_HEAD(&kvm->devices); 500 501 r = kvm_init_mmu_notifier(kvm); 502 if (r) 503 goto out_err; 504 505 spin_lock(&kvm_lock); 506 list_add(&kvm->vm_list, &vm_list); 507 spin_unlock(&kvm_lock); 508 509 return kvm; 510 511 out_err: 512 cleanup_srcu_struct(&kvm->irq_srcu); 513 out_err_no_irq_srcu: 514 cleanup_srcu_struct(&kvm->srcu); 515 out_err_no_srcu: 516 hardware_disable_all(); 517 out_err_no_disable: 518 for (i = 0; i < KVM_NR_BUSES; i++) 519 kfree(kvm->buses[i]); 520 kfree(kvm->memslots); 521 kvm_arch_free_vm(kvm); 522 return ERR_PTR(r); 523 } 524 525 /* 526 * Avoid using vmalloc for a small buffer. 527 * Should not be used when the size is statically known. 528 */ 529 void *kvm_kvzalloc(unsigned long size) 530 { 531 if (size > PAGE_SIZE) 532 return vzalloc(size); 533 else 534 return kzalloc(size, GFP_KERNEL); 535 } 536 537 void kvm_kvfree(const void *addr) 538 { 539 if (is_vmalloc_addr(addr)) 540 vfree(addr); 541 else 542 kfree(addr); 543 } 544 545 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 546 { 547 if (!memslot->dirty_bitmap) 548 return; 549 550 kvm_kvfree(memslot->dirty_bitmap); 551 memslot->dirty_bitmap = NULL; 552 } 553 554 /* 555 * Free any memory in @free but not in @dont. 
556 */ 557 static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free, 558 struct kvm_memory_slot *dont) 559 { 560 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 561 kvm_destroy_dirty_bitmap(free); 562 563 kvm_arch_free_memslot(kvm, free, dont); 564 565 free->npages = 0; 566 } 567 568 static void kvm_free_physmem(struct kvm *kvm) 569 { 570 struct kvm_memslots *slots = kvm->memslots; 571 struct kvm_memory_slot *memslot; 572 573 kvm_for_each_memslot(memslot, slots) 574 kvm_free_physmem_slot(kvm, memslot, NULL); 575 576 kfree(kvm->memslots); 577 } 578 579 static void kvm_destroy_devices(struct kvm *kvm) 580 { 581 struct list_head *node, *tmp; 582 583 list_for_each_safe(node, tmp, &kvm->devices) { 584 struct kvm_device *dev = 585 list_entry(node, struct kvm_device, vm_node); 586 587 list_del(node); 588 dev->ops->destroy(dev); 589 } 590 } 591 592 static void kvm_destroy_vm(struct kvm *kvm) 593 { 594 int i; 595 struct mm_struct *mm = kvm->mm; 596 597 kvm_arch_sync_events(kvm); 598 spin_lock(&kvm_lock); 599 list_del(&kvm->vm_list); 600 spin_unlock(&kvm_lock); 601 kvm_free_irq_routing(kvm); 602 for (i = 0; i < KVM_NR_BUSES; i++) 603 kvm_io_bus_destroy(kvm->buses[i]); 604 kvm_coalesced_mmio_free(kvm); 605 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 606 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 607 #else 608 kvm_arch_flush_shadow_all(kvm); 609 #endif 610 kvm_arch_destroy_vm(kvm); 611 kvm_destroy_devices(kvm); 612 kvm_free_physmem(kvm); 613 cleanup_srcu_struct(&kvm->irq_srcu); 614 cleanup_srcu_struct(&kvm->srcu); 615 kvm_arch_free_vm(kvm); 616 hardware_disable_all(); 617 mmdrop(mm); 618 } 619 620 void kvm_get_kvm(struct kvm *kvm) 621 { 622 atomic_inc(&kvm->users_count); 623 } 624 EXPORT_SYMBOL_GPL(kvm_get_kvm); 625 626 void kvm_put_kvm(struct kvm *kvm) 627 { 628 if (atomic_dec_and_test(&kvm->users_count)) 629 kvm_destroy_vm(kvm); 630 } 631 EXPORT_SYMBOL_GPL(kvm_put_kvm); 632 633 634 static int kvm_vm_release(struct inode *inode, struct file *filp) 635 { 636 struct kvm *kvm = filp->private_data; 637 638 kvm_irqfd_release(kvm); 639 640 kvm_put_kvm(kvm); 641 return 0; 642 } 643 644 /* 645 * Allocation size is twice as large as the actual dirty bitmap size. 646 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 647 */ 648 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 649 { 650 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 651 652 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 653 if (!memslot->dirty_bitmap) 654 return -ENOMEM; 655 656 return 0; 657 } 658 659 /* 660 * Insert memslot and re-sort memslots based on their GFN, 661 * so binary search could be used to lookup GFN. 662 * Sorting algorithm takes advantage of having initially 663 * sorted array and known changed memslot position. 
664 */ 665 static void update_memslots(struct kvm_memslots *slots, 666 struct kvm_memory_slot *new) 667 { 668 int id = new->id; 669 int i = slots->id_to_index[id]; 670 struct kvm_memory_slot *mslots = slots->memslots; 671 672 WARN_ON(mslots[i].id != id); 673 if (!new->npages) { 674 WARN_ON(!mslots[i].npages); 675 new->base_gfn = 0; 676 if (mslots[i].npages) 677 slots->used_slots--; 678 } else { 679 if (!mslots[i].npages) 680 slots->used_slots++; 681 } 682 683 while (i < KVM_MEM_SLOTS_NUM - 1 && 684 new->base_gfn <= mslots[i + 1].base_gfn) { 685 if (!mslots[i + 1].npages) 686 break; 687 mslots[i] = mslots[i + 1]; 688 slots->id_to_index[mslots[i].id] = i; 689 i++; 690 } 691 692 /* 693 * The ">=" is needed when creating a slot with base_gfn == 0, 694 * so that it moves before all those with base_gfn == npages == 0. 695 * 696 * On the other hand, if new->npages is zero, the above loop has 697 * already left i pointing to the beginning of the empty part of 698 * mslots, and the ">=" would move the hole backwards in this 699 * case---which is wrong. So skip the loop when deleting a slot. 700 */ 701 if (new->npages) { 702 while (i > 0 && 703 new->base_gfn >= mslots[i - 1].base_gfn) { 704 mslots[i] = mslots[i - 1]; 705 slots->id_to_index[mslots[i].id] = i; 706 i--; 707 } 708 } else 709 WARN_ON_ONCE(i != slots->used_slots); 710 711 mslots[i] = *new; 712 slots->id_to_index[mslots[i].id] = i; 713 } 714 715 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) 716 { 717 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 718 719 #ifdef __KVM_HAVE_READONLY_MEM 720 valid_flags |= KVM_MEM_READONLY; 721 #endif 722 723 if (mem->flags & ~valid_flags) 724 return -EINVAL; 725 726 return 0; 727 } 728 729 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 730 struct kvm_memslots *slots) 731 { 732 struct kvm_memslots *old_memslots = kvm->memslots; 733 734 /* 735 * Set the low bit in the generation, which disables SPTE caching 736 * until the end of synchronize_srcu_expedited. 737 */ 738 WARN_ON(old_memslots->generation & 1); 739 slots->generation = old_memslots->generation + 1; 740 741 rcu_assign_pointer(kvm->memslots, slots); 742 synchronize_srcu_expedited(&kvm->srcu); 743 744 /* 745 * Increment the new memslot generation a second time. This prevents 746 * vm exits that race with memslot updates from caching a memslot 747 * generation that will (potentially) be valid forever. 748 */ 749 slots->generation++; 750 751 kvm_arch_memslots_updated(kvm); 752 753 return old_memslots; 754 } 755 756 /* 757 * Allocate some memory and give it an address in the guest physical address 758 * space. 759 * 760 * Discontiguous memory is allowed, mostly for framebuffers. 761 * 762 * Must be called holding kvm->slots_lock for write. 763 */ 764 int __kvm_set_memory_region(struct kvm *kvm, 765 struct kvm_userspace_memory_region *mem) 766 { 767 int r; 768 gfn_t base_gfn; 769 unsigned long npages; 770 struct kvm_memory_slot *slot; 771 struct kvm_memory_slot old, new; 772 struct kvm_memslots *slots = NULL, *old_memslots; 773 enum kvm_mr_change change; 774 775 r = check_memory_region_flags(mem); 776 if (r) 777 goto out; 778 779 r = -EINVAL; 780 /* General sanity checks */ 781 if (mem->memory_size & (PAGE_SIZE - 1)) 782 goto out; 783 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 784 goto out; 785 /* We can read the guest memory with __xxx_user() later on. 
*/ 786 if ((mem->slot < KVM_USER_MEM_SLOTS) && 787 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 788 !access_ok(VERIFY_WRITE, 789 (void __user *)(unsigned long)mem->userspace_addr, 790 mem->memory_size))) 791 goto out; 792 if (mem->slot >= KVM_MEM_SLOTS_NUM) 793 goto out; 794 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 795 goto out; 796 797 slot = id_to_memslot(kvm->memslots, mem->slot); 798 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 799 npages = mem->memory_size >> PAGE_SHIFT; 800 801 if (npages > KVM_MEM_MAX_NR_PAGES) 802 goto out; 803 804 if (!npages) 805 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; 806 807 new = old = *slot; 808 809 new.id = mem->slot; 810 new.base_gfn = base_gfn; 811 new.npages = npages; 812 new.flags = mem->flags; 813 814 if (npages) { 815 if (!old.npages) 816 change = KVM_MR_CREATE; 817 else { /* Modify an existing slot. */ 818 if ((mem->userspace_addr != old.userspace_addr) || 819 (npages != old.npages) || 820 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 821 goto out; 822 823 if (base_gfn != old.base_gfn) 824 change = KVM_MR_MOVE; 825 else if (new.flags != old.flags) 826 change = KVM_MR_FLAGS_ONLY; 827 else { /* Nothing to change. */ 828 r = 0; 829 goto out; 830 } 831 } 832 } else if (old.npages) { 833 change = KVM_MR_DELETE; 834 } else /* Modify a non-existent slot: disallowed. */ 835 goto out; 836 837 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 838 /* Check for overlaps */ 839 r = -EEXIST; 840 kvm_for_each_memslot(slot, kvm->memslots) { 841 if ((slot->id >= KVM_USER_MEM_SLOTS) || 842 (slot->id == mem->slot)) 843 continue; 844 if (!((base_gfn + npages <= slot->base_gfn) || 845 (base_gfn >= slot->base_gfn + slot->npages))) 846 goto out; 847 } 848 } 849 850 /* Free page dirty bitmap if unneeded */ 851 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 852 new.dirty_bitmap = NULL; 853 854 r = -ENOMEM; 855 if (change == KVM_MR_CREATE) { 856 new.userspace_addr = mem->userspace_addr; 857 858 if (kvm_arch_create_memslot(kvm, &new, npages)) 859 goto out_free; 860 } 861 862 /* Allocate page dirty bitmap if needed */ 863 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 864 if (kvm_create_dirty_bitmap(&new) < 0) 865 goto out_free; 866 } 867 868 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), 869 GFP_KERNEL); 870 if (!slots) 871 goto out_free; 872 873 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 874 slot = id_to_memslot(slots, mem->slot); 875 slot->flags |= KVM_MEMSLOT_INVALID; 876 877 old_memslots = install_new_memslots(kvm, slots); 878 879 /* slot was deleted or moved, clear iommu mapping */ 880 kvm_iommu_unmap_pages(kvm, &old); 881 /* From this point no new shadow pages pointing to a deleted, 882 * or moved, memslot will be created. 883 * 884 * validation of sp->gfn happens in: 885 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 886 * - kvm_is_visible_gfn (mmu_check_roots) 887 */ 888 kvm_arch_flush_shadow_memslot(kvm, slot); 889 890 /* 891 * We can re-use the old_memslots from above, the only difference 892 * from the currently installed memslots is the invalid flag. This 893 * will get overwritten by update_memslots anyway. 
894 */ 895 slots = old_memslots; 896 } 897 898 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 899 if (r) 900 goto out_slots; 901 902 /* actual memory is freed via old in kvm_free_physmem_slot below */ 903 if (change == KVM_MR_DELETE) { 904 new.dirty_bitmap = NULL; 905 memset(&new.arch, 0, sizeof(new.arch)); 906 } 907 908 update_memslots(slots, &new); 909 old_memslots = install_new_memslots(kvm, slots); 910 911 kvm_arch_commit_memory_region(kvm, mem, &old, change); 912 913 kvm_free_physmem_slot(kvm, &old, &new); 914 kfree(old_memslots); 915 916 /* 917 * IOMMU mapping: New slots need to be mapped. Old slots need to be 918 * un-mapped and re-mapped if their base changes. Since base change 919 * unmapping is handled above with slot deletion, mapping alone is 920 * needed here. Anything else the iommu might care about for existing 921 * slots (size changes, userspace addr changes and read-only flag 922 * changes) is disallowed above, so any other attribute changes getting 923 * here can be skipped. 924 */ 925 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 926 r = kvm_iommu_map_pages(kvm, &new); 927 return r; 928 } 929 930 return 0; 931 932 out_slots: 933 kfree(slots); 934 out_free: 935 kvm_free_physmem_slot(kvm, &new, &old); 936 out: 937 return r; 938 } 939 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 940 941 int kvm_set_memory_region(struct kvm *kvm, 942 struct kvm_userspace_memory_region *mem) 943 { 944 int r; 945 946 mutex_lock(&kvm->slots_lock); 947 r = __kvm_set_memory_region(kvm, mem); 948 mutex_unlock(&kvm->slots_lock); 949 return r; 950 } 951 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 952 953 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 954 struct kvm_userspace_memory_region *mem) 955 { 956 if (mem->slot >= KVM_USER_MEM_SLOTS) 957 return -EINVAL; 958 return kvm_set_memory_region(kvm, mem); 959 } 960 961 int kvm_get_dirty_log(struct kvm *kvm, 962 struct kvm_dirty_log *log, int *is_dirty) 963 { 964 struct kvm_memory_slot *memslot; 965 int r, i; 966 unsigned long n; 967 unsigned long any = 0; 968 969 r = -EINVAL; 970 if (log->slot >= KVM_USER_MEM_SLOTS) 971 goto out; 972 973 memslot = id_to_memslot(kvm->memslots, log->slot); 974 r = -ENOENT; 975 if (!memslot->dirty_bitmap) 976 goto out; 977 978 n = kvm_dirty_bitmap_bytes(memslot); 979 980 for (i = 0; !any && i < n/sizeof(long); ++i) 981 any = memslot->dirty_bitmap[i]; 982 983 r = -EFAULT; 984 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 985 goto out; 986 987 if (any) 988 *is_dirty = 1; 989 990 r = 0; 991 out: 992 return r; 993 } 994 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 995 996 bool kvm_largepages_enabled(void) 997 { 998 return largepages_enabled; 999 } 1000 1001 void kvm_disable_largepages(void) 1002 { 1003 largepages_enabled = false; 1004 } 1005 EXPORT_SYMBOL_GPL(kvm_disable_largepages); 1006 1007 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1008 { 1009 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 1010 } 1011 EXPORT_SYMBOL_GPL(gfn_to_memslot); 1012 1013 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1014 { 1015 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 1016 1017 if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS || 1018 memslot->flags & KVM_MEMSLOT_INVALID) 1019 return 0; 1020 1021 return 1; 1022 } 1023 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1024 1025 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) 1026 { 1027 struct vm_area_struct *vma; 1028 unsigned long addr, size; 1029 1030 size = PAGE_SIZE; 1031 1032 addr = 
gfn_to_hva(kvm, gfn); 1033 if (kvm_is_error_hva(addr)) 1034 return PAGE_SIZE; 1035 1036 down_read(¤t->mm->mmap_sem); 1037 vma = find_vma(current->mm, addr); 1038 if (!vma) 1039 goto out; 1040 1041 size = vma_kernel_pagesize(vma); 1042 1043 out: 1044 up_read(¤t->mm->mmap_sem); 1045 1046 return size; 1047 } 1048 1049 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 1050 { 1051 return slot->flags & KVM_MEM_READONLY; 1052 } 1053 1054 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1055 gfn_t *nr_pages, bool write) 1056 { 1057 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 1058 return KVM_HVA_ERR_BAD; 1059 1060 if (memslot_is_readonly(slot) && write) 1061 return KVM_HVA_ERR_RO_BAD; 1062 1063 if (nr_pages) 1064 *nr_pages = slot->npages - (gfn - slot->base_gfn); 1065 1066 return __gfn_to_hva_memslot(slot, gfn); 1067 } 1068 1069 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1070 gfn_t *nr_pages) 1071 { 1072 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1073 } 1074 1075 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1076 gfn_t gfn) 1077 { 1078 return gfn_to_hva_many(slot, gfn, NULL); 1079 } 1080 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1081 1082 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1083 { 1084 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1085 } 1086 EXPORT_SYMBOL_GPL(gfn_to_hva); 1087 1088 /* 1089 * If writable is set to false, the hva returned by this function is only 1090 * allowed to be read. 1091 */ 1092 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1093 gfn_t gfn, bool *writable) 1094 { 1095 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1096 1097 if (!kvm_is_error_hva(hva) && writable) 1098 *writable = !memslot_is_readonly(slot); 1099 1100 return hva; 1101 } 1102 1103 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1104 { 1105 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1106 1107 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1108 } 1109 1110 static int kvm_read_hva(void *data, void __user *hva, int len) 1111 { 1112 return __copy_from_user(data, hva, len); 1113 } 1114 1115 static int kvm_read_hva_atomic(void *data, void __user *hva, int len) 1116 { 1117 return __copy_from_user_inatomic(data, hva, len); 1118 } 1119 1120 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, 1121 unsigned long start, int write, struct page **page) 1122 { 1123 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1124 1125 if (write) 1126 flags |= FOLL_WRITE; 1127 1128 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1129 } 1130 1131 int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm, 1132 unsigned long addr, bool write_fault, 1133 struct page **pagep) 1134 { 1135 int npages; 1136 int locked = 1; 1137 int flags = FOLL_TOUCH | FOLL_HWPOISON | 1138 (pagep ? FOLL_GET : 0) | 1139 (write_fault ? FOLL_WRITE : 0); 1140 1141 /* 1142 * If retrying the fault, we get here *not* having allowed the filemap 1143 * to wait on the page lock. We should now allow waiting on the IO with 1144 * the mmap semaphore released. 1145 */ 1146 down_read(&mm->mmap_sem); 1147 npages = __get_user_pages(tsk, mm, addr, 1, flags, pagep, NULL, 1148 &locked); 1149 if (!locked) { 1150 VM_BUG_ON(npages); 1151 1152 if (!pagep) 1153 return 0; 1154 1155 /* 1156 * The previous call has now waited on the IO. Now we can 1157 * retry and complete. 
Pass TRIED to ensure we do not re 1158 * schedule async IO (see e.g. filemap_fault). 1159 */ 1160 down_read(&mm->mmap_sem); 1161 npages = __get_user_pages(tsk, mm, addr, 1, flags | FOLL_TRIED, 1162 pagep, NULL, NULL); 1163 } 1164 up_read(&mm->mmap_sem); 1165 return npages; 1166 } 1167 1168 static inline int check_user_page_hwpoison(unsigned long addr) 1169 { 1170 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1171 1172 rc = __get_user_pages(current, current->mm, addr, 1, 1173 flags, NULL, NULL, NULL); 1174 return rc == -EHWPOISON; 1175 } 1176 1177 /* 1178 * The atomic path to get the writable pfn which will be stored in @pfn, 1179 * true indicates success, otherwise false is returned. 1180 */ 1181 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1182 bool write_fault, bool *writable, pfn_t *pfn) 1183 { 1184 struct page *page[1]; 1185 int npages; 1186 1187 if (!(async || atomic)) 1188 return false; 1189 1190 /* 1191 * Fast pin a writable pfn only if it is a write fault request 1192 * or the caller allows to map a writable pfn for a read fault 1193 * request. 1194 */ 1195 if (!(write_fault || writable)) 1196 return false; 1197 1198 npages = __get_user_pages_fast(addr, 1, 1, page); 1199 if (npages == 1) { 1200 *pfn = page_to_pfn(page[0]); 1201 1202 if (writable) 1203 *writable = true; 1204 return true; 1205 } 1206 1207 return false; 1208 } 1209 1210 /* 1211 * The slow path to get the pfn of the specified host virtual address, 1212 * 1 indicates success, -errno is returned if error is detected. 1213 */ 1214 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 1215 bool *writable, pfn_t *pfn) 1216 { 1217 struct page *page[1]; 1218 int npages = 0; 1219 1220 might_sleep(); 1221 1222 if (writable) 1223 *writable = write_fault; 1224 1225 if (async) { 1226 down_read(¤t->mm->mmap_sem); 1227 npages = get_user_page_nowait(current, current->mm, 1228 addr, write_fault, page); 1229 up_read(¤t->mm->mmap_sem); 1230 } else { 1231 /* 1232 * By now we have tried gup_fast, and possibly async_pf, and we 1233 * are certainly not atomic. Time to retry the gup, allowing 1234 * mmap semaphore to be relinquished in the case of IO. 1235 */ 1236 npages = kvm_get_user_page_io(current, current->mm, addr, 1237 write_fault, page); 1238 } 1239 if (npages != 1) 1240 return npages; 1241 1242 /* map read fault as writable if possible */ 1243 if (unlikely(!write_fault) && writable) { 1244 struct page *wpage[1]; 1245 1246 npages = __get_user_pages_fast(addr, 1, 1, wpage); 1247 if (npages == 1) { 1248 *writable = true; 1249 put_page(page[0]); 1250 page[0] = wpage[0]; 1251 } 1252 1253 npages = 1; 1254 } 1255 *pfn = page_to_pfn(page[0]); 1256 return npages; 1257 } 1258 1259 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 1260 { 1261 if (unlikely(!(vma->vm_flags & VM_READ))) 1262 return false; 1263 1264 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 1265 return false; 1266 1267 return true; 1268 } 1269 1270 /* 1271 * Pin guest page in memory and return its pfn. 
1272 * @addr: host virtual address which maps memory to the guest 1273 * @atomic: whether this function can sleep 1274 * @async: whether this function need to wait IO complete if the 1275 * host page is not in the memory 1276 * @write_fault: whether we should get a writable host page 1277 * @writable: whether it allows to map a writable host page for !@write_fault 1278 * 1279 * The function will map a writable host page for these two cases: 1280 * 1): @write_fault = true 1281 * 2): @write_fault = false && @writable, @writable will tell the caller 1282 * whether the mapping is writable. 1283 */ 1284 static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 1285 bool write_fault, bool *writable) 1286 { 1287 struct vm_area_struct *vma; 1288 pfn_t pfn = 0; 1289 int npages; 1290 1291 /* we can do it either atomically or asynchronously, not both */ 1292 BUG_ON(atomic && async); 1293 1294 if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) 1295 return pfn; 1296 1297 if (atomic) 1298 return KVM_PFN_ERR_FAULT; 1299 1300 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 1301 if (npages == 1) 1302 return pfn; 1303 1304 down_read(¤t->mm->mmap_sem); 1305 if (npages == -EHWPOISON || 1306 (!async && check_user_page_hwpoison(addr))) { 1307 pfn = KVM_PFN_ERR_HWPOISON; 1308 goto exit; 1309 } 1310 1311 vma = find_vma_intersection(current->mm, addr, addr + 1); 1312 1313 if (vma == NULL) 1314 pfn = KVM_PFN_ERR_FAULT; 1315 else if ((vma->vm_flags & VM_PFNMAP)) { 1316 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + 1317 vma->vm_pgoff; 1318 BUG_ON(!kvm_is_reserved_pfn(pfn)); 1319 } else { 1320 if (async && vma_is_valid(vma, write_fault)) 1321 *async = true; 1322 pfn = KVM_PFN_ERR_FAULT; 1323 } 1324 exit: 1325 up_read(¤t->mm->mmap_sem); 1326 return pfn; 1327 } 1328 1329 static pfn_t 1330 __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, 1331 bool *async, bool write_fault, bool *writable) 1332 { 1333 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 1334 1335 if (addr == KVM_HVA_ERR_RO_BAD) 1336 return KVM_PFN_ERR_RO_FAULT; 1337 1338 if (kvm_is_error_hva(addr)) 1339 return KVM_PFN_NOSLOT; 1340 1341 /* Do not map writable pfn in the readonly memslot. 
*/ 1342 if (writable && memslot_is_readonly(slot)) { 1343 *writable = false; 1344 writable = NULL; 1345 } 1346 1347 return hva_to_pfn(addr, atomic, async, write_fault, 1348 writable); 1349 } 1350 1351 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, 1352 bool write_fault, bool *writable) 1353 { 1354 struct kvm_memory_slot *slot; 1355 1356 if (async) 1357 *async = false; 1358 1359 slot = gfn_to_memslot(kvm, gfn); 1360 1361 return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, 1362 writable); 1363 } 1364 1365 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) 1366 { 1367 return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); 1368 } 1369 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); 1370 1371 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, 1372 bool write_fault, bool *writable) 1373 { 1374 return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); 1375 } 1376 EXPORT_SYMBOL_GPL(gfn_to_pfn_async); 1377 1378 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 1379 { 1380 return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); 1381 } 1382 EXPORT_SYMBOL_GPL(gfn_to_pfn); 1383 1384 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1385 bool *writable) 1386 { 1387 return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); 1388 } 1389 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 1390 1391 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1392 { 1393 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); 1394 } 1395 1396 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 1397 { 1398 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); 1399 } 1400 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 1401 1402 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, 1403 int nr_pages) 1404 { 1405 unsigned long addr; 1406 gfn_t entry; 1407 1408 addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); 1409 if (kvm_is_error_hva(addr)) 1410 return -1; 1411 1412 if (entry < nr_pages) 1413 return 0; 1414 1415 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1416 } 1417 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1418 1419 static struct page *kvm_pfn_to_page(pfn_t pfn) 1420 { 1421 if (is_error_noslot_pfn(pfn)) 1422 return KVM_ERR_PTR_BAD_PAGE; 1423 1424 if (kvm_is_reserved_pfn(pfn)) { 1425 WARN_ON(1); 1426 return KVM_ERR_PTR_BAD_PAGE; 1427 } 1428 1429 return pfn_to_page(pfn); 1430 } 1431 1432 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1433 { 1434 pfn_t pfn; 1435 1436 pfn = gfn_to_pfn(kvm, gfn); 1437 1438 return kvm_pfn_to_page(pfn); 1439 } 1440 1441 EXPORT_SYMBOL_GPL(gfn_to_page); 1442 1443 void kvm_release_page_clean(struct page *page) 1444 { 1445 WARN_ON(is_error_page(page)); 1446 1447 kvm_release_pfn_clean(page_to_pfn(page)); 1448 } 1449 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1450 1451 void kvm_release_pfn_clean(pfn_t pfn) 1452 { 1453 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1454 put_page(pfn_to_page(pfn)); 1455 } 1456 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1457 1458 void kvm_release_page_dirty(struct page *page) 1459 { 1460 WARN_ON(is_error_page(page)); 1461 1462 kvm_release_pfn_dirty(page_to_pfn(page)); 1463 } 1464 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1465 1466 static void kvm_release_pfn_dirty(pfn_t pfn) 1467 { 1468 kvm_set_pfn_dirty(pfn); 1469 kvm_release_pfn_clean(pfn); 1470 } 1471 1472 void kvm_set_pfn_dirty(pfn_t pfn) 1473 { 1474 if (!kvm_is_reserved_pfn(pfn)) { 1475 struct page *page = pfn_to_page(pfn); 
1476 if (!PageReserved(page)) 1477 SetPageDirty(page); 1478 } 1479 } 1480 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1481 1482 void kvm_set_pfn_accessed(pfn_t pfn) 1483 { 1484 if (!kvm_is_reserved_pfn(pfn)) 1485 mark_page_accessed(pfn_to_page(pfn)); 1486 } 1487 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1488 1489 void kvm_get_pfn(pfn_t pfn) 1490 { 1491 if (!kvm_is_reserved_pfn(pfn)) 1492 get_page(pfn_to_page(pfn)); 1493 } 1494 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1495 1496 static int next_segment(unsigned long len, int offset) 1497 { 1498 if (len > PAGE_SIZE - offset) 1499 return PAGE_SIZE - offset; 1500 else 1501 return len; 1502 } 1503 1504 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1505 int len) 1506 { 1507 int r; 1508 unsigned long addr; 1509 1510 addr = gfn_to_hva_prot(kvm, gfn, NULL); 1511 if (kvm_is_error_hva(addr)) 1512 return -EFAULT; 1513 r = kvm_read_hva(data, (void __user *)addr + offset, len); 1514 if (r) 1515 return -EFAULT; 1516 return 0; 1517 } 1518 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1519 1520 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1521 { 1522 gfn_t gfn = gpa >> PAGE_SHIFT; 1523 int seg; 1524 int offset = offset_in_page(gpa); 1525 int ret; 1526 1527 while ((seg = next_segment(len, offset)) != 0) { 1528 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1529 if (ret < 0) 1530 return ret; 1531 offset = 0; 1532 len -= seg; 1533 data += seg; 1534 ++gfn; 1535 } 1536 return 0; 1537 } 1538 EXPORT_SYMBOL_GPL(kvm_read_guest); 1539 1540 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1541 unsigned long len) 1542 { 1543 int r; 1544 unsigned long addr; 1545 gfn_t gfn = gpa >> PAGE_SHIFT; 1546 int offset = offset_in_page(gpa); 1547 1548 addr = gfn_to_hva_prot(kvm, gfn, NULL); 1549 if (kvm_is_error_hva(addr)) 1550 return -EFAULT; 1551 pagefault_disable(); 1552 r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len); 1553 pagefault_enable(); 1554 if (r) 1555 return -EFAULT; 1556 return 0; 1557 } 1558 EXPORT_SYMBOL(kvm_read_guest_atomic); 1559 1560 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 1561 int offset, int len) 1562 { 1563 int r; 1564 unsigned long addr; 1565 1566 addr = gfn_to_hva(kvm, gfn); 1567 if (kvm_is_error_hva(addr)) 1568 return -EFAULT; 1569 r = __copy_to_user((void __user *)addr + offset, data, len); 1570 if (r) 1571 return -EFAULT; 1572 mark_page_dirty(kvm, gfn); 1573 return 0; 1574 } 1575 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1576 1577 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1578 unsigned long len) 1579 { 1580 gfn_t gfn = gpa >> PAGE_SHIFT; 1581 int seg; 1582 int offset = offset_in_page(gpa); 1583 int ret; 1584 1585 while ((seg = next_segment(len, offset)) != 0) { 1586 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1587 if (ret < 0) 1588 return ret; 1589 offset = 0; 1590 len -= seg; 1591 data += seg; 1592 ++gfn; 1593 } 1594 return 0; 1595 } 1596 1597 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1598 gpa_t gpa, unsigned long len) 1599 { 1600 struct kvm_memslots *slots = kvm_memslots(kvm); 1601 int offset = offset_in_page(gpa); 1602 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1603 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1604 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1605 gfn_t nr_pages_avail; 1606 1607 ghc->gpa = gpa; 1608 ghc->generation = slots->generation; 1609 ghc->len = len; 1610 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1611 ghc->hva = gfn_to_hva_many(ghc->memslot, 
start_gfn, &nr_pages_avail); 1612 if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { 1613 ghc->hva += offset; 1614 } else { 1615 /* 1616 * If the requested region crosses two memslots, we still 1617 * verify that the entire region is valid here. 1618 */ 1619 while (start_gfn <= end_gfn) { 1620 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1621 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1622 &nr_pages_avail); 1623 if (kvm_is_error_hva(ghc->hva)) 1624 return -EFAULT; 1625 start_gfn += nr_pages_avail; 1626 } 1627 /* Use the slow path for cross page reads and writes. */ 1628 ghc->memslot = NULL; 1629 } 1630 return 0; 1631 } 1632 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1633 1634 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1635 void *data, unsigned long len) 1636 { 1637 struct kvm_memslots *slots = kvm_memslots(kvm); 1638 int r; 1639 1640 BUG_ON(len > ghc->len); 1641 1642 if (slots->generation != ghc->generation) 1643 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1644 1645 if (unlikely(!ghc->memslot)) 1646 return kvm_write_guest(kvm, ghc->gpa, data, len); 1647 1648 if (kvm_is_error_hva(ghc->hva)) 1649 return -EFAULT; 1650 1651 r = __copy_to_user((void __user *)ghc->hva, data, len); 1652 if (r) 1653 return -EFAULT; 1654 mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1655 1656 return 0; 1657 } 1658 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1659 1660 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1661 void *data, unsigned long len) 1662 { 1663 struct kvm_memslots *slots = kvm_memslots(kvm); 1664 int r; 1665 1666 BUG_ON(len > ghc->len); 1667 1668 if (slots->generation != ghc->generation) 1669 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1670 1671 if (unlikely(!ghc->memslot)) 1672 return kvm_read_guest(kvm, ghc->gpa, data, len); 1673 1674 if (kvm_is_error_hva(ghc->hva)) 1675 return -EFAULT; 1676 1677 r = __copy_from_user(data, (void __user *)ghc->hva, len); 1678 if (r) 1679 return -EFAULT; 1680 1681 return 0; 1682 } 1683 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 1684 1685 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1686 { 1687 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 1688 1689 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 1690 } 1691 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1692 1693 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1694 { 1695 gfn_t gfn = gpa >> PAGE_SHIFT; 1696 int seg; 1697 int offset = offset_in_page(gpa); 1698 int ret; 1699 1700 while ((seg = next_segment(len, offset)) != 0) { 1701 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1702 if (ret < 0) 1703 return ret; 1704 offset = 0; 1705 len -= seg; 1706 ++gfn; 1707 } 1708 return 0; 1709 } 1710 EXPORT_SYMBOL_GPL(kvm_clear_guest); 1711 1712 static void mark_page_dirty_in_slot(struct kvm *kvm, 1713 struct kvm_memory_slot *memslot, 1714 gfn_t gfn) 1715 { 1716 if (memslot && memslot->dirty_bitmap) { 1717 unsigned long rel_gfn = gfn - memslot->base_gfn; 1718 1719 set_bit_le(rel_gfn, memslot->dirty_bitmap); 1720 } 1721 } 1722 1723 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1724 { 1725 struct kvm_memory_slot *memslot; 1726 1727 memslot = gfn_to_memslot(kvm, gfn); 1728 mark_page_dirty_in_slot(kvm, memslot, gfn); 1729 } 1730 EXPORT_SYMBOL_GPL(mark_page_dirty); 1731 1732 /* 1733 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 
1734 */ 1735 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 1736 { 1737 DEFINE_WAIT(wait); 1738 1739 for (;;) { 1740 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1741 1742 if (kvm_arch_vcpu_runnable(vcpu)) { 1743 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1744 break; 1745 } 1746 if (kvm_cpu_has_pending_timer(vcpu)) 1747 break; 1748 if (signal_pending(current)) 1749 break; 1750 1751 schedule(); 1752 } 1753 1754 finish_wait(&vcpu->wq, &wait); 1755 } 1756 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 1757 1758 #ifndef CONFIG_S390 1759 /* 1760 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 1761 */ 1762 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 1763 { 1764 int me; 1765 int cpu = vcpu->cpu; 1766 wait_queue_head_t *wqp; 1767 1768 wqp = kvm_arch_vcpu_wq(vcpu); 1769 if (waitqueue_active(wqp)) { 1770 wake_up_interruptible(wqp); 1771 ++vcpu->stat.halt_wakeup; 1772 } 1773 1774 me = get_cpu(); 1775 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 1776 if (kvm_arch_vcpu_should_kick(vcpu)) 1777 smp_send_reschedule(cpu); 1778 put_cpu(); 1779 } 1780 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 1781 #endif /* !CONFIG_S390 */ 1782 1783 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 1784 { 1785 struct pid *pid; 1786 struct task_struct *task = NULL; 1787 int ret = 0; 1788 1789 rcu_read_lock(); 1790 pid = rcu_dereference(target->pid); 1791 if (pid) 1792 task = get_pid_task(pid, PIDTYPE_PID); 1793 rcu_read_unlock(); 1794 if (!task) 1795 return ret; 1796 ret = yield_to(task, 1); 1797 put_task_struct(task); 1798 1799 return ret; 1800 } 1801 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 1802 1803 /* 1804 * Helper that checks whether a VCPU is eligible for directed yield. 1805 * Most eligible candidate to yield is decided by following heuristics: 1806 * 1807 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 1808 * (preempted lock holder), indicated by @in_spin_loop. 1809 * Set at the beiginning and cleared at the end of interception/PLE handler. 1810 * 1811 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 1812 * chance last time (mostly it has become eligible now since we have probably 1813 * yielded to lockholder in last iteration. This is done by toggling 1814 * @dy_eligible each time a VCPU checked for eligibility.) 1815 * 1816 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 1817 * to preempted lock-holder could result in wrong VCPU selection and CPU 1818 * burning. Giving priority for a potential lock-holder increases lock 1819 * progress. 1820 * 1821 * Since algorithm is based on heuristics, accessing another VCPU data without 1822 * locking does not harm. It may result in trying to yield to same VCPU, fail 1823 * and continue with next VCPU and so on. 
1824 */ 1825 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 1826 { 1827 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1828 bool eligible; 1829 1830 eligible = !vcpu->spin_loop.in_spin_loop || 1831 vcpu->spin_loop.dy_eligible; 1832 1833 if (vcpu->spin_loop.in_spin_loop) 1834 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 1835 1836 return eligible; 1837 #else 1838 return true; 1839 #endif 1840 } 1841 1842 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 1843 { 1844 struct kvm *kvm = me->kvm; 1845 struct kvm_vcpu *vcpu; 1846 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 1847 int yielded = 0; 1848 int try = 3; 1849 int pass; 1850 int i; 1851 1852 kvm_vcpu_set_in_spin_loop(me, true); 1853 /* 1854 * We boost the priority of a VCPU that is runnable but not 1855 * currently running, because it got preempted by something 1856 * else and called schedule in __vcpu_run. Hopefully that 1857 * VCPU is holding the lock that we need and will release it. 1858 * We approximate round-robin by starting at the last boosted VCPU. 1859 */ 1860 for (pass = 0; pass < 2 && !yielded && try; pass++) { 1861 kvm_for_each_vcpu(i, vcpu, kvm) { 1862 if (!pass && i <= last_boosted_vcpu) { 1863 i = last_boosted_vcpu; 1864 continue; 1865 } else if (pass && i > last_boosted_vcpu) 1866 break; 1867 if (!ACCESS_ONCE(vcpu->preempted)) 1868 continue; 1869 if (vcpu == me) 1870 continue; 1871 if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 1872 continue; 1873 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 1874 continue; 1875 1876 yielded = kvm_vcpu_yield_to(vcpu); 1877 if (yielded > 0) { 1878 kvm->last_boosted_vcpu = i; 1879 break; 1880 } else if (yielded < 0) { 1881 try--; 1882 if (!try) 1883 break; 1884 } 1885 } 1886 } 1887 kvm_vcpu_set_in_spin_loop(me, false); 1888 1889 /* Ensure vcpu is not eligible during next spinloop */ 1890 kvm_vcpu_set_dy_eligible(me, false); 1891 } 1892 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 1893 1894 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1895 { 1896 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 1897 struct page *page; 1898 1899 if (vmf->pgoff == 0) 1900 page = virt_to_page(vcpu->run); 1901 #ifdef CONFIG_X86 1902 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 1903 page = virt_to_page(vcpu->arch.pio_data); 1904 #endif 1905 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 1906 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 1907 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 1908 #endif 1909 else 1910 return kvm_arch_vcpu_fault(vcpu, vmf); 1911 get_page(page); 1912 vmf->page = page; 1913 return 0; 1914 } 1915 1916 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 1917 .fault = kvm_vcpu_fault, 1918 }; 1919 1920 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 1921 { 1922 vma->vm_ops = &kvm_vcpu_vm_ops; 1923 return 0; 1924 } 1925 1926 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 1927 { 1928 struct kvm_vcpu *vcpu = filp->private_data; 1929 1930 kvm_put_kvm(vcpu->kvm); 1931 return 0; 1932 } 1933 1934 static struct file_operations kvm_vcpu_fops = { 1935 .release = kvm_vcpu_release, 1936 .unlocked_ioctl = kvm_vcpu_ioctl, 1937 #ifdef CONFIG_COMPAT 1938 .compat_ioctl = kvm_vcpu_compat_ioctl, 1939 #endif 1940 .mmap = kvm_vcpu_mmap, 1941 .llseek = noop_llseek, 1942 }; 1943 1944 /* 1945 * Allocates an inode for the vcpu. 
1946 */ 1947 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 1948 { 1949 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 1950 } 1951 1952 /* 1953 * Creates some virtual cpus. Good luck creating more than one. 1954 */ 1955 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 1956 { 1957 int r; 1958 struct kvm_vcpu *vcpu, *v; 1959 1960 if (id >= KVM_MAX_VCPUS) 1961 return -EINVAL; 1962 1963 vcpu = kvm_arch_vcpu_create(kvm, id); 1964 if (IS_ERR(vcpu)) 1965 return PTR_ERR(vcpu); 1966 1967 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 1968 1969 r = kvm_arch_vcpu_setup(vcpu); 1970 if (r) 1971 goto vcpu_destroy; 1972 1973 mutex_lock(&kvm->lock); 1974 if (!kvm_vcpu_compatible(vcpu)) { 1975 r = -EINVAL; 1976 goto unlock_vcpu_destroy; 1977 } 1978 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 1979 r = -EINVAL; 1980 goto unlock_vcpu_destroy; 1981 } 1982 1983 kvm_for_each_vcpu(r, v, kvm) 1984 if (v->vcpu_id == id) { 1985 r = -EEXIST; 1986 goto unlock_vcpu_destroy; 1987 } 1988 1989 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 1990 1991 /* Now it's all set up, let userspace reach it */ 1992 kvm_get_kvm(kvm); 1993 r = create_vcpu_fd(vcpu); 1994 if (r < 0) { 1995 kvm_put_kvm(kvm); 1996 goto unlock_vcpu_destroy; 1997 } 1998 1999 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2000 smp_wmb(); 2001 atomic_inc(&kvm->online_vcpus); 2002 2003 mutex_unlock(&kvm->lock); 2004 kvm_arch_vcpu_postcreate(vcpu); 2005 return r; 2006 2007 unlock_vcpu_destroy: 2008 mutex_unlock(&kvm->lock); 2009 vcpu_destroy: 2010 kvm_arch_vcpu_destroy(vcpu); 2011 return r; 2012 } 2013 2014 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2015 { 2016 if (sigset) { 2017 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2018 vcpu->sigset_active = 1; 2019 vcpu->sigset = *sigset; 2020 } else 2021 vcpu->sigset_active = 0; 2022 return 0; 2023 } 2024 2025 static long kvm_vcpu_ioctl(struct file *filp, 2026 unsigned int ioctl, unsigned long arg) 2027 { 2028 struct kvm_vcpu *vcpu = filp->private_data; 2029 void __user *argp = (void __user *)arg; 2030 int r; 2031 struct kvm_fpu *fpu = NULL; 2032 struct kvm_sregs *kvm_sregs = NULL; 2033 2034 if (vcpu->kvm->mm != current->mm) 2035 return -EIO; 2036 2037 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2038 return -EINVAL; 2039 2040 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2041 /* 2042 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2043 * so vcpu_load() would break it. 2044 */ 2045 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT) 2046 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2047 #endif 2048 2049 2050 r = vcpu_load(vcpu); 2051 if (r) 2052 return r; 2053 switch (ioctl) { 2054 case KVM_RUN: 2055 r = -EINVAL; 2056 if (arg) 2057 goto out; 2058 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2059 /* The thread running this VCPU changed. 
*/ 2060 struct pid *oldpid = vcpu->pid; 2061 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2062 rcu_assign_pointer(vcpu->pid, newpid); 2063 if (oldpid) 2064 synchronize_rcu(); 2065 put_pid(oldpid); 2066 } 2067 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2068 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2069 break; 2070 case KVM_GET_REGS: { 2071 struct kvm_regs *kvm_regs; 2072 2073 r = -ENOMEM; 2074 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2075 if (!kvm_regs) 2076 goto out; 2077 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2078 if (r) 2079 goto out_free1; 2080 r = -EFAULT; 2081 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2082 goto out_free1; 2083 r = 0; 2084 out_free1: 2085 kfree(kvm_regs); 2086 break; 2087 } 2088 case KVM_SET_REGS: { 2089 struct kvm_regs *kvm_regs; 2090 2091 r = -ENOMEM; 2092 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2093 if (IS_ERR(kvm_regs)) { 2094 r = PTR_ERR(kvm_regs); 2095 goto out; 2096 } 2097 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2098 kfree(kvm_regs); 2099 break; 2100 } 2101 case KVM_GET_SREGS: { 2102 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2103 r = -ENOMEM; 2104 if (!kvm_sregs) 2105 goto out; 2106 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2107 if (r) 2108 goto out; 2109 r = -EFAULT; 2110 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2111 goto out; 2112 r = 0; 2113 break; 2114 } 2115 case KVM_SET_SREGS: { 2116 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2117 if (IS_ERR(kvm_sregs)) { 2118 r = PTR_ERR(kvm_sregs); 2119 kvm_sregs = NULL; 2120 goto out; 2121 } 2122 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2123 break; 2124 } 2125 case KVM_GET_MP_STATE: { 2126 struct kvm_mp_state mp_state; 2127 2128 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2129 if (r) 2130 goto out; 2131 r = -EFAULT; 2132 if (copy_to_user(argp, &mp_state, sizeof mp_state)) 2133 goto out; 2134 r = 0; 2135 break; 2136 } 2137 case KVM_SET_MP_STATE: { 2138 struct kvm_mp_state mp_state; 2139 2140 r = -EFAULT; 2141 if (copy_from_user(&mp_state, argp, sizeof mp_state)) 2142 goto out; 2143 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2144 break; 2145 } 2146 case KVM_TRANSLATE: { 2147 struct kvm_translation tr; 2148 2149 r = -EFAULT; 2150 if (copy_from_user(&tr, argp, sizeof tr)) 2151 goto out; 2152 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2153 if (r) 2154 goto out; 2155 r = -EFAULT; 2156 if (copy_to_user(argp, &tr, sizeof tr)) 2157 goto out; 2158 r = 0; 2159 break; 2160 } 2161 case KVM_SET_GUEST_DEBUG: { 2162 struct kvm_guest_debug dbg; 2163 2164 r = -EFAULT; 2165 if (copy_from_user(&dbg, argp, sizeof dbg)) 2166 goto out; 2167 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2168 break; 2169 } 2170 case KVM_SET_SIGNAL_MASK: { 2171 struct kvm_signal_mask __user *sigmask_arg = argp; 2172 struct kvm_signal_mask kvm_sigmask; 2173 sigset_t sigset, *p; 2174 2175 p = NULL; 2176 if (argp) { 2177 r = -EFAULT; 2178 if (copy_from_user(&kvm_sigmask, argp, 2179 sizeof kvm_sigmask)) 2180 goto out; 2181 r = -EINVAL; 2182 if (kvm_sigmask.len != sizeof sigset) 2183 goto out; 2184 r = -EFAULT; 2185 if (copy_from_user(&sigset, sigmask_arg->sigset, 2186 sizeof sigset)) 2187 goto out; 2188 p = &sigset; 2189 } 2190 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2191 break; 2192 } 2193 case KVM_GET_FPU: { 2194 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2195 r = -ENOMEM; 2196 if (!fpu) 2197 goto out; 2198 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2199 if (r) 2200 goto 
out; 2201 r = -EFAULT; 2202 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2203 goto out; 2204 r = 0; 2205 break; 2206 } 2207 case KVM_SET_FPU: { 2208 fpu = memdup_user(argp, sizeof(*fpu)); 2209 if (IS_ERR(fpu)) { 2210 r = PTR_ERR(fpu); 2211 fpu = NULL; 2212 goto out; 2213 } 2214 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2215 break; 2216 } 2217 default: 2218 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2219 } 2220 out: 2221 vcpu_put(vcpu); 2222 kfree(fpu); 2223 kfree(kvm_sregs); 2224 return r; 2225 } 2226 2227 #ifdef CONFIG_COMPAT 2228 static long kvm_vcpu_compat_ioctl(struct file *filp, 2229 unsigned int ioctl, unsigned long arg) 2230 { 2231 struct kvm_vcpu *vcpu = filp->private_data; 2232 void __user *argp = compat_ptr(arg); 2233 int r; 2234 2235 if (vcpu->kvm->mm != current->mm) 2236 return -EIO; 2237 2238 switch (ioctl) { 2239 case KVM_SET_SIGNAL_MASK: { 2240 struct kvm_signal_mask __user *sigmask_arg = argp; 2241 struct kvm_signal_mask kvm_sigmask; 2242 compat_sigset_t csigset; 2243 sigset_t sigset; 2244 2245 if (argp) { 2246 r = -EFAULT; 2247 if (copy_from_user(&kvm_sigmask, argp, 2248 sizeof kvm_sigmask)) 2249 goto out; 2250 r = -EINVAL; 2251 if (kvm_sigmask.len != sizeof csigset) 2252 goto out; 2253 r = -EFAULT; 2254 if (copy_from_user(&csigset, sigmask_arg->sigset, 2255 sizeof csigset)) 2256 goto out; 2257 sigset_from_compat(&sigset, &csigset); 2258 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2259 } else 2260 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2261 break; 2262 } 2263 default: 2264 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2265 } 2266 2267 out: 2268 return r; 2269 } 2270 #endif 2271 2272 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2273 int (*accessor)(struct kvm_device *dev, 2274 struct kvm_device_attr *attr), 2275 unsigned long arg) 2276 { 2277 struct kvm_device_attr attr; 2278 2279 if (!accessor) 2280 return -EPERM; 2281 2282 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2283 return -EFAULT; 2284 2285 return accessor(dev, &attr); 2286 } 2287 2288 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2289 unsigned long arg) 2290 { 2291 struct kvm_device *dev = filp->private_data; 2292 2293 switch (ioctl) { 2294 case KVM_SET_DEVICE_ATTR: 2295 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2296 case KVM_GET_DEVICE_ATTR: 2297 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2298 case KVM_HAS_DEVICE_ATTR: 2299 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2300 default: 2301 if (dev->ops->ioctl) 2302 return dev->ops->ioctl(dev, ioctl, arg); 2303 2304 return -ENOTTY; 2305 } 2306 } 2307 2308 static int kvm_device_release(struct inode *inode, struct file *filp) 2309 { 2310 struct kvm_device *dev = filp->private_data; 2311 struct kvm *kvm = dev->kvm; 2312 2313 kvm_put_kvm(kvm); 2314 return 0; 2315 } 2316 2317 static const struct file_operations kvm_device_fops = { 2318 .unlocked_ioctl = kvm_device_ioctl, 2319 #ifdef CONFIG_COMPAT 2320 .compat_ioctl = kvm_device_ioctl, 2321 #endif 2322 .release = kvm_device_release, 2323 }; 2324 2325 struct kvm_device *kvm_device_from_filp(struct file *filp) 2326 { 2327 if (filp->f_op != &kvm_device_fops) 2328 return NULL; 2329 2330 return filp->private_data; 2331 } 2332 2333 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2334 #ifdef CONFIG_KVM_MPIC 2335 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2336 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2337 #endif 2338 2339 #ifdef CONFIG_KVM_XICS 2340 [KVM_DEV_TYPE_XICS] = &kvm_xics_ops, 2341 
#endif 2342 }; 2343 2344 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2345 { 2346 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2347 return -ENOSPC; 2348 2349 if (kvm_device_ops_table[type] != NULL) 2350 return -EEXIST; 2351 2352 kvm_device_ops_table[type] = ops; 2353 return 0; 2354 } 2355 2356 void kvm_unregister_device_ops(u32 type) 2357 { 2358 if (kvm_device_ops_table[type] != NULL) 2359 kvm_device_ops_table[type] = NULL; 2360 } 2361 2362 static int kvm_ioctl_create_device(struct kvm *kvm, 2363 struct kvm_create_device *cd) 2364 { 2365 struct kvm_device_ops *ops = NULL; 2366 struct kvm_device *dev; 2367 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2368 int ret; 2369 2370 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2371 return -ENODEV; 2372 2373 ops = kvm_device_ops_table[cd->type]; 2374 if (ops == NULL) 2375 return -ENODEV; 2376 2377 if (test) 2378 return 0; 2379 2380 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2381 if (!dev) 2382 return -ENOMEM; 2383 2384 dev->ops = ops; 2385 dev->kvm = kvm; 2386 2387 ret = ops->create(dev, cd->type); 2388 if (ret < 0) { 2389 kfree(dev); 2390 return ret; 2391 } 2392 2393 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2394 if (ret < 0) { 2395 ops->destroy(dev); 2396 return ret; 2397 } 2398 2399 list_add(&dev->vm_node, &kvm->devices); 2400 kvm_get_kvm(kvm); 2401 cd->fd = ret; 2402 return 0; 2403 } 2404 2405 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2406 { 2407 switch (arg) { 2408 case KVM_CAP_USER_MEMORY: 2409 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2410 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2411 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2412 case KVM_CAP_SET_BOOT_CPU_ID: 2413 #endif 2414 case KVM_CAP_INTERNAL_ERROR_DATA: 2415 #ifdef CONFIG_HAVE_KVM_MSI 2416 case KVM_CAP_SIGNAL_MSI: 2417 #endif 2418 #ifdef CONFIG_HAVE_KVM_IRQFD 2419 case KVM_CAP_IRQFD_RESAMPLE: 2420 #endif 2421 case KVM_CAP_CHECK_EXTENSION_VM: 2422 return 1; 2423 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2424 case KVM_CAP_IRQ_ROUTING: 2425 return KVM_MAX_IRQ_ROUTES; 2426 #endif 2427 default: 2428 break; 2429 } 2430 return kvm_vm_ioctl_check_extension(kvm, arg); 2431 } 2432 2433 static long kvm_vm_ioctl(struct file *filp, 2434 unsigned int ioctl, unsigned long arg) 2435 { 2436 struct kvm *kvm = filp->private_data; 2437 void __user *argp = (void __user *)arg; 2438 int r; 2439 2440 if (kvm->mm != current->mm) 2441 return -EIO; 2442 switch (ioctl) { 2443 case KVM_CREATE_VCPU: 2444 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2445 break; 2446 case KVM_SET_USER_MEMORY_REGION: { 2447 struct kvm_userspace_memory_region kvm_userspace_mem; 2448 2449 r = -EFAULT; 2450 if (copy_from_user(&kvm_userspace_mem, argp, 2451 sizeof kvm_userspace_mem)) 2452 goto out; 2453 2454 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2455 break; 2456 } 2457 case KVM_GET_DIRTY_LOG: { 2458 struct kvm_dirty_log log; 2459 2460 r = -EFAULT; 2461 if (copy_from_user(&log, argp, sizeof log)) 2462 goto out; 2463 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2464 break; 2465 } 2466 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2467 case KVM_REGISTER_COALESCED_MMIO: { 2468 struct kvm_coalesced_mmio_zone zone; 2469 r = -EFAULT; 2470 if (copy_from_user(&zone, argp, sizeof zone)) 2471 goto out; 2472 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2473 break; 2474 } 2475 case KVM_UNREGISTER_COALESCED_MMIO: { 2476 struct kvm_coalesced_mmio_zone zone; 2477 r = -EFAULT; 2478 if (copy_from_user(&zone, argp, sizeof zone)) 2479 goto out; 2480 r = 
kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2481 break; 2482 } 2483 #endif 2484 case KVM_IRQFD: { 2485 struct kvm_irqfd data; 2486 2487 r = -EFAULT; 2488 if (copy_from_user(&data, argp, sizeof data)) 2489 goto out; 2490 r = kvm_irqfd(kvm, &data); 2491 break; 2492 } 2493 case KVM_IOEVENTFD: { 2494 struct kvm_ioeventfd data; 2495 2496 r = -EFAULT; 2497 if (copy_from_user(&data, argp, sizeof data)) 2498 goto out; 2499 r = kvm_ioeventfd(kvm, &data); 2500 break; 2501 } 2502 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2503 case KVM_SET_BOOT_CPU_ID: 2504 r = 0; 2505 mutex_lock(&kvm->lock); 2506 if (atomic_read(&kvm->online_vcpus) != 0) 2507 r = -EBUSY; 2508 else 2509 kvm->bsp_vcpu_id = arg; 2510 mutex_unlock(&kvm->lock); 2511 break; 2512 #endif 2513 #ifdef CONFIG_HAVE_KVM_MSI 2514 case KVM_SIGNAL_MSI: { 2515 struct kvm_msi msi; 2516 2517 r = -EFAULT; 2518 if (copy_from_user(&msi, argp, sizeof msi)) 2519 goto out; 2520 r = kvm_send_userspace_msi(kvm, &msi); 2521 break; 2522 } 2523 #endif 2524 #ifdef __KVM_HAVE_IRQ_LINE 2525 case KVM_IRQ_LINE_STATUS: 2526 case KVM_IRQ_LINE: { 2527 struct kvm_irq_level irq_event; 2528 2529 r = -EFAULT; 2530 if (copy_from_user(&irq_event, argp, sizeof irq_event)) 2531 goto out; 2532 2533 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2534 ioctl == KVM_IRQ_LINE_STATUS); 2535 if (r) 2536 goto out; 2537 2538 r = -EFAULT; 2539 if (ioctl == KVM_IRQ_LINE_STATUS) { 2540 if (copy_to_user(argp, &irq_event, sizeof irq_event)) 2541 goto out; 2542 } 2543 2544 r = 0; 2545 break; 2546 } 2547 #endif 2548 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2549 case KVM_SET_GSI_ROUTING: { 2550 struct kvm_irq_routing routing; 2551 struct kvm_irq_routing __user *urouting; 2552 struct kvm_irq_routing_entry *entries; 2553 2554 r = -EFAULT; 2555 if (copy_from_user(&routing, argp, sizeof(routing))) 2556 goto out; 2557 r = -EINVAL; 2558 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2559 goto out; 2560 if (routing.flags) 2561 goto out; 2562 r = -ENOMEM; 2563 entries = vmalloc(routing.nr * sizeof(*entries)); 2564 if (!entries) 2565 goto out; 2566 r = -EFAULT; 2567 urouting = argp; 2568 if (copy_from_user(entries, urouting->entries, 2569 routing.nr * sizeof(*entries))) 2570 goto out_free_irq_routing; 2571 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2572 routing.flags); 2573 out_free_irq_routing: 2574 vfree(entries); 2575 break; 2576 } 2577 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2578 case KVM_CREATE_DEVICE: { 2579 struct kvm_create_device cd; 2580 2581 r = -EFAULT; 2582 if (copy_from_user(&cd, argp, sizeof(cd))) 2583 goto out; 2584 2585 r = kvm_ioctl_create_device(kvm, &cd); 2586 if (r) 2587 goto out; 2588 2589 r = -EFAULT; 2590 if (copy_to_user(argp, &cd, sizeof(cd))) 2591 goto out; 2592 2593 r = 0; 2594 break; 2595 } 2596 case KVM_CHECK_EXTENSION: 2597 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 2598 break; 2599 default: 2600 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2601 } 2602 out: 2603 return r; 2604 } 2605 2606 #ifdef CONFIG_COMPAT 2607 struct compat_kvm_dirty_log { 2608 __u32 slot; 2609 __u32 padding1; 2610 union { 2611 compat_uptr_t dirty_bitmap; /* one bit per page */ 2612 __u64 padding2; 2613 }; 2614 }; 2615 2616 static long kvm_vm_compat_ioctl(struct file *filp, 2617 unsigned int ioctl, unsigned long arg) 2618 { 2619 struct kvm *kvm = filp->private_data; 2620 int r; 2621 2622 if (kvm->mm != current->mm) 2623 return -EIO; 2624 switch (ioctl) { 2625 case KVM_GET_DIRTY_LOG: { 2626 struct compat_kvm_dirty_log compat_log; 2627 struct kvm_dirty_log log; 2628 2629 r = -EFAULT; 2630 if 
(copy_from_user(&compat_log, (void __user *)arg, 2631 sizeof(compat_log))) 2632 goto out; 2633 log.slot = compat_log.slot; 2634 log.padding1 = compat_log.padding1; 2635 log.padding2 = compat_log.padding2; 2636 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2637 2638 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2639 break; 2640 } 2641 default: 2642 r = kvm_vm_ioctl(filp, ioctl, arg); 2643 } 2644 2645 out: 2646 return r; 2647 } 2648 #endif 2649 2650 static struct file_operations kvm_vm_fops = { 2651 .release = kvm_vm_release, 2652 .unlocked_ioctl = kvm_vm_ioctl, 2653 #ifdef CONFIG_COMPAT 2654 .compat_ioctl = kvm_vm_compat_ioctl, 2655 #endif 2656 .llseek = noop_llseek, 2657 }; 2658 2659 static int kvm_dev_ioctl_create_vm(unsigned long type) 2660 { 2661 int r; 2662 struct kvm *kvm; 2663 2664 kvm = kvm_create_vm(type); 2665 if (IS_ERR(kvm)) 2666 return PTR_ERR(kvm); 2667 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2668 r = kvm_coalesced_mmio_init(kvm); 2669 if (r < 0) { 2670 kvm_put_kvm(kvm); 2671 return r; 2672 } 2673 #endif 2674 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2675 if (r < 0) 2676 kvm_put_kvm(kvm); 2677 2678 return r; 2679 } 2680 2681 static long kvm_dev_ioctl(struct file *filp, 2682 unsigned int ioctl, unsigned long arg) 2683 { 2684 long r = -EINVAL; 2685 2686 switch (ioctl) { 2687 case KVM_GET_API_VERSION: 2688 if (arg) 2689 goto out; 2690 r = KVM_API_VERSION; 2691 break; 2692 case KVM_CREATE_VM: 2693 r = kvm_dev_ioctl_create_vm(arg); 2694 break; 2695 case KVM_CHECK_EXTENSION: 2696 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 2697 break; 2698 case KVM_GET_VCPU_MMAP_SIZE: 2699 if (arg) 2700 goto out; 2701 r = PAGE_SIZE; /* struct kvm_run */ 2702 #ifdef CONFIG_X86 2703 r += PAGE_SIZE; /* pio data page */ 2704 #endif 2705 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2706 r += PAGE_SIZE; /* coalesced mmio ring page */ 2707 #endif 2708 break; 2709 case KVM_TRACE_ENABLE: 2710 case KVM_TRACE_PAUSE: 2711 case KVM_TRACE_DISABLE: 2712 r = -EOPNOTSUPP; 2713 break; 2714 default: 2715 return kvm_arch_dev_ioctl(filp, ioctl, arg); 2716 } 2717 out: 2718 return r; 2719 } 2720 2721 static struct file_operations kvm_chardev_ops = { 2722 .unlocked_ioctl = kvm_dev_ioctl, 2723 .compat_ioctl = kvm_dev_ioctl, 2724 .llseek = noop_llseek, 2725 }; 2726 2727 static struct miscdevice kvm_dev = { 2728 KVM_MINOR, 2729 "kvm", 2730 &kvm_chardev_ops, 2731 }; 2732 2733 static void hardware_enable_nolock(void *junk) 2734 { 2735 int cpu = raw_smp_processor_id(); 2736 int r; 2737 2738 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2739 return; 2740 2741 cpumask_set_cpu(cpu, cpus_hardware_enabled); 2742 2743 r = kvm_arch_hardware_enable(); 2744 2745 if (r) { 2746 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2747 atomic_inc(&hardware_enable_failed); 2748 printk(KERN_INFO "kvm: enabling virtualization on " 2749 "CPU%d failed\n", cpu); 2750 } 2751 } 2752 2753 static void hardware_enable(void) 2754 { 2755 raw_spin_lock(&kvm_count_lock); 2756 if (kvm_usage_count) 2757 hardware_enable_nolock(NULL); 2758 raw_spin_unlock(&kvm_count_lock); 2759 } 2760 2761 static void hardware_disable_nolock(void *junk) 2762 { 2763 int cpu = raw_smp_processor_id(); 2764 2765 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2766 return; 2767 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2768 kvm_arch_hardware_disable(); 2769 } 2770 2771 static void hardware_disable(void) 2772 { 2773 raw_spin_lock(&kvm_count_lock); 2774 if (kvm_usage_count) 2775 hardware_disable_nolock(NULL); 2776 
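	/*
	 * kvm_usage_count is stable while kvm_count_lock is held; the
	 * hardware is left untouched when no VMs exist, mirroring
	 * hardware_enable() above.  Both are driven by the CPU hotplug
	 * notifier further down.
	 */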
raw_spin_unlock(&kvm_count_lock); 2777 } 2778 2779 static void hardware_disable_all_nolock(void) 2780 { 2781 BUG_ON(!kvm_usage_count); 2782 2783 kvm_usage_count--; 2784 if (!kvm_usage_count) 2785 on_each_cpu(hardware_disable_nolock, NULL, 1); 2786 } 2787 2788 static void hardware_disable_all(void) 2789 { 2790 raw_spin_lock(&kvm_count_lock); 2791 hardware_disable_all_nolock(); 2792 raw_spin_unlock(&kvm_count_lock); 2793 } 2794 2795 static int hardware_enable_all(void) 2796 { 2797 int r = 0; 2798 2799 raw_spin_lock(&kvm_count_lock); 2800 2801 kvm_usage_count++; 2802 if (kvm_usage_count == 1) { 2803 atomic_set(&hardware_enable_failed, 0); 2804 on_each_cpu(hardware_enable_nolock, NULL, 1); 2805 2806 if (atomic_read(&hardware_enable_failed)) { 2807 hardware_disable_all_nolock(); 2808 r = -EBUSY; 2809 } 2810 } 2811 2812 raw_spin_unlock(&kvm_count_lock); 2813 2814 return r; 2815 } 2816 2817 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 2818 void *v) 2819 { 2820 int cpu = (long)v; 2821 2822 val &= ~CPU_TASKS_FROZEN; 2823 switch (val) { 2824 case CPU_DYING: 2825 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2826 cpu); 2827 hardware_disable(); 2828 break; 2829 case CPU_STARTING: 2830 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2831 cpu); 2832 hardware_enable(); 2833 break; 2834 } 2835 return NOTIFY_OK; 2836 } 2837 2838 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 2839 void *v) 2840 { 2841 /* 2842 * Some BIOSes (at least the author's) hang on reboot if the CPU is 2843 * still in VMX root mode. 2844 * 2845 * Intel TXT also requires VMX to be off on all CPUs when the system shuts down. 2846 */ 2847 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2848 kvm_rebooting = true; 2849 on_each_cpu(hardware_disable_nolock, NULL, 1); 2850 return NOTIFY_OK; 2851 } 2852 2853 static struct notifier_block kvm_reboot_notifier = { 2854 .notifier_call = kvm_reboot, 2855 .priority = 0, 2856 }; 2857 2858 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 2859 { 2860 int i; 2861 2862 for (i = 0; i < bus->dev_count; i++) { 2863 struct kvm_io_device *pos = bus->range[i].dev; 2864 2865 kvm_iodevice_destructor(pos); 2866 } 2867 kfree(bus); 2868 } 2869 2870 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 2871 const struct kvm_io_range *r2) 2872 { 2873 if (r1->addr < r2->addr) 2874 return -1; 2875 if (r1->addr + r1->len > r2->addr + r2->len) 2876 return 1; 2877 return 0; 2878 } 2879 2880 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 2881 { 2882 return kvm_io_bus_cmp(p1, p2); 2883 } 2884 2885 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 2886 gpa_t addr, int len) 2887 { 2888 bus->range[bus->dev_count++] = (struct kvm_io_range) { 2889 .addr = addr, 2890 .len = len, 2891 .dev = dev, 2892 }; 2893 2894 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 2895 kvm_io_bus_sort_cmp, NULL); 2896 2897 return 0; 2898 } 2899 2900 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 2901 gpa_t addr, int len) 2902 { 2903 struct kvm_io_range *range, key; 2904 int off; 2905 2906 key = (struct kvm_io_range) { 2907 .addr = addr, 2908 .len = len, 2909 }; 2910 2911 range = bsearch(&key, bus->range, bus->dev_count, 2912 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 2913 if (range == NULL) 2914 return -ENOENT; 2915 2916 off = range - bus->range; 2917 2918 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 2919 off--; 2920 2921 return off; 2922 } 2923 2924 static int 
__kvm_io_bus_write(struct kvm_io_bus *bus, 2925 struct kvm_io_range *range, const void *val) 2926 { 2927 int idx; 2928 2929 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2930 if (idx < 0) 2931 return -EOPNOTSUPP; 2932 2933 while (idx < bus->dev_count && 2934 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 2935 if (!kvm_iodevice_write(bus->range[idx].dev, range->addr, 2936 range->len, val)) 2937 return idx; 2938 idx++; 2939 } 2940 2941 return -EOPNOTSUPP; 2942 } 2943 2944 /* kvm_io_bus_write - called under kvm->slots_lock */ 2945 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2946 int len, const void *val) 2947 { 2948 struct kvm_io_bus *bus; 2949 struct kvm_io_range range; 2950 int r; 2951 2952 range = (struct kvm_io_range) { 2953 .addr = addr, 2954 .len = len, 2955 }; 2956 2957 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2958 r = __kvm_io_bus_write(bus, &range, val); 2959 return r < 0 ? r : 0; 2960 } 2961 2962 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 2963 int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2964 int len, const void *val, long cookie) 2965 { 2966 struct kvm_io_bus *bus; 2967 struct kvm_io_range range; 2968 2969 range = (struct kvm_io_range) { 2970 .addr = addr, 2971 .len = len, 2972 }; 2973 2974 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2975 2976 /* First try the device referenced by cookie. */ 2977 if ((cookie >= 0) && (cookie < bus->dev_count) && 2978 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 2979 if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len, 2980 val)) 2981 return cookie; 2982 2983 /* 2984 * cookie contained garbage; fall back to search and return the 2985 * correct cookie value. 2986 */ 2987 return __kvm_io_bus_write(bus, &range, val); 2988 } 2989 2990 static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, 2991 void *val) 2992 { 2993 int idx; 2994 2995 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2996 if (idx < 0) 2997 return -EOPNOTSUPP; 2998 2999 while (idx < bus->dev_count && 3000 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3001 if (!kvm_iodevice_read(bus->range[idx].dev, range->addr, 3002 range->len, val)) 3003 return idx; 3004 idx++; 3005 } 3006 3007 return -EOPNOTSUPP; 3008 } 3009 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3010 3011 /* kvm_io_bus_read - called under kvm->slots_lock */ 3012 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3013 int len, void *val) 3014 { 3015 struct kvm_io_bus *bus; 3016 struct kvm_io_range range; 3017 int r; 3018 3019 range = (struct kvm_io_range) { 3020 .addr = addr, 3021 .len = len, 3022 }; 3023 3024 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3025 r = __kvm_io_bus_read(bus, &range, val); 3026 return r < 0 ? r : 0; 3027 } 3028 3029 3030 /* Caller must hold slots_lock. 
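 * The bus itself is updated copy-on-write: writers allocate a new
 * kvm_io_bus, fill it from the old one, publish it with
 * rcu_assign_pointer() and only free the old copy after
 * synchronize_srcu_expedited(), so readers that srcu_dereference()
 * kvm->buses[] never observe a partially updated array.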
*/ 3031 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3032 int len, struct kvm_io_device *dev) 3033 { 3034 struct kvm_io_bus *new_bus, *bus; 3035 3036 bus = kvm->buses[bus_idx]; 3037 /* exclude ioeventfd which is limited by maximum fd */ 3038 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3039 return -ENOSPC; 3040 3041 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3042 sizeof(struct kvm_io_range)), GFP_KERNEL); 3043 if (!new_bus) 3044 return -ENOMEM; 3045 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3046 sizeof(struct kvm_io_range))); 3047 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3048 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3049 synchronize_srcu_expedited(&kvm->srcu); 3050 kfree(bus); 3051 3052 return 0; 3053 } 3054 3055 /* Caller must hold slots_lock. */ 3056 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3057 struct kvm_io_device *dev) 3058 { 3059 int i, r; 3060 struct kvm_io_bus *new_bus, *bus; 3061 3062 bus = kvm->buses[bus_idx]; 3063 r = -ENOENT; 3064 for (i = 0; i < bus->dev_count; i++) 3065 if (bus->range[i].dev == dev) { 3066 r = 0; 3067 break; 3068 } 3069 3070 if (r) 3071 return r; 3072 3073 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3074 sizeof(struct kvm_io_range)), GFP_KERNEL); 3075 if (!new_bus) 3076 return -ENOMEM; 3077 3078 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3079 new_bus->dev_count--; 3080 memcpy(new_bus->range + i, bus->range + i + 1, 3081 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3082 3083 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3084 synchronize_srcu_expedited(&kvm->srcu); 3085 kfree(bus); 3086 return r; 3087 } 3088 3089 static struct notifier_block kvm_cpu_notifier = { 3090 .notifier_call = kvm_cpu_hotplug, 3091 }; 3092 3093 static int vm_stat_get(void *_offset, u64 *val) 3094 { 3095 unsigned offset = (long)_offset; 3096 struct kvm *kvm; 3097 3098 *val = 0; 3099 spin_lock(&kvm_lock); 3100 list_for_each_entry(kvm, &vm_list, vm_list) 3101 *val += *(u32 *)((void *)kvm + offset); 3102 spin_unlock(&kvm_lock); 3103 return 0; 3104 } 3105 3106 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3107 3108 static int vcpu_stat_get(void *_offset, u64 *val) 3109 { 3110 unsigned offset = (long)_offset; 3111 struct kvm *kvm; 3112 struct kvm_vcpu *vcpu; 3113 int i; 3114 3115 *val = 0; 3116 spin_lock(&kvm_lock); 3117 list_for_each_entry(kvm, &vm_list, vm_list) 3118 kvm_for_each_vcpu(i, vcpu, kvm) 3119 *val += *(u32 *)((void *)vcpu + offset); 3120 3121 spin_unlock(&kvm_lock); 3122 return 0; 3123 } 3124 3125 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3126 3127 static const struct file_operations *stat_fops[] = { 3128 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3129 [KVM_STAT_VM] = &vm_stat_fops, 3130 }; 3131 3132 static int kvm_init_debug(void) 3133 { 3134 int r = -EEXIST; 3135 struct kvm_stats_debugfs_item *p; 3136 3137 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3138 if (kvm_debugfs_dir == NULL) 3139 goto out; 3140 3141 for (p = debugfs_entries; p->name; ++p) { 3142 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3143 (void *)(long)p->offset, 3144 stat_fops[p->kind]); 3145 if (p->dentry == NULL) 3146 goto out_dir; 3147 } 3148 3149 return 0; 3150 3151 out_dir: 3152 debugfs_remove_recursive(kvm_debugfs_dir); 3153 out: 3154 return r; 3155 } 3156 3157 static void kvm_exit_debug(void) 3158 { 3159 struct kvm_stats_debugfs_item *p; 3160 3161 for (p = 
debugfs_entries; p->name; ++p) 3162 debugfs_remove(p->dentry); 3163 debugfs_remove(kvm_debugfs_dir); 3164 } 3165 3166 static int kvm_suspend(void) 3167 { 3168 if (kvm_usage_count) 3169 hardware_disable_nolock(NULL); 3170 return 0; 3171 } 3172 3173 static void kvm_resume(void) 3174 { 3175 if (kvm_usage_count) { 3176 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3177 hardware_enable_nolock(NULL); 3178 } 3179 } 3180 3181 static struct syscore_ops kvm_syscore_ops = { 3182 .suspend = kvm_suspend, 3183 .resume = kvm_resume, 3184 }; 3185 3186 static inline 3187 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3188 { 3189 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3190 } 3191 3192 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3193 { 3194 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3195 if (vcpu->preempted) 3196 vcpu->preempted = false; 3197 3198 kvm_arch_sched_in(vcpu, cpu); 3199 3200 kvm_arch_vcpu_load(vcpu, cpu); 3201 } 3202 3203 static void kvm_sched_out(struct preempt_notifier *pn, 3204 struct task_struct *next) 3205 { 3206 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3207 3208 if (current->state == TASK_RUNNING) 3209 vcpu->preempted = true; 3210 kvm_arch_vcpu_put(vcpu); 3211 } 3212 3213 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3214 struct module *module) 3215 { 3216 int r; 3217 int cpu; 3218 3219 r = kvm_arch_init(opaque); 3220 if (r) 3221 goto out_fail; 3222 3223 /* 3224 * kvm_arch_init makes sure there's at most one caller 3225 * for architectures that support multiple implementations, 3226 * like intel and amd on x86. 3227 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3228 * conflicts in case kvm is already setup for another implementation. 3229 */ 3230 r = kvm_irqfd_init(); 3231 if (r) 3232 goto out_irqfd; 3233 3234 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3235 r = -ENOMEM; 3236 goto out_free_0; 3237 } 3238 3239 r = kvm_arch_hardware_setup(); 3240 if (r < 0) 3241 goto out_free_0a; 3242 3243 for_each_online_cpu(cpu) { 3244 smp_call_function_single(cpu, 3245 kvm_arch_check_processor_compat, 3246 &r, 1); 3247 if (r < 0) 3248 goto out_free_1; 3249 } 3250 3251 r = register_cpu_notifier(&kvm_cpu_notifier); 3252 if (r) 3253 goto out_free_2; 3254 register_reboot_notifier(&kvm_reboot_notifier); 3255 3256 /* A kmem cache lets us meet the alignment requirements of fx_save. 
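 * When an architecture does not pass an explicit alignment, the natural
 * alignment of struct kvm_vcpu is used below.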
*/ 3257 if (!vcpu_align) 3258 vcpu_align = __alignof__(struct kvm_vcpu); 3259 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3260 0, NULL); 3261 if (!kvm_vcpu_cache) { 3262 r = -ENOMEM; 3263 goto out_free_3; 3264 } 3265 3266 r = kvm_async_pf_init(); 3267 if (r) 3268 goto out_free; 3269 3270 kvm_chardev_ops.owner = module; 3271 kvm_vm_fops.owner = module; 3272 kvm_vcpu_fops.owner = module; 3273 3274 r = misc_register(&kvm_dev); 3275 if (r) { 3276 printk(KERN_ERR "kvm: misc device register failed\n"); 3277 goto out_unreg; 3278 } 3279 3280 register_syscore_ops(&kvm_syscore_ops); 3281 3282 kvm_preempt_ops.sched_in = kvm_sched_in; 3283 kvm_preempt_ops.sched_out = kvm_sched_out; 3284 3285 r = kvm_init_debug(); 3286 if (r) { 3287 printk(KERN_ERR "kvm: create debugfs files failed\n"); 3288 goto out_undebugfs; 3289 } 3290 3291 r = kvm_vfio_ops_init(); 3292 WARN_ON(r); 3293 3294 return 0; 3295 3296 out_undebugfs: 3297 unregister_syscore_ops(&kvm_syscore_ops); 3298 misc_deregister(&kvm_dev); 3299 out_unreg: 3300 kvm_async_pf_deinit(); 3301 out_free: 3302 kmem_cache_destroy(kvm_vcpu_cache); 3303 out_free_3: 3304 unregister_reboot_notifier(&kvm_reboot_notifier); 3305 unregister_cpu_notifier(&kvm_cpu_notifier); 3306 out_free_2: 3307 out_free_1: 3308 kvm_arch_hardware_unsetup(); 3309 out_free_0a: 3310 free_cpumask_var(cpus_hardware_enabled); 3311 out_free_0: 3312 kvm_irqfd_exit(); 3313 out_irqfd: 3314 kvm_arch_exit(); 3315 out_fail: 3316 return r; 3317 } 3318 EXPORT_SYMBOL_GPL(kvm_init); 3319 3320 void kvm_exit(void) 3321 { 3322 kvm_exit_debug(); 3323 misc_deregister(&kvm_dev); 3324 kmem_cache_destroy(kvm_vcpu_cache); 3325 kvm_async_pf_deinit(); 3326 unregister_syscore_ops(&kvm_syscore_ops); 3327 unregister_reboot_notifier(&kvm_reboot_notifier); 3328 unregister_cpu_notifier(&kvm_cpu_notifier); 3329 on_each_cpu(hardware_disable_nolock, NULL, 1); 3330 kvm_arch_hardware_unsetup(); 3331 kvm_arch_exit(); 3332 kvm_irqfd_exit(); 3333 free_cpumask_var(cpus_hardware_enabled); 3334 kvm_vfio_ops_exit(); 3335 } 3336 EXPORT_SYMBOL_GPL(kvm_exit); 3337
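/*
 * Illustrative userspace sketch (not part of this file or of the kernel
 * build): a minimal sequence that exercises the generic ioctls implemented
 * above -- KVM_GET_API_VERSION and KVM_GET_VCPU_MMAP_SIZE on /dev/kvm,
 * KVM_CREATE_VM (kvm_dev_ioctl_create_vm()), KVM_CREATE_VCPU and KVM_RUN.
 * It assumes a Linux host exposing /dev/kvm and the uapi <linux/kvm.h>
 * header; error handling, guest memory setup (KVM_SET_USER_MEMORY_REGION)
 * and register setup are omitted, so the single KVM_RUN is only expected
 * to return with some exit_reason, not to run a useful guest.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kvm.h>
 *
 *	int main(void)
 *	{
 *		int kvm_fd, vm_fd, vcpu_fd, mmap_size;
 *		struct kvm_run *run;
 *
 *		kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *		if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *			return 1;
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *		vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *		mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *		run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, vcpu_fd, 0);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		printf("exit_reason = %u\n", run->exit_reason);
 *		return 0;
 *	}
 */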