/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *      kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_reserved_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        if (mutex_lock_killable(&vcpu->mutex))
                return -EINTR;
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
        return 0;
}
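
/*
 * Illustrative sketch, not part of the original file: callers are expected
 * to bracket any work on vcpu state with the vcpu_load()/vcpu_put() pair
 * defined here, along the lines of
 *
 *      if (vcpu_load(vcpu))
 *              return -EINTR;
 *      ...touch vcpu state while it is loaded on this CPU...
 *      vcpu_put(vcpu);
 *
 * kvm_vcpu_ioctl() below follows exactly this pattern.
 */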

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        me = get_cpu();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;

                /* Set ->requests bit before we read ->mode */
                smp_mb();

                if (cpus != NULL && cpu != -1 && cpu != me &&
                    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        put_cpu();
        free_cpumask_var(cpus);
        return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        long dirty_count = kvm->tlbs_dirty;

        smp_mb();
        if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
        kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
        kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        vcpu->pid = NULL;
        init_waitqueue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        kvm_vcpu_set_in_spin_loop(vcpu, false);
        kvm_vcpu_set_dy_eligible(vcpu, false);
        vcpu->preempted = false;

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        put_pid(vcpu->pid);
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush, idx;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed.
         * If instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);

        kvm_arch_mmu_notifier_invalidate_page(kvm, address);

        srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
        need_tlb_flush |= kvm->tlbs_dirty;
        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        smp_wmb();
        /*
         * The above sequence increase must be visible before the
         * below count decrease, which is ensured by the smp_wmb above
         * in conjunction with the smp_rmb in mmu_notifier_retry().
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        young = kvm_age_hva(kvm, start, end);
        if (young)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        young = kvm_test_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_arch_flush_shadow_all(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .test_young             = kvm_mmu_notifier_test_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static void kvm_init_memslots_id(struct kvm *kvm)
{
        int i;
        struct kvm_memslots *slots = kvm->memslots;

        for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
                slots->id_to_index[i] = slots->memslots[i].id = i;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
        int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_no_disable;

        r = hardware_enable_all();
        if (r)
                goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err_no_srcu;

        /*
         * Init kvm generation close to the maximum to easily test the
         * code of handling generation number wrap-around.
         */
        kvm->memslots->generation = -150;

        kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
        if (init_srcu_struct(&kvm->irq_srcu))
                goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
                if (!kvm->buses[i])
                        goto out_err;
        }

        spin_lock_init(&kvm->mmu_lock);
        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        INIT_LIST_HEAD(&kvm->devices);

        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;

        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);

        return kvm;

out_err:
        cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
        hardware_disable_all();
out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);
        kvm_arch_free_vm(kvm);
        return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
        if (size > PAGE_SIZE)
                return vzalloc(size);
        else
                return kzalloc(size, GFP_KERNEL);
}

void kvm_kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        if (!memslot->dirty_bitmap)
                return;

        kvm_kvfree(memslot->dirty_bitmap);
        memslot->dirty_bitmap = NULL;
}
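
/*
 * Illustrative sketch, not part of the original file: buffers obtained from
 * kvm_kvzalloc() are released with kvm_kvfree(), which uses is_vmalloc_addr()
 * to match whichever allocator was picked above.  A caller (with a
 * hypothetical size "nbytes") would do roughly:
 *
 *      void *buf = kvm_kvzalloc(nbytes);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...use buf...
 *      kvm_kvfree(buf);
 *
 * This is exactly how memslot dirty bitmaps are handled by
 * kvm_create_dirty_bitmap() and kvm_destroy_dirty_bitmap().
 */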

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);

        kvm_arch_free_memslot(kvm, free, dont);

        free->npages = 0;
}

static void kvm_free_physmem(struct kvm *kvm)
{
        struct kvm_memslots *slots = kvm->memslots;
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                kvm_free_physmem_slot(kvm, memslot, NULL);

        kfree(kvm->memslots);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
        struct list_head *node, *tmp;

        list_for_each_safe(node, tmp, &kvm->devices) {
                struct kvm_device *dev =
                        list_entry(node, struct kvm_device, vm_node);

                list_del(node);
                dev->ops->destroy(dev);
        }
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        int i;
        struct mm_struct *mm = kvm->mm;

        kvm_arch_sync_events(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
                kvm_io_bus_destroy(kvm->buses[i]);
        kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
        kvm_arch_flush_shadow_all(kvm);
#endif
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
        kvm_free_physmem(kvm);
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
        hardware_disable_all();
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_irqfd_release(kvm);

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

        memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;

        return 0;
}

/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so binary search could be used to lookup GFN.
 * Sorting algorithm takes advantage of having initially
 * sorted array and known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
                            struct kvm_memory_slot *new)
{
        int id = new->id;
        int i = slots->id_to_index[id];
        struct kvm_memory_slot *mslots = slots->memslots;

        WARN_ON(mslots[i].id != id);
        if (!new->npages) {
                new->base_gfn = 0;
                if (mslots[i].npages)
                        slots->used_slots--;
        } else {
                if (!mslots[i].npages)
                        slots->used_slots++;
        }

        while (i < KVM_MEM_SLOTS_NUM - 1 &&
               new->base_gfn <= mslots[i + 1].base_gfn) {
                if (!mslots[i + 1].npages)
                        break;
                mslots[i] = mslots[i + 1];
                slots->id_to_index[mslots[i].id] = i;
                i++;
        }
        while (i > 0 &&
               new->base_gfn > mslots[i - 1].base_gfn) {
                mslots[i] = mslots[i - 1];
                slots->id_to_index[mslots[i].id] = i;
                i--;
        }

        mslots[i] = *new;
        slots->id_to_index[mslots[i].id] = i;
}

static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
        valid_flags |= KVM_MEM_READONLY;
#endif

        if (mem->flags & ~valid_flags)
                return -EINVAL;

        return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
                                                 struct kvm_memslots *slots)
{
        struct kvm_memslots *old_memslots = kvm->memslots;

        /*
         * Set the low bit in the generation, which disables SPTE caching
         * until the end of synchronize_srcu_expedited.
         */
        WARN_ON(old_memslots->generation & 1);
        slots->generation = old_memslots->generation + 1;

        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);

        /*
         * Increment the new memslot generation a second time. This prevents
         * vm exits that race with memslot updates from caching a memslot
         * generation that will (potentially) be valid forever.
         */
        slots->generation++;

        kvm_arch_memslots_updated(kvm);

        return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        struct kvm_memory_slot *slot;
        struct kvm_memory_slot old, new;
        struct kvm_memslots *slots = NULL, *old_memslots;
        enum kvm_mr_change change;

        r = check_memory_region_flags(mem);
        if (r)
                goto out;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        /* We can read the guest memory with __xxx_user() later on. */
        if ((mem->slot < KVM_USER_MEM_SLOTS) &&
            ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
             !access_ok(VERIFY_WRITE,
                        (void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size)))
                goto out;
        if (mem->slot >= KVM_MEM_SLOTS_NUM)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        slot = id_to_memslot(kvm->memslots, mem->slot);
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (npages > KVM_MEM_MAX_NR_PAGES)
                goto out;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *slot;

        new.id = mem->slot;
        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        if (npages) {
                if (!old.npages)
                        change = KVM_MR_CREATE;
                else { /* Modify an existing slot. */
                        if ((mem->userspace_addr != old.userspace_addr) ||
                            (npages != old.npages) ||
                            ((new.flags ^ old.flags) & KVM_MEM_READONLY))
                                goto out;

                        if (base_gfn != old.base_gfn)
                                change = KVM_MR_MOVE;
                        else if (new.flags != old.flags)
                                change = KVM_MR_FLAGS_ONLY;
                        else { /* Nothing to change. */
                                r = 0;
                                goto out;
                        }
                }
        } else if (old.npages) {
                change = KVM_MR_DELETE;
        } else /* Modify a non-existent slot: disallowed. */
                goto out;

        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
                /* Check for overlaps */
                r = -EEXIST;
                kvm_for_each_memslot(slot, kvm->memslots) {
                        if ((slot->id >= KVM_USER_MEM_SLOTS) ||
                            (slot->id == mem->slot))
                                continue;
                        if (!((base_gfn + npages <= slot->base_gfn) ||
                              (base_gfn >= slot->base_gfn + slot->npages)))
                                goto out;
                }
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;
        if (change == KVM_MR_CREATE) {
                new.userspace_addr = mem->userspace_addr;

                if (kvm_arch_create_memslot(kvm, &new, npages))
                        goto out_free;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                if (kvm_create_dirty_bitmap(&new) < 0)
                        goto out_free;
        }

        slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                        GFP_KERNEL);
        if (!slots)
                goto out_free;

        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
                slot = id_to_memslot(slots, mem->slot);
                slot->flags |= KVM_MEMSLOT_INVALID;

                old_memslots = install_new_memslots(kvm, slots);

                /* slot was deleted or moved, clear iommu mapping */
                kvm_iommu_unmap_pages(kvm, &old);
                /* From this point no new shadow pages pointing to a deleted,
                 * or moved, memslot will be created.
                 *
                 * validation of sp->gfn happens in:
                 *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
                 *      - kvm_is_visible_gfn (mmu_check_roots)
                 */
                kvm_arch_flush_shadow_memslot(kvm, slot);

                /*
                 * We can re-use the old_memslots from above, the only difference
                 * from the currently installed memslots is the invalid flag.  This
                 * will get overwritten by update_memslots anyway.
                 */
                slots = old_memslots;
        }

        r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
        if (r)
                goto out_slots;

        /* actual memory is freed via old in kvm_free_physmem_slot below */
        if (change == KVM_MR_DELETE) {
                new.dirty_bitmap = NULL;
                memset(&new.arch, 0, sizeof(new.arch));
        }

        update_memslots(slots, &new);
        old_memslots = install_new_memslots(kvm, slots);

        kvm_arch_commit_memory_region(kvm, mem, &old, change);

        kvm_free_physmem_slot(kvm, &old, &new);
        kfree(old_memslots);

        /*
         * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
         * un-mapped and re-mapped if their base changes.  Since base change
         * unmapping is handled above with slot deletion, mapping alone is
         * needed here.  Anything else the iommu might care about for existing
         * slots (size changes, userspace addr changes and read-only flag
         * changes) is disallowed above, so any other attribute changes getting
         * here can be skipped.
         */
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
                r = kvm_iommu_map_pages(kvm, &new);
                return r;
        }

        return 0;

out_slots:
        kfree(slots);
out_free:
        kvm_free_physmem_slot(kvm, &new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem)
{
        int r;

        mutex_lock(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem);
        mutex_unlock(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                          struct kvm_userspace_memory_region *mem)
{
        if (mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        unsigned long n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = kvm_dirty_bitmap_bytes(memslot);

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

bool kvm_largepages_enabled(void)
{
        return largepages_enabled;
}

void kvm_disable_largepages(void)
{
        largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

        if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
              memslot->flags & KVM_MEMSLOT_INVALID)
                return 0;

        return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
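
/*
 * Illustrative note, not part of the original file: once a memslot has been
 * looked up and passes the KVM_MEMSLOT_INVALID/read-only checks in
 * __gfn_to_hva_many(), the gfn-to-hva translation itself is simple offset
 * arithmetic within the slot, conceptually
 *
 *      hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 *
 * which is what __gfn_to_hva_memslot() (in kvm_host.h) computes for the
 * helpers below.
 */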

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
        struct vm_area_struct *vma;
        unsigned long addr, size;

        size = PAGE_SIZE;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, addr);
        if (!vma)
                goto out;

        size = vma_kernel_pagesize(vma);

out:
        up_read(&current->mm->mmap_sem);

        return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
        return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
                                       gfn_t *nr_pages, bool write)
{
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
                return KVM_HVA_ERR_BAD;

        if (memslot_is_readonly(slot) && write)
                return KVM_HVA_ERR_RO_BAD;

        if (nr_pages)
                *nr_pages = slot->npages - (gfn - slot->base_gfn);

        return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
                                     gfn_t *nr_pages)
{
        return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                 gfn_t gfn)
{
        return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
                                      gfn_t gfn, bool *writable)
{
        unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

        if (!kvm_is_error_hva(hva) && writable)
                *writable = !memslot_is_readonly(slot);

        return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

        return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int kvm_read_hva(void *data, void __user *hva, int len)
{
        return __copy_from_user(data, hva, len);
}

static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
{
        return __copy_from_user_inatomic(data, hva, len);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
                                unsigned long start, int write, struct page **page)
{
        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

        if (write)
                flags |= FOLL_WRITE;

        return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
                         unsigned long addr, bool write_fault,
                         struct page **pagep)
{
        int npages;
        int locked = 1;
        int flags = FOLL_TOUCH | FOLL_HWPOISON |
                    (pagep ? FOLL_GET : 0) |
                    (write_fault ? FOLL_WRITE : 0);

        /*
         * If retrying the fault, we get here *not* having allowed the filemap
         * to wait on the page lock. We should now allow waiting on the IO with
         * the mmap semaphore released.
         */
        down_read(&mm->mmap_sem);
        npages = __get_user_pages(tsk, mm, addr, 1, flags, pagep, NULL,
                                  &locked);
        if (!locked) {
                VM_BUG_ON(npages);

                if (!pagep)
                        return 0;

                /*
                 * The previous call has now waited on the IO. Now we can
                 * retry and complete.
                 * Pass TRIED to ensure we do not reschedule async IO
                 * (see e.g. filemap_fault).
                 */
                down_read(&mm->mmap_sem);
                npages = __get_user_pages(tsk, mm, addr, 1, flags | FOLL_TRIED,
                                          pagep, NULL, NULL);
        }
        up_read(&mm->mmap_sem);
        return npages;
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
        int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

        rc = __get_user_pages(current, current->mm, addr, 1,
                              flags, NULL, NULL, NULL);
        return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
                            bool write_fault, bool *writable, pfn_t *pfn)
{
        struct page *page[1];
        int npages;

        if (!(async || atomic))
                return false;

        /*
         * Fast pin a writable pfn only if it is a write fault request
         * or the caller allows to map a writable pfn for a read fault
         * request.
         */
        if (!(write_fault || writable))
                return false;

        npages = __get_user_pages_fast(addr, 1, 1, page);
        if (npages == 1) {
                *pfn = page_to_pfn(page[0]);

                if (writable)
                        *writable = true;
                return true;
        }

        return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                           bool *writable, pfn_t *pfn)
{
        struct page *page[1];
        int npages = 0;

        might_sleep();

        if (writable)
                *writable = write_fault;

        if (async) {
                down_read(&current->mm->mmap_sem);
                npages = get_user_page_nowait(current, current->mm,
                                              addr, write_fault, page);
                up_read(&current->mm->mmap_sem);
        } else {
                /*
                 * By now we have tried gup_fast, and possibly async_pf, and we
                 * are certainly not atomic. Time to retry the gup, allowing
                 * mmap semaphore to be relinquished in the case of IO.
                 */
                npages = kvm_get_user_page_io(current, current->mm, addr,
                                              write_fault, page);
        }
        if (npages != 1)
                return npages;

        /* map read fault as writable if possible */
        if (unlikely(!write_fault) && writable) {
                struct page *wpage[1];

                npages = __get_user_pages_fast(addr, 1, 1, wpage);
                if (npages == 1) {
                        *writable = true;
                        put_page(page[0]);
                        page[0] = wpage[0];
                }

                npages = 1;
        }
        *pfn = page_to_pfn(page[0]);
        return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
        if (unlikely(!(vma->vm_flags & VM_READ)))
                return false;

        if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
                return false;

        return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function need to wait IO complete if the
 *         host page is not in the memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
                        bool write_fault, bool *writable)
{
        struct vm_area_struct *vma;
        pfn_t pfn = 0;
        int npages;

        /* we can do it either atomically or asynchronously, not both */
        BUG_ON(atomic && async);

        if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
                return pfn;

        if (atomic)
                return KVM_PFN_ERR_FAULT;

        npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
        if (npages == 1)
                return pfn;

        down_read(&current->mm->mmap_sem);
        if (npages == -EHWPOISON ||
              (!async && check_user_page_hwpoison(addr))) {
                pfn = KVM_PFN_ERR_HWPOISON;
                goto exit;
        }

        vma = find_vma_intersection(current->mm, addr, addr + 1);

        if (vma == NULL)
                pfn = KVM_PFN_ERR_FAULT;
        else if ((vma->vm_flags & VM_PFNMAP)) {
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
                BUG_ON(!kvm_is_reserved_pfn(pfn));
        } else {
                if (async && vma_is_valid(vma, write_fault))
                        *async = true;
                pfn = KVM_PFN_ERR_FAULT;
        }
exit:
        up_read(&current->mm->mmap_sem);
        return pfn;
}

static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
                     bool *async, bool write_fault, bool *writable)
{
        unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

        if (addr == KVM_HVA_ERR_RO_BAD)
                return KVM_PFN_ERR_RO_FAULT;

        if (kvm_is_error_hva(addr))
                return KVM_PFN_NOSLOT;

        /* Do not map writable pfn in the readonly memslot. */
        if (writable && memslot_is_readonly(slot)) {
                *writable = false;
                writable = NULL;
        }

        return hva_to_pfn(addr, atomic, async, write_fault,
                          writable);
}

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
                          bool write_fault, bool *writable)
{
        struct kvm_memory_slot *slot;

        if (async)
                *async = false;

        slot = gfn_to_memslot(kvm, gfn);

        return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
                                    writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable)
{
        return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable)
{
        return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages)
{
        unsigned long addr;
        gfn_t entry;

        addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
        if (kvm_is_error_hva(addr))
                return -1;

        if (entry < nr_pages)
                return 0;

        return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
        if (is_error_noslot_pfn(pfn))
                return KVM_ERR_PTR_BAD_PAGE;

        if (kvm_is_reserved_pfn(pfn)) {
                WARN_ON(1);
                return KVM_ERR_PTR_BAD_PAGE;
        }

        return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);

        return kvm_pfn_to_page(pfn);
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        WARN_ON(is_error_page(page));

        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        WARN_ON(is_error_page(page));

        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
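
/*
 * Illustrative sketch, not part of the original file: a caller that modifies
 * guest memory through a page obtained from gfn_to_page() is expected to drop
 * its reference with the "dirty" variant so the page is marked dirty before
 * it is released, e.g.
 *
 *      struct page *page = gfn_to_page(kvm, gfn);
 *
 *      if (!is_error_page(page)) {
 *              ...write to page contents...
 *              kvm_release_page_dirty(page);
 *      }
 *
 * Read-only users pair gfn_to_page() with kvm_release_page_clean() instead.
 */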

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);

                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_reserved_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_reserved_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva_prot(kvm, gfn, NULL);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = kvm_read_hva(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva_prot(kvm, gfn, NULL);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = __copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int offset = offset_in_page(gpa);
        gfn_t start_gfn = gpa >> PAGE_SHIFT;
        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
        gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
        gfn_t nr_pages_avail;

        ghc->gpa = gpa;
        ghc->generation = slots->generation;
        ghc->len = len;
        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
        if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
                ghc->hva += offset;
        } else {
                /*
                 * If the requested region crosses two memslots, we still
                 * verify that the entire region is valid here.
                 */
                while (start_gfn <= end_gfn) {
                        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
                        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
                                                   &nr_pages_avail);
                        if (kvm_is_error_hva(ghc->hva))
                                return -EFAULT;
                        start_gfn += nr_pages_avail;
                }
                /* Use the slow path for cross page reads and writes. */
                ghc->memslot = NULL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;

        BUG_ON(len > ghc->len);

        if (slots->generation != ghc->generation)
                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

        if (unlikely(!ghc->memslot))
                return kvm_write_guest(kvm, ghc->gpa, data, len);

        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;

        r = __copy_to_user((void __user *)ghc->hva, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;

        BUG_ON(len > ghc->len);

        if (slots->generation != ghc->generation)
                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

        if (unlikely(!ghc->memslot))
                return kvm_read_guest(kvm, ghc->gpa, data, len);

        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;

        r = __copy_from_user(data, (void __user *)ghc->hva, len);
        if (r)
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

        return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

static void mark_page_dirty_in_slot(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    gfn_t gfn)
{
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                set_bit_le(rel_gfn, memslot->dirty_bitmap);
        }
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        memslot = gfn_to_memslot(kvm, gfn);
        mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
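
/*
 * Illustrative sketch, not part of the original file: a typical user of the
 * gfn_to_hva_cache helpers above initializes the cache once for a fixed
 * guest address and then performs repeated accesses without re-resolving the
 * memslot on every write ("gpa" and "val" are hypothetical here):
 *
 *      struct gfn_to_hva_cache ghc;
 *
 *      if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *              return -EFAULT;
 *      ...
 *      if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *              return -EFAULT;
 *
 * The generation check in kvm_write_guest_cached() re-initializes the cache
 * automatically if the memslots have changed in the meantime.
 */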

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_arch_vcpu_runnable(vcpu)) {
                        kvm_make_request(KVM_REQ_UNHALT, vcpu);
                        break;
                }
                if (kvm_cpu_has_pending_timer(vcpu))
                        break;
                if (signal_pending(current))
                        break;

                schedule();
        }

        finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;
        wait_queue_head_t *wqp;

        wqp = kvm_arch_vcpu_wq(vcpu);
        if (waitqueue_active(wqp)) {
                wake_up_interruptible(wqp);
                ++vcpu->stat.halt_wakeup;
        }

        me = get_cpu();
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                if (kvm_arch_vcpu_should_kick(vcpu))
                        smp_send_reschedule(cpu);
        put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
        struct pid *pid;
        struct task_struct *task = NULL;
        int ret = 0;

        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
                task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return ret;
        ret = yield_to(task, 1);
        put_task_struct(task);

        return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * Most eligible candidate to yield is decided by following heuristics:
 *
 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
 *     (preempted lock holder), indicated by @in_spin_loop.
 *     Set at the beginning and cleared at the end of interception/PLE handler.
 *
 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
 *     chance last time (mostly it has become eligible now since we have probably
 *     yielded to lockholder in last iteration. This is done by toggling
 *     @dy_eligible each time a VCPU checked for eligibility.)
 *
 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
 * to preempted lock-holder could result in wrong VCPU selection and CPU
 * burning. Giving priority for a potential lock-holder increases lock
 * progress.
 *
 * Since algorithm is based on heuristics, accessing another VCPU data without
 * locking does not harm. It may result in trying to yield to same VCPU, fail
 * and continue with next VCPU and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        bool eligible;

        eligible = !vcpu->spin_loop.in_spin_loop ||
                    vcpu->spin_loop.dy_eligible;

        if (vcpu->spin_loop.in_spin_loop)
                kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

        return eligible;
#else
        return true;
#endif
}

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
        struct kvm *kvm = me->kvm;
        struct kvm_vcpu *vcpu;
        int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
        int yielded = 0;
        int try = 3;
        int pass;
        int i;

        kvm_vcpu_set_in_spin_loop(me, true);
        /*
         * We boost the priority of a VCPU that is runnable but not
         * currently running, because it got preempted by something
         * else and called schedule in __vcpu_run.  Hopefully that
         * VCPU is holding the lock that we need and will release it.
         * We approximate round-robin by starting at the last boosted VCPU.
         */
        for (pass = 0; pass < 2 && !yielded && try; pass++) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!pass && i <= last_boosted_vcpu) {
                                i = last_boosted_vcpu;
                                continue;
                        } else if (pass && i > last_boosted_vcpu)
                                break;
                        if (!ACCESS_ONCE(vcpu->preempted))
                                continue;
                        if (vcpu == me)
                                continue;
                        if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
                                continue;
                        if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
                                continue;

                        yielded = kvm_vcpu_yield_to(vcpu);
                        if (yielded > 0) {
                                kvm->last_boosted_vcpu = i;
                                break;
                        } else if (yielded < 0) {
                                try--;
                                if (!try)
                                        break;
                        }
                }
        }
        kvm_vcpu_set_in_spin_loop(me, false);

        /* Ensure vcpu is not eligible during next spinloop */
        kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return kvm_arch_vcpu_fault(vcpu, vmf);
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
        .mmap           = kvm_vcpu_mmap,
        .llseek         = noop_llseek,
};
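
/*
 * Illustrative note, not part of the original file (userspace view): the
 * kvm_vcpu_fault() handler above backs an mmap() of the vcpu fd created by
 * create_vcpu_fd() below, so userspace reaches the shared kvm_run area
 * roughly as
 *
 *      run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 vcpu_fd, 0);
 *
 * where mmap_size comes from the KVM_GET_VCPU_MMAP_SIZE ioctl, page offset 0
 * maps vcpu->run and, on x86, KVM_PIO_PAGE_OFFSET maps vcpu->arch.pio_data.
 */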

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
        int r;
        struct kvm_vcpu *vcpu, *v;

        if (id >= KVM_MAX_VCPUS)
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, id);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                goto vcpu_destroy;

        mutex_lock(&kvm->lock);
        if (!kvm_vcpu_compatible(vcpu)) {
                r = -EINVAL;
                goto unlock_vcpu_destroy;
        }
        if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
                r = -EINVAL;
                goto unlock_vcpu_destroy;
        }

        kvm_for_each_vcpu(r, v, kvm)
                if (v->vcpu_id == id) {
                        r = -EEXIST;
                        goto unlock_vcpu_destroy;
                }

        BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0) {
                kvm_put_kvm(kvm);
                goto unlock_vcpu_destroy;
        }

        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);

        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_postcreate(vcpu);
        return r;

unlock_vcpu_destroy:
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_fpu *fpu = NULL;
        struct kvm_sregs *kvm_sregs = NULL;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;

        if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
                return -EINVAL;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.
         */
        if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
                return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


        r = vcpu_load(vcpu);
        if (r)
                return r;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
                        /* The thread running this VCPU changed. */
                        struct pid *oldpid = vcpu->pid;
                        struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
                        rcu_assign_pointer(vcpu->pid, newpid);
                        if (oldpid)
                                synchronize_rcu();
                        put_pid(oldpid);
                }
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
                if (IS_ERR(kvm_regs)) {
                        r = PTR_ERR(kvm_regs);
                        goto out;
                }
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
                if (IS_ERR(kvm_sregs)) {
                        r = PTR_ERR(kvm_sregs);
                        kvm_sregs = NULL;
                        goto out;
                }
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_GUEST_DEBUG: {
                struct kvm_guest_debug dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
                if (r)
                        goto out;
out; 2187 r = -EFAULT; 2188 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2189 goto out; 2190 r = 0; 2191 break; 2192 } 2193 case KVM_SET_FPU: { 2194 fpu = memdup_user(argp, sizeof(*fpu)); 2195 if (IS_ERR(fpu)) { 2196 r = PTR_ERR(fpu); 2197 fpu = NULL; 2198 goto out; 2199 } 2200 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2201 break; 2202 } 2203 default: 2204 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2205 } 2206 out: 2207 vcpu_put(vcpu); 2208 kfree(fpu); 2209 kfree(kvm_sregs); 2210 return r; 2211 } 2212 2213 #ifdef CONFIG_COMPAT 2214 static long kvm_vcpu_compat_ioctl(struct file *filp, 2215 unsigned int ioctl, unsigned long arg) 2216 { 2217 struct kvm_vcpu *vcpu = filp->private_data; 2218 void __user *argp = compat_ptr(arg); 2219 int r; 2220 2221 if (vcpu->kvm->mm != current->mm) 2222 return -EIO; 2223 2224 switch (ioctl) { 2225 case KVM_SET_SIGNAL_MASK: { 2226 struct kvm_signal_mask __user *sigmask_arg = argp; 2227 struct kvm_signal_mask kvm_sigmask; 2228 compat_sigset_t csigset; 2229 sigset_t sigset; 2230 2231 if (argp) { 2232 r = -EFAULT; 2233 if (copy_from_user(&kvm_sigmask, argp, 2234 sizeof kvm_sigmask)) 2235 goto out; 2236 r = -EINVAL; 2237 if (kvm_sigmask.len != sizeof csigset) 2238 goto out; 2239 r = -EFAULT; 2240 if (copy_from_user(&csigset, sigmask_arg->sigset, 2241 sizeof csigset)) 2242 goto out; 2243 sigset_from_compat(&sigset, &csigset); 2244 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2245 } else 2246 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2247 break; 2248 } 2249 default: 2250 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2251 } 2252 2253 out: 2254 return r; 2255 } 2256 #endif 2257 2258 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2259 int (*accessor)(struct kvm_device *dev, 2260 struct kvm_device_attr *attr), 2261 unsigned long arg) 2262 { 2263 struct kvm_device_attr attr; 2264 2265 if (!accessor) 2266 return -EPERM; 2267 2268 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2269 return -EFAULT; 2270 2271 return accessor(dev, &attr); 2272 } 2273 2274 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2275 unsigned long arg) 2276 { 2277 struct kvm_device *dev = filp->private_data; 2278 2279 switch (ioctl) { 2280 case KVM_SET_DEVICE_ATTR: 2281 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2282 case KVM_GET_DEVICE_ATTR: 2283 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2284 case KVM_HAS_DEVICE_ATTR: 2285 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2286 default: 2287 if (dev->ops->ioctl) 2288 return dev->ops->ioctl(dev, ioctl, arg); 2289 2290 return -ENOTTY; 2291 } 2292 } 2293 2294 static int kvm_device_release(struct inode *inode, struct file *filp) 2295 { 2296 struct kvm_device *dev = filp->private_data; 2297 struct kvm *kvm = dev->kvm; 2298 2299 kvm_put_kvm(kvm); 2300 return 0; 2301 } 2302 2303 static const struct file_operations kvm_device_fops = { 2304 .unlocked_ioctl = kvm_device_ioctl, 2305 #ifdef CONFIG_COMPAT 2306 .compat_ioctl = kvm_device_ioctl, 2307 #endif 2308 .release = kvm_device_release, 2309 }; 2310 2311 struct kvm_device *kvm_device_from_filp(struct file *filp) 2312 { 2313 if (filp->f_op != &kvm_device_fops) 2314 return NULL; 2315 2316 return filp->private_data; 2317 } 2318 2319 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2320 #ifdef CONFIG_KVM_MPIC 2321 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2322 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2323 #endif 2324 2325 #ifdef CONFIG_KVM_XICS 2326 [KVM_DEV_TYPE_XICS] = &kvm_xics_ops, 2327 
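/*
 * KVM_SET/GET/HAS_DEVICE_ATTR above all funnel through
 * kvm_device_ioctl_attr(), which copies one struct kvm_device_attr from
 * userspace and hands it to the device's accessor.  A minimal userspace
 * sketch, assuming a hypothetical dev_fd returned by KVM_CREATE_DEVICE;
 * the group/attr identifiers and the pointed-to value are device-specific
 * placeholders:
 *
 *	__u64 val = ...;
 *	struct kvm_device_attr attr = {
 *		.group = ...,			// device-defined attribute group
 *		.attr  = ...,			// attribute id within that group
 *		.addr  = (__u64)(unsigned long)&val,
 *	};
 *	if (ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */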
#endif 2328 }; 2329 2330 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2331 { 2332 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2333 return -ENOSPC; 2334 2335 if (kvm_device_ops_table[type] != NULL) 2336 return -EEXIST; 2337 2338 kvm_device_ops_table[type] = ops; 2339 return 0; 2340 } 2341 2342 void kvm_unregister_device_ops(u32 type) 2343 { 2344 if (kvm_device_ops_table[type] != NULL) 2345 kvm_device_ops_table[type] = NULL; 2346 } 2347 2348 static int kvm_ioctl_create_device(struct kvm *kvm, 2349 struct kvm_create_device *cd) 2350 { 2351 struct kvm_device_ops *ops = NULL; 2352 struct kvm_device *dev; 2353 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2354 int ret; 2355 2356 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2357 return -ENODEV; 2358 2359 ops = kvm_device_ops_table[cd->type]; 2360 if (ops == NULL) 2361 return -ENODEV; 2362 2363 if (test) 2364 return 0; 2365 2366 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2367 if (!dev) 2368 return -ENOMEM; 2369 2370 dev->ops = ops; 2371 dev->kvm = kvm; 2372 2373 ret = ops->create(dev, cd->type); 2374 if (ret < 0) { 2375 kfree(dev); 2376 return ret; 2377 } 2378 2379 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2380 if (ret < 0) { 2381 ops->destroy(dev); 2382 return ret; 2383 } 2384 2385 list_add(&dev->vm_node, &kvm->devices); 2386 kvm_get_kvm(kvm); 2387 cd->fd = ret; 2388 return 0; 2389 } 2390 2391 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2392 { 2393 switch (arg) { 2394 case KVM_CAP_USER_MEMORY: 2395 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2396 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2397 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2398 case KVM_CAP_SET_BOOT_CPU_ID: 2399 #endif 2400 case KVM_CAP_INTERNAL_ERROR_DATA: 2401 #ifdef CONFIG_HAVE_KVM_MSI 2402 case KVM_CAP_SIGNAL_MSI: 2403 #endif 2404 #ifdef CONFIG_HAVE_KVM_IRQFD 2405 case KVM_CAP_IRQFD_RESAMPLE: 2406 #endif 2407 case KVM_CAP_CHECK_EXTENSION_VM: 2408 return 1; 2409 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2410 case KVM_CAP_IRQ_ROUTING: 2411 return KVM_MAX_IRQ_ROUTES; 2412 #endif 2413 default: 2414 break; 2415 } 2416 return kvm_vm_ioctl_check_extension(kvm, arg); 2417 } 2418 2419 static long kvm_vm_ioctl(struct file *filp, 2420 unsigned int ioctl, unsigned long arg) 2421 { 2422 struct kvm *kvm = filp->private_data; 2423 void __user *argp = (void __user *)arg; 2424 int r; 2425 2426 if (kvm->mm != current->mm) 2427 return -EIO; 2428 switch (ioctl) { 2429 case KVM_CREATE_VCPU: 2430 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2431 break; 2432 case KVM_SET_USER_MEMORY_REGION: { 2433 struct kvm_userspace_memory_region kvm_userspace_mem; 2434 2435 r = -EFAULT; 2436 if (copy_from_user(&kvm_userspace_mem, argp, 2437 sizeof kvm_userspace_mem)) 2438 goto out; 2439 2440 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2441 break; 2442 } 2443 case KVM_GET_DIRTY_LOG: { 2444 struct kvm_dirty_log log; 2445 2446 r = -EFAULT; 2447 if (copy_from_user(&log, argp, sizeof log)) 2448 goto out; 2449 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2450 break; 2451 } 2452 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2453 case KVM_REGISTER_COALESCED_MMIO: { 2454 struct kvm_coalesced_mmio_zone zone; 2455 r = -EFAULT; 2456 if (copy_from_user(&zone, argp, sizeof zone)) 2457 goto out; 2458 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2459 break; 2460 } 2461 case KVM_UNREGISTER_COALESCED_MMIO: { 2462 struct kvm_coalesced_mmio_zone zone; 2463 r = -EFAULT; 2464 if (copy_from_user(&zone, argp, sizeof zone)) 2465 goto out; 2466 r = 
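/*
 * The KVM_SET_USER_MEMORY_REGION case above is how userspace backs guest
 * physical memory with its own mappings.  A minimal sketch, assuming a
 * hypothetical vm_fd from KVM_CREATE_VM and with error handling trimmed:
 *
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,
 *		.userspace_addr  = (__u64)(unsigned long)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */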
kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2467 break; 2468 } 2469 #endif 2470 case KVM_IRQFD: { 2471 struct kvm_irqfd data; 2472 2473 r = -EFAULT; 2474 if (copy_from_user(&data, argp, sizeof data)) 2475 goto out; 2476 r = kvm_irqfd(kvm, &data); 2477 break; 2478 } 2479 case KVM_IOEVENTFD: { 2480 struct kvm_ioeventfd data; 2481 2482 r = -EFAULT; 2483 if (copy_from_user(&data, argp, sizeof data)) 2484 goto out; 2485 r = kvm_ioeventfd(kvm, &data); 2486 break; 2487 } 2488 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2489 case KVM_SET_BOOT_CPU_ID: 2490 r = 0; 2491 mutex_lock(&kvm->lock); 2492 if (atomic_read(&kvm->online_vcpus) != 0) 2493 r = -EBUSY; 2494 else 2495 kvm->bsp_vcpu_id = arg; 2496 mutex_unlock(&kvm->lock); 2497 break; 2498 #endif 2499 #ifdef CONFIG_HAVE_KVM_MSI 2500 case KVM_SIGNAL_MSI: { 2501 struct kvm_msi msi; 2502 2503 r = -EFAULT; 2504 if (copy_from_user(&msi, argp, sizeof msi)) 2505 goto out; 2506 r = kvm_send_userspace_msi(kvm, &msi); 2507 break; 2508 } 2509 #endif 2510 #ifdef __KVM_HAVE_IRQ_LINE 2511 case KVM_IRQ_LINE_STATUS: 2512 case KVM_IRQ_LINE: { 2513 struct kvm_irq_level irq_event; 2514 2515 r = -EFAULT; 2516 if (copy_from_user(&irq_event, argp, sizeof irq_event)) 2517 goto out; 2518 2519 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2520 ioctl == KVM_IRQ_LINE_STATUS); 2521 if (r) 2522 goto out; 2523 2524 r = -EFAULT; 2525 if (ioctl == KVM_IRQ_LINE_STATUS) { 2526 if (copy_to_user(argp, &irq_event, sizeof irq_event)) 2527 goto out; 2528 } 2529 2530 r = 0; 2531 break; 2532 } 2533 #endif 2534 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2535 case KVM_SET_GSI_ROUTING: { 2536 struct kvm_irq_routing routing; 2537 struct kvm_irq_routing __user *urouting; 2538 struct kvm_irq_routing_entry *entries; 2539 2540 r = -EFAULT; 2541 if (copy_from_user(&routing, argp, sizeof(routing))) 2542 goto out; 2543 r = -EINVAL; 2544 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2545 goto out; 2546 if (routing.flags) 2547 goto out; 2548 r = -ENOMEM; 2549 entries = vmalloc(routing.nr * sizeof(*entries)); 2550 if (!entries) 2551 goto out; 2552 r = -EFAULT; 2553 urouting = argp; 2554 if (copy_from_user(entries, urouting->entries, 2555 routing.nr * sizeof(*entries))) 2556 goto out_free_irq_routing; 2557 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2558 routing.flags); 2559 out_free_irq_routing: 2560 vfree(entries); 2561 break; 2562 } 2563 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2564 case KVM_CREATE_DEVICE: { 2565 struct kvm_create_device cd; 2566 2567 r = -EFAULT; 2568 if (copy_from_user(&cd, argp, sizeof(cd))) 2569 goto out; 2570 2571 r = kvm_ioctl_create_device(kvm, &cd); 2572 if (r) 2573 goto out; 2574 2575 r = -EFAULT; 2576 if (copy_to_user(argp, &cd, sizeof(cd))) 2577 goto out; 2578 2579 r = 0; 2580 break; 2581 } 2582 case KVM_CHECK_EXTENSION: 2583 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 2584 break; 2585 default: 2586 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2587 } 2588 out: 2589 return r; 2590 } 2591 2592 #ifdef CONFIG_COMPAT 2593 struct compat_kvm_dirty_log { 2594 __u32 slot; 2595 __u32 padding1; 2596 union { 2597 compat_uptr_t dirty_bitmap; /* one bit per page */ 2598 __u64 padding2; 2599 }; 2600 }; 2601 2602 static long kvm_vm_compat_ioctl(struct file *filp, 2603 unsigned int ioctl, unsigned long arg) 2604 { 2605 struct kvm *kvm = filp->private_data; 2606 int r; 2607 2608 if (kvm->mm != current->mm) 2609 return -EIO; 2610 switch (ioctl) { 2611 case KVM_GET_DIRTY_LOG: { 2612 struct compat_kvm_dirty_log compat_log; 2613 struct kvm_dirty_log log; 2614 2615 r = -EFAULT; 2616 if 
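/*
 * KVM_IRQFD and KVM_IOEVENTFD above let userspace wire eventfds to guest
 * interrupt injection and to MMIO/PIO doorbell writes, so a device model
 * can be driven without a full exit to userspace.  A minimal sketch,
 * assuming a hypothetical vm_fd and eventfds created with eventfd(0, 0);
 * gsi and mmio_addr are placeholders:
 *
 *	struct kvm_irqfd irqfd = { .fd = irq_efd, .gsi = gsi };
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 *	struct kvm_ioeventfd ioev = {
 *		.addr = mmio_addr,		// guest-physical doorbell address
 *		.len  = 4,
 *		.fd   = io_efd,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
 */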
(copy_from_user(&compat_log, (void __user *)arg, 2617 sizeof(compat_log))) 2618 goto out; 2619 log.slot = compat_log.slot; 2620 log.padding1 = compat_log.padding1; 2621 log.padding2 = compat_log.padding2; 2622 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2623 2624 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2625 break; 2626 } 2627 default: 2628 r = kvm_vm_ioctl(filp, ioctl, arg); 2629 } 2630 2631 out: 2632 return r; 2633 } 2634 #endif 2635 2636 static struct file_operations kvm_vm_fops = { 2637 .release = kvm_vm_release, 2638 .unlocked_ioctl = kvm_vm_ioctl, 2639 #ifdef CONFIG_COMPAT 2640 .compat_ioctl = kvm_vm_compat_ioctl, 2641 #endif 2642 .llseek = noop_llseek, 2643 }; 2644 2645 static int kvm_dev_ioctl_create_vm(unsigned long type) 2646 { 2647 int r; 2648 struct kvm *kvm; 2649 2650 kvm = kvm_create_vm(type); 2651 if (IS_ERR(kvm)) 2652 return PTR_ERR(kvm); 2653 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2654 r = kvm_coalesced_mmio_init(kvm); 2655 if (r < 0) { 2656 kvm_put_kvm(kvm); 2657 return r; 2658 } 2659 #endif 2660 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2661 if (r < 0) 2662 kvm_put_kvm(kvm); 2663 2664 return r; 2665 } 2666 2667 static long kvm_dev_ioctl(struct file *filp, 2668 unsigned int ioctl, unsigned long arg) 2669 { 2670 long r = -EINVAL; 2671 2672 switch (ioctl) { 2673 case KVM_GET_API_VERSION: 2674 if (arg) 2675 goto out; 2676 r = KVM_API_VERSION; 2677 break; 2678 case KVM_CREATE_VM: 2679 r = kvm_dev_ioctl_create_vm(arg); 2680 break; 2681 case KVM_CHECK_EXTENSION: 2682 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 2683 break; 2684 case KVM_GET_VCPU_MMAP_SIZE: 2685 if (arg) 2686 goto out; 2687 r = PAGE_SIZE; /* struct kvm_run */ 2688 #ifdef CONFIG_X86 2689 r += PAGE_SIZE; /* pio data page */ 2690 #endif 2691 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2692 r += PAGE_SIZE; /* coalesced mmio ring page */ 2693 #endif 2694 break; 2695 case KVM_TRACE_ENABLE: 2696 case KVM_TRACE_PAUSE: 2697 case KVM_TRACE_DISABLE: 2698 r = -EOPNOTSUPP; 2699 break; 2700 default: 2701 return kvm_arch_dev_ioctl(filp, ioctl, arg); 2702 } 2703 out: 2704 return r; 2705 } 2706 2707 static struct file_operations kvm_chardev_ops = { 2708 .unlocked_ioctl = kvm_dev_ioctl, 2709 .compat_ioctl = kvm_dev_ioctl, 2710 .llseek = noop_llseek, 2711 }; 2712 2713 static struct miscdevice kvm_dev = { 2714 KVM_MINOR, 2715 "kvm", 2716 &kvm_chardev_ops, 2717 }; 2718 2719 static void hardware_enable_nolock(void *junk) 2720 { 2721 int cpu = raw_smp_processor_id(); 2722 int r; 2723 2724 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2725 return; 2726 2727 cpumask_set_cpu(cpu, cpus_hardware_enabled); 2728 2729 r = kvm_arch_hardware_enable(); 2730 2731 if (r) { 2732 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2733 atomic_inc(&hardware_enable_failed); 2734 printk(KERN_INFO "kvm: enabling virtualization on " 2735 "CPU%d failed\n", cpu); 2736 } 2737 } 2738 2739 static void hardware_enable(void) 2740 { 2741 raw_spin_lock(&kvm_count_lock); 2742 if (kvm_usage_count) 2743 hardware_enable_nolock(NULL); 2744 raw_spin_unlock(&kvm_count_lock); 2745 } 2746 2747 static void hardware_disable_nolock(void *junk) 2748 { 2749 int cpu = raw_smp_processor_id(); 2750 2751 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2752 return; 2753 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2754 kvm_arch_hardware_disable(); 2755 } 2756 2757 static void hardware_disable(void) 2758 { 2759 raw_spin_lock(&kvm_count_lock); 2760 if (kvm_usage_count) 2761 hardware_disable_nolock(NULL); 2762 
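/*
 * kvm_dev_ioctl() above is the /dev/kvm entry point: KVM_CREATE_VM hands
 * back an anonymous-inode VM fd served by kvm_vm_ioctl(), and
 * KVM_GET_VCPU_MMAP_SIZE tells userspace how much of a vcpu fd to mmap
 * for the shared struct kvm_run (plus the pio and coalesced-mmio pages
 * where configured).  A minimal sketch, error handling trimmed:
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		return -1;			// incompatible kernel
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	long sz     = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */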
raw_spin_unlock(&kvm_count_lock); 2763 } 2764 2765 static void hardware_disable_all_nolock(void) 2766 { 2767 BUG_ON(!kvm_usage_count); 2768 2769 kvm_usage_count--; 2770 if (!kvm_usage_count) 2771 on_each_cpu(hardware_disable_nolock, NULL, 1); 2772 } 2773 2774 static void hardware_disable_all(void) 2775 { 2776 raw_spin_lock(&kvm_count_lock); 2777 hardware_disable_all_nolock(); 2778 raw_spin_unlock(&kvm_count_lock); 2779 } 2780 2781 static int hardware_enable_all(void) 2782 { 2783 int r = 0; 2784 2785 raw_spin_lock(&kvm_count_lock); 2786 2787 kvm_usage_count++; 2788 if (kvm_usage_count == 1) { 2789 atomic_set(&hardware_enable_failed, 0); 2790 on_each_cpu(hardware_enable_nolock, NULL, 1); 2791 2792 if (atomic_read(&hardware_enable_failed)) { 2793 hardware_disable_all_nolock(); 2794 r = -EBUSY; 2795 } 2796 } 2797 2798 raw_spin_unlock(&kvm_count_lock); 2799 2800 return r; 2801 } 2802 2803 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 2804 void *v) 2805 { 2806 int cpu = (long)v; 2807 2808 val &= ~CPU_TASKS_FROZEN; 2809 switch (val) { 2810 case CPU_DYING: 2811 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2812 cpu); 2813 hardware_disable(); 2814 break; 2815 case CPU_STARTING: 2816 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2817 cpu); 2818 hardware_enable(); 2819 break; 2820 } 2821 return NOTIFY_OK; 2822 } 2823 2824 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 2825 void *v) 2826 { 2827 /* 2828 * Some (well, at least mine) BIOSes hang on reboot if 2829 * in vmx root mode. 2830 * 2831 * And Intel TXT required VMX off for all cpu when system shutdown. 2832 */ 2833 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2834 kvm_rebooting = true; 2835 on_each_cpu(hardware_disable_nolock, NULL, 1); 2836 return NOTIFY_OK; 2837 } 2838 2839 static struct notifier_block kvm_reboot_notifier = { 2840 .notifier_call = kvm_reboot, 2841 .priority = 0, 2842 }; 2843 2844 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 2845 { 2846 int i; 2847 2848 for (i = 0; i < bus->dev_count; i++) { 2849 struct kvm_io_device *pos = bus->range[i].dev; 2850 2851 kvm_iodevice_destructor(pos); 2852 } 2853 kfree(bus); 2854 } 2855 2856 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 2857 const struct kvm_io_range *r2) 2858 { 2859 if (r1->addr < r2->addr) 2860 return -1; 2861 if (r1->addr + r1->len > r2->addr + r2->len) 2862 return 1; 2863 return 0; 2864 } 2865 2866 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 2867 { 2868 return kvm_io_bus_cmp(p1, p2); 2869 } 2870 2871 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 2872 gpa_t addr, int len) 2873 { 2874 bus->range[bus->dev_count++] = (struct kvm_io_range) { 2875 .addr = addr, 2876 .len = len, 2877 .dev = dev, 2878 }; 2879 2880 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 2881 kvm_io_bus_sort_cmp, NULL); 2882 2883 return 0; 2884 } 2885 2886 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 2887 gpa_t addr, int len) 2888 { 2889 struct kvm_io_range *range, key; 2890 int off; 2891 2892 key = (struct kvm_io_range) { 2893 .addr = addr, 2894 .len = len, 2895 }; 2896 2897 range = bsearch(&key, bus->range, bus->dev_count, 2898 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 2899 if (range == NULL) 2900 return -ENOENT; 2901 2902 off = range - bus->range; 2903 2904 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 2905 off--; 2906 2907 return off; 2908 } 2909 2910 static int 
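/*
 * kvm_io_bus_cmp() above treats a range as "equal" to a registered range
 * when it is fully contained in it, so the bsearch() in
 * kvm_io_bus_get_first_dev() lands on some device whose window covers the
 * access and the backwards walk rewinds to the first such device;
 * __kvm_io_bus_write()/__kvm_io_bus_read() below then try each matching
 * device in turn until one handles the access.  For example, a 2-byte
 * access at 0x102 is contained in both a registered [0x100, 0x104) and a
 * registered [0x100, 0x108) range, so both registrations match.
 */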
__kvm_io_bus_write(struct kvm_io_bus *bus, 2911 struct kvm_io_range *range, const void *val) 2912 { 2913 int idx; 2914 2915 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2916 if (idx < 0) 2917 return -EOPNOTSUPP; 2918 2919 while (idx < bus->dev_count && 2920 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 2921 if (!kvm_iodevice_write(bus->range[idx].dev, range->addr, 2922 range->len, val)) 2923 return idx; 2924 idx++; 2925 } 2926 2927 return -EOPNOTSUPP; 2928 } 2929 2930 /* kvm_io_bus_write - called under kvm->slots_lock */ 2931 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2932 int len, const void *val) 2933 { 2934 struct kvm_io_bus *bus; 2935 struct kvm_io_range range; 2936 int r; 2937 2938 range = (struct kvm_io_range) { 2939 .addr = addr, 2940 .len = len, 2941 }; 2942 2943 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2944 r = __kvm_io_bus_write(bus, &range, val); 2945 return r < 0 ? r : 0; 2946 } 2947 2948 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 2949 int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2950 int len, const void *val, long cookie) 2951 { 2952 struct kvm_io_bus *bus; 2953 struct kvm_io_range range; 2954 2955 range = (struct kvm_io_range) { 2956 .addr = addr, 2957 .len = len, 2958 }; 2959 2960 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2961 2962 /* First try the device referenced by cookie. */ 2963 if ((cookie >= 0) && (cookie < bus->dev_count) && 2964 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 2965 if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len, 2966 val)) 2967 return cookie; 2968 2969 /* 2970 * cookie contained garbage; fall back to search and return the 2971 * correct cookie value. 2972 */ 2973 return __kvm_io_bus_write(bus, &range, val); 2974 } 2975 2976 static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, 2977 void *val) 2978 { 2979 int idx; 2980 2981 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 2982 if (idx < 0) 2983 return -EOPNOTSUPP; 2984 2985 while (idx < bus->dev_count && 2986 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 2987 if (!kvm_iodevice_read(bus->range[idx].dev, range->addr, 2988 range->len, val)) 2989 return idx; 2990 idx++; 2991 } 2992 2993 return -EOPNOTSUPP; 2994 } 2995 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 2996 2997 /* kvm_io_bus_read - called under kvm->slots_lock */ 2998 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2999 int len, void *val) 3000 { 3001 struct kvm_io_bus *bus; 3002 struct kvm_io_range range; 3003 int r; 3004 3005 range = (struct kvm_io_range) { 3006 .addr = addr, 3007 .len = len, 3008 }; 3009 3010 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3011 r = __kvm_io_bus_read(bus, &range, val); 3012 return r < 0 ? r : 0; 3013 } 3014 3015 3016 /* Caller must hold slots_lock. 
*/ 3017 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3018 int len, struct kvm_io_device *dev) 3019 { 3020 struct kvm_io_bus *new_bus, *bus; 3021 3022 bus = kvm->buses[bus_idx]; 3023 /* exclude ioeventfd which is limited by maximum fd */ 3024 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3025 return -ENOSPC; 3026 3027 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3028 sizeof(struct kvm_io_range)), GFP_KERNEL); 3029 if (!new_bus) 3030 return -ENOMEM; 3031 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3032 sizeof(struct kvm_io_range))); 3033 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3034 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3035 synchronize_srcu_expedited(&kvm->srcu); 3036 kfree(bus); 3037 3038 return 0; 3039 } 3040 3041 /* Caller must hold slots_lock. */ 3042 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3043 struct kvm_io_device *dev) 3044 { 3045 int i, r; 3046 struct kvm_io_bus *new_bus, *bus; 3047 3048 bus = kvm->buses[bus_idx]; 3049 r = -ENOENT; 3050 for (i = 0; i < bus->dev_count; i++) 3051 if (bus->range[i].dev == dev) { 3052 r = 0; 3053 break; 3054 } 3055 3056 if (r) 3057 return r; 3058 3059 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3060 sizeof(struct kvm_io_range)), GFP_KERNEL); 3061 if (!new_bus) 3062 return -ENOMEM; 3063 3064 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3065 new_bus->dev_count--; 3066 memcpy(new_bus->range + i, bus->range + i + 1, 3067 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3068 3069 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3070 synchronize_srcu_expedited(&kvm->srcu); 3071 kfree(bus); 3072 return r; 3073 } 3074 3075 static struct notifier_block kvm_cpu_notifier = { 3076 .notifier_call = kvm_cpu_hotplug, 3077 }; 3078 3079 static int vm_stat_get(void *_offset, u64 *val) 3080 { 3081 unsigned offset = (long)_offset; 3082 struct kvm *kvm; 3083 3084 *val = 0; 3085 spin_lock(&kvm_lock); 3086 list_for_each_entry(kvm, &vm_list, vm_list) 3087 *val += *(u32 *)((void *)kvm + offset); 3088 spin_unlock(&kvm_lock); 3089 return 0; 3090 } 3091 3092 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3093 3094 static int vcpu_stat_get(void *_offset, u64 *val) 3095 { 3096 unsigned offset = (long)_offset; 3097 struct kvm *kvm; 3098 struct kvm_vcpu *vcpu; 3099 int i; 3100 3101 *val = 0; 3102 spin_lock(&kvm_lock); 3103 list_for_each_entry(kvm, &vm_list, vm_list) 3104 kvm_for_each_vcpu(i, vcpu, kvm) 3105 *val += *(u32 *)((void *)vcpu + offset); 3106 3107 spin_unlock(&kvm_lock); 3108 return 0; 3109 } 3110 3111 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3112 3113 static const struct file_operations *stat_fops[] = { 3114 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3115 [KVM_STAT_VM] = &vm_stat_fops, 3116 }; 3117 3118 static int kvm_init_debug(void) 3119 { 3120 int r = -EEXIST; 3121 struct kvm_stats_debugfs_item *p; 3122 3123 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3124 if (kvm_debugfs_dir == NULL) 3125 goto out; 3126 3127 for (p = debugfs_entries; p->name; ++p) { 3128 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3129 (void *)(long)p->offset, 3130 stat_fops[p->kind]); 3131 if (p->dentry == NULL) 3132 goto out_dir; 3133 } 3134 3135 return 0; 3136 3137 out_dir: 3138 debugfs_remove_recursive(kvm_debugfs_dir); 3139 out: 3140 return r; 3141 } 3142 3143 static void kvm_exit_debug(void) 3144 { 3145 struct kvm_stats_debugfs_item *p; 3146 3147 for (p = 
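/*
 * The register/unregister paths above never modify a live kvm_io_bus:
 * they build a copy with one more (or one fewer) kvm_io_range, publish it
 * with rcu_assign_pointer(), wait out existing readers with
 * synchronize_srcu_expedited(&kvm->srcu), and only then free the old bus.
 * Updates are serialized by slots_lock (see the "Caller must hold
 * slots_lock" comments), while kvm_io_bus_write()/read() dereference
 * kvm->buses[] under kvm->srcu and therefore never see a half-updated
 * range array.
 */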
debugfs_entries; p->name; ++p) 3148 debugfs_remove(p->dentry); 3149 debugfs_remove(kvm_debugfs_dir); 3150 } 3151 3152 static int kvm_suspend(void) 3153 { 3154 if (kvm_usage_count) 3155 hardware_disable_nolock(NULL); 3156 return 0; 3157 } 3158 3159 static void kvm_resume(void) 3160 { 3161 if (kvm_usage_count) { 3162 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3163 hardware_enable_nolock(NULL); 3164 } 3165 } 3166 3167 static struct syscore_ops kvm_syscore_ops = { 3168 .suspend = kvm_suspend, 3169 .resume = kvm_resume, 3170 }; 3171 3172 static inline 3173 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3174 { 3175 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3176 } 3177 3178 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3179 { 3180 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3181 if (vcpu->preempted) 3182 vcpu->preempted = false; 3183 3184 kvm_arch_sched_in(vcpu, cpu); 3185 3186 kvm_arch_vcpu_load(vcpu, cpu); 3187 } 3188 3189 static void kvm_sched_out(struct preempt_notifier *pn, 3190 struct task_struct *next) 3191 { 3192 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3193 3194 if (current->state == TASK_RUNNING) 3195 vcpu->preempted = true; 3196 kvm_arch_vcpu_put(vcpu); 3197 } 3198 3199 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3200 struct module *module) 3201 { 3202 int r; 3203 int cpu; 3204 3205 r = kvm_arch_init(opaque); 3206 if (r) 3207 goto out_fail; 3208 3209 /* 3210 * kvm_arch_init makes sure there's at most one caller 3211 * for architectures that support multiple implementations, 3212 * like intel and amd on x86. 3213 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3214 * conflicts in case kvm is already setup for another implementation. 3215 */ 3216 r = kvm_irqfd_init(); 3217 if (r) 3218 goto out_irqfd; 3219 3220 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3221 r = -ENOMEM; 3222 goto out_free_0; 3223 } 3224 3225 r = kvm_arch_hardware_setup(); 3226 if (r < 0) 3227 goto out_free_0a; 3228 3229 for_each_online_cpu(cpu) { 3230 smp_call_function_single(cpu, 3231 kvm_arch_check_processor_compat, 3232 &r, 1); 3233 if (r < 0) 3234 goto out_free_1; 3235 } 3236 3237 r = register_cpu_notifier(&kvm_cpu_notifier); 3238 if (r) 3239 goto out_free_2; 3240 register_reboot_notifier(&kvm_reboot_notifier); 3241 3242 /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ 3243 if (!vcpu_align) 3244 vcpu_align = __alignof__(struct kvm_vcpu); 3245 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3246 0, NULL); 3247 if (!kvm_vcpu_cache) { 3248 r = -ENOMEM; 3249 goto out_free_3; 3250 } 3251 3252 r = kvm_async_pf_init(); 3253 if (r) 3254 goto out_free; 3255 3256 kvm_chardev_ops.owner = module; 3257 kvm_vm_fops.owner = module; 3258 kvm_vcpu_fops.owner = module; 3259 3260 r = misc_register(&kvm_dev); 3261 if (r) { 3262 printk(KERN_ERR "kvm: misc device register failed\n"); 3263 goto out_unreg; 3264 } 3265 3266 register_syscore_ops(&kvm_syscore_ops); 3267 3268 kvm_preempt_ops.sched_in = kvm_sched_in; 3269 kvm_preempt_ops.sched_out = kvm_sched_out; 3270 3271 r = kvm_init_debug(); 3272 if (r) { 3273 printk(KERN_ERR "kvm: create debugfs files failed\n"); 3274 goto out_undebugfs; 3275 } 3276 3277 r = kvm_vfio_ops_init(); 3278 WARN_ON(r); 3279 3280 return 0; 3281 3282 out_undebugfs: 3283 unregister_syscore_ops(&kvm_syscore_ops); 3284 misc_deregister(&kvm_dev); 3285 out_unreg: 3286 kvm_async_pf_deinit(); 3287 out_free: 3288 kmem_cache_destroy(kvm_vcpu_cache); 3289 out_free_3: 3290 unregister_reboot_notifier(&kvm_reboot_notifier); 3291 unregister_cpu_notifier(&kvm_cpu_notifier); 3292 out_free_2: 3293 out_free_1: 3294 kvm_arch_hardware_unsetup(); 3295 out_free_0a: 3296 free_cpumask_var(cpus_hardware_enabled); 3297 out_free_0: 3298 kvm_irqfd_exit(); 3299 out_irqfd: 3300 kvm_arch_exit(); 3301 out_fail: 3302 return r; 3303 } 3304 EXPORT_SYMBOL_GPL(kvm_init); 3305 3306 void kvm_exit(void) 3307 { 3308 kvm_exit_debug(); 3309 misc_deregister(&kvm_dev); 3310 kmem_cache_destroy(kvm_vcpu_cache); 3311 kvm_async_pf_deinit(); 3312 unregister_syscore_ops(&kvm_syscore_ops); 3313 unregister_reboot_notifier(&kvm_reboot_notifier); 3314 unregister_cpu_notifier(&kvm_cpu_notifier); 3315 on_each_cpu(hardware_disable_nolock, NULL, 1); 3316 kvm_arch_hardware_unsetup(); 3317 kvm_arch_exit(); 3318 kvm_irqfd_exit(); 3319 free_cpumask_var(cpus_hardware_enabled); 3320 kvm_vfio_ops_exit(); 3321 } 3322 EXPORT_SYMBOL_GPL(kvm_exit); 3323
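/*
 * kvm_init() and kvm_exit() above are called by the architecture module
 * that supplies the hardware backend, passing its vcpu container size and
 * alignment so the kvm_vcpu kmem cache fits the arch-specific structure.
 * A rough sketch of how an arch module hooks in, modelled on the x86 VMX
 * side (details vary by architecture and kernel version):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 *	module_init(vmx_init);
 */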