1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include <kvm/iodev.h> 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched.h> 36 #include <linux/cpumask.h> 37 #include <linux/smp.h> 38 #include <linux/anon_inodes.h> 39 #include <linux/profile.h> 40 #include <linux/kvm_para.h> 41 #include <linux/pagemap.h> 42 #include <linux/mman.h> 43 #include <linux/swap.h> 44 #include <linux/bitops.h> 45 #include <linux/spinlock.h> 46 #include <linux/compat.h> 47 #include <linux/srcu.h> 48 #include <linux/hugetlb.h> 49 #include <linux/slab.h> 50 #include <linux/sort.h> 51 #include <linux/bsearch.h> 52 53 #include <asm/processor.h> 54 #include <asm/io.h> 55 #include <asm/ioctl.h> 56 #include <asm/uaccess.h> 57 #include <asm/pgtable.h> 58 59 #include "coalesced_mmio.h" 60 #include "async_pf.h" 61 #include "vfio.h" 62 63 #define CREATE_TRACE_POINTS 64 #include <trace/events/kvm.h> 65 66 MODULE_AUTHOR("Qumranet"); 67 MODULE_LICENSE("GPL"); 68 69 /* Architectures should define their poll value according to the halt latency */ 70 static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 71 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); 72 73 /* Default doubles per-vcpu halt_poll_ns. */ 74 static unsigned int halt_poll_ns_grow = 2; 75 module_param(halt_poll_ns_grow, int, S_IRUGO); 76 77 /* Default resets per-vcpu halt_poll_ns . 
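 * (a shrink value of 0 resets the window to 0 instead of dividing it).
 *
 * As an illustrative example based on grow_halt_poll_ns() and
 * shrink_halt_poll_ns() below: with the defaults halt_poll_ns_grow=2 and
 * halt_poll_ns_shrink=0, a vcpu's polling window grows
 * 0 -> 10us -> 20us -> 40us ... after short halts (while it is still below
 * the halt_poll_ns module parameter), and is reset straight back to 0
 * after a halt that blocks for longer than halt_poll_ns.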
*/ 78 static unsigned int halt_poll_ns_shrink; 79 module_param(halt_poll_ns_shrink, int, S_IRUGO); 80 81 /* 82 * Ordering of locks: 83 * 84 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 85 */ 86 87 DEFINE_SPINLOCK(kvm_lock); 88 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 89 LIST_HEAD(vm_list); 90 91 static cpumask_var_t cpus_hardware_enabled; 92 static int kvm_usage_count; 93 static atomic_t hardware_enable_failed; 94 95 struct kmem_cache *kvm_vcpu_cache; 96 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 97 98 static __read_mostly struct preempt_ops kvm_preempt_ops; 99 100 struct dentry *kvm_debugfs_dir; 101 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 102 103 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 104 unsigned long arg); 105 #ifdef CONFIG_KVM_COMPAT 106 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 107 unsigned long arg); 108 #endif 109 static int hardware_enable_all(void); 110 static void hardware_disable_all(void); 111 112 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 113 114 static void kvm_release_pfn_dirty(kvm_pfn_t pfn); 115 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); 116 117 __visible bool kvm_rebooting; 118 EXPORT_SYMBOL_GPL(kvm_rebooting); 119 120 static bool largepages_enabled = true; 121 122 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 123 { 124 if (pfn_valid(pfn)) 125 return PageReserved(pfn_to_page(pfn)); 126 127 return true; 128 } 129 130 /* 131 * Switches to specified vcpu, until a matching vcpu_put() 132 */ 133 int vcpu_load(struct kvm_vcpu *vcpu) 134 { 135 int cpu; 136 137 if (mutex_lock_killable(&vcpu->mutex)) 138 return -EINTR; 139 cpu = get_cpu(); 140 preempt_notifier_register(&vcpu->preempt_notifier); 141 kvm_arch_vcpu_load(vcpu, cpu); 142 put_cpu(); 143 return 0; 144 } 145 146 void vcpu_put(struct kvm_vcpu *vcpu) 147 { 148 preempt_disable(); 149 kvm_arch_vcpu_put(vcpu); 150 preempt_notifier_unregister(&vcpu->preempt_notifier); 151 preempt_enable(); 152 mutex_unlock(&vcpu->mutex); 153 } 154 155 static void ack_flush(void *_completed) 156 { 157 } 158 159 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 160 { 161 int i, cpu, me; 162 cpumask_var_t cpus; 163 bool called = true; 164 struct kvm_vcpu *vcpu; 165 166 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 167 168 me = get_cpu(); 169 kvm_for_each_vcpu(i, vcpu, kvm) { 170 kvm_make_request(req, vcpu); 171 cpu = vcpu->cpu; 172 173 /* Set ->requests bit before we read ->mode */ 174 smp_mb(); 175 176 if (cpus != NULL && cpu != -1 && cpu != me && 177 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 178 cpumask_set_cpu(cpu, cpus); 179 } 180 if (unlikely(cpus == NULL)) 181 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 182 else if (!cpumask_empty(cpus)) 183 smp_call_function_many(cpus, ack_flush, NULL, 1); 184 else 185 called = false; 186 put_cpu(); 187 free_cpumask_var(cpus); 188 return called; 189 } 190 191 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 192 void kvm_flush_remote_tlbs(struct kvm *kvm) 193 { 194 long dirty_count = kvm->tlbs_dirty; 195 196 smp_mb(); 197 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 198 ++kvm->stat.remote_tlb_flush; 199 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 200 } 201 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 202 #endif 203 204 void kvm_reload_remote_mmus(struct kvm *kvm) 205 { 206 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 207 } 208 209 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 210 { 211 struct page *page; 212 int r; 213 214 
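	/*
	 * Generic vcpu state shared by all architectures is initialized
	 * here; the architecture-specific part is set up by
	 * kvm_arch_vcpu_init() further down.
	 */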
mutex_init(&vcpu->mutex); 215 vcpu->cpu = -1; 216 vcpu->kvm = kvm; 217 vcpu->vcpu_id = id; 218 vcpu->pid = NULL; 219 vcpu->halt_poll_ns = 0; 220 init_waitqueue_head(&vcpu->wq); 221 kvm_async_pf_vcpu_init(vcpu); 222 223 vcpu->pre_pcpu = -1; 224 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 225 226 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 227 if (!page) { 228 r = -ENOMEM; 229 goto fail; 230 } 231 vcpu->run = page_address(page); 232 233 kvm_vcpu_set_in_spin_loop(vcpu, false); 234 kvm_vcpu_set_dy_eligible(vcpu, false); 235 vcpu->preempted = false; 236 237 r = kvm_arch_vcpu_init(vcpu); 238 if (r < 0) 239 goto fail_free_run; 240 return 0; 241 242 fail_free_run: 243 free_page((unsigned long)vcpu->run); 244 fail: 245 return r; 246 } 247 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 248 249 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 250 { 251 put_pid(vcpu->pid); 252 kvm_arch_vcpu_uninit(vcpu); 253 free_page((unsigned long)vcpu->run); 254 } 255 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 256 257 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 258 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 259 { 260 return container_of(mn, struct kvm, mmu_notifier); 261 } 262 263 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 264 struct mm_struct *mm, 265 unsigned long address) 266 { 267 struct kvm *kvm = mmu_notifier_to_kvm(mn); 268 int need_tlb_flush, idx; 269 270 /* 271 * When ->invalidate_page runs, the linux pte has been zapped 272 * already but the page is still allocated until 273 * ->invalidate_page returns. So if we increase the sequence 274 * here the kvm page fault will notice if the spte can't be 275 * established because the page is going to be freed. If 276 * instead the kvm page fault establishes the spte before 277 * ->invalidate_page runs, kvm_unmap_hva will release it 278 * before returning. 279 * 280 * The sequence increase only need to be seen at spin_unlock 281 * time, and not at spin_lock time. 282 * 283 * Increasing the sequence after the spin_unlock would be 284 * unsafe because the kvm page fault could then establish the 285 * pte after kvm_unmap_hva returned, without noticing the page 286 * is going to be freed. 
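 *
 * For reference, a page-fault path that consumes this sequence looks
 * roughly like the following sketch (see mmu_notifier_retry() in
 * kvm_host.h):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	...translate hva to pfn, possibly sleeping...
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;	(a notifier ran; the pfn may be stale)
 *	...install the spte...
 *	spin_unlock(&kvm->mmu_lock);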
287 */ 288 idx = srcu_read_lock(&kvm->srcu); 289 spin_lock(&kvm->mmu_lock); 290 291 kvm->mmu_notifier_seq++; 292 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 293 /* we've to flush the tlb before the pages can be freed */ 294 if (need_tlb_flush) 295 kvm_flush_remote_tlbs(kvm); 296 297 spin_unlock(&kvm->mmu_lock); 298 299 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 300 301 srcu_read_unlock(&kvm->srcu, idx); 302 } 303 304 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 305 struct mm_struct *mm, 306 unsigned long address, 307 pte_t pte) 308 { 309 struct kvm *kvm = mmu_notifier_to_kvm(mn); 310 int idx; 311 312 idx = srcu_read_lock(&kvm->srcu); 313 spin_lock(&kvm->mmu_lock); 314 kvm->mmu_notifier_seq++; 315 kvm_set_spte_hva(kvm, address, pte); 316 spin_unlock(&kvm->mmu_lock); 317 srcu_read_unlock(&kvm->srcu, idx); 318 } 319 320 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 321 struct mm_struct *mm, 322 unsigned long start, 323 unsigned long end) 324 { 325 struct kvm *kvm = mmu_notifier_to_kvm(mn); 326 int need_tlb_flush = 0, idx; 327 328 idx = srcu_read_lock(&kvm->srcu); 329 spin_lock(&kvm->mmu_lock); 330 /* 331 * The count increase must become visible at unlock time as no 332 * spte can be established without taking the mmu_lock and 333 * count is also read inside the mmu_lock critical section. 334 */ 335 kvm->mmu_notifier_count++; 336 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 337 need_tlb_flush |= kvm->tlbs_dirty; 338 /* we've to flush the tlb before the pages can be freed */ 339 if (need_tlb_flush) 340 kvm_flush_remote_tlbs(kvm); 341 342 spin_unlock(&kvm->mmu_lock); 343 srcu_read_unlock(&kvm->srcu, idx); 344 } 345 346 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 347 struct mm_struct *mm, 348 unsigned long start, 349 unsigned long end) 350 { 351 struct kvm *kvm = mmu_notifier_to_kvm(mn); 352 353 spin_lock(&kvm->mmu_lock); 354 /* 355 * This sequence increase will notify the kvm page fault that 356 * the page that is going to be mapped in the spte could have 357 * been freed. 358 */ 359 kvm->mmu_notifier_seq++; 360 smp_wmb(); 361 /* 362 * The above sequence increase must be visible before the 363 * below count decrease, which is ensured by the smp_wmb above 364 * in conjunction with the smp_rmb in mmu_notifier_retry(). 365 */ 366 kvm->mmu_notifier_count--; 367 spin_unlock(&kvm->mmu_lock); 368 369 BUG_ON(kvm->mmu_notifier_count < 0); 370 } 371 372 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 373 struct mm_struct *mm, 374 unsigned long start, 375 unsigned long end) 376 { 377 struct kvm *kvm = mmu_notifier_to_kvm(mn); 378 int young, idx; 379 380 idx = srcu_read_lock(&kvm->srcu); 381 spin_lock(&kvm->mmu_lock); 382 383 young = kvm_age_hva(kvm, start, end); 384 if (young) 385 kvm_flush_remote_tlbs(kvm); 386 387 spin_unlock(&kvm->mmu_lock); 388 srcu_read_unlock(&kvm->srcu, idx); 389 390 return young; 391 } 392 393 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 394 struct mm_struct *mm, 395 unsigned long start, 396 unsigned long end) 397 { 398 struct kvm *kvm = mmu_notifier_to_kvm(mn); 399 int young, idx; 400 401 idx = srcu_read_lock(&kvm->srcu); 402 spin_lock(&kvm->mmu_lock); 403 /* 404 * Even though we do not flush TLB, this will still adversely 405 * affect performance on pre-Haswell Intel EPT, where there is 406 * no EPT Access Bit to clear so that we have to tear down EPT 407 * tables instead. 
If we find this unacceptable, we can always 408 * add a parameter to kvm_age_hva so that it effectively doesn't 409 * do anything on clear_young. 410 * 411 * Also note that currently we never issue secondary TLB flushes 412 * from clear_young, leaving this job up to the regular system 413 * cadence. If we find this inaccurate, we might come up with a 414 * more sophisticated heuristic later. 415 */ 416 young = kvm_age_hva(kvm, start, end); 417 spin_unlock(&kvm->mmu_lock); 418 srcu_read_unlock(&kvm->srcu, idx); 419 420 return young; 421 } 422 423 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 424 struct mm_struct *mm, 425 unsigned long address) 426 { 427 struct kvm *kvm = mmu_notifier_to_kvm(mn); 428 int young, idx; 429 430 idx = srcu_read_lock(&kvm->srcu); 431 spin_lock(&kvm->mmu_lock); 432 young = kvm_test_age_hva(kvm, address); 433 spin_unlock(&kvm->mmu_lock); 434 srcu_read_unlock(&kvm->srcu, idx); 435 436 return young; 437 } 438 439 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 440 struct mm_struct *mm) 441 { 442 struct kvm *kvm = mmu_notifier_to_kvm(mn); 443 int idx; 444 445 idx = srcu_read_lock(&kvm->srcu); 446 kvm_arch_flush_shadow_all(kvm); 447 srcu_read_unlock(&kvm->srcu, idx); 448 } 449 450 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 451 .invalidate_page = kvm_mmu_notifier_invalidate_page, 452 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 453 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 454 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 455 .clear_young = kvm_mmu_notifier_clear_young, 456 .test_young = kvm_mmu_notifier_test_young, 457 .change_pte = kvm_mmu_notifier_change_pte, 458 .release = kvm_mmu_notifier_release, 459 }; 460 461 static int kvm_init_mmu_notifier(struct kvm *kvm) 462 { 463 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 464 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 465 } 466 467 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 468 469 static int kvm_init_mmu_notifier(struct kvm *kvm) 470 { 471 return 0; 472 } 473 474 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 475 476 static struct kvm_memslots *kvm_alloc_memslots(void) 477 { 478 int i; 479 struct kvm_memslots *slots; 480 481 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 482 if (!slots) 483 return NULL; 484 485 /* 486 * Init kvm generation close to the maximum to easily test the 487 * code of handling generation number wrap-around. 488 */ 489 slots->generation = -150; 490 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 491 slots->id_to_index[i] = slots->memslots[i].id = i; 492 493 return slots; 494 } 495 496 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 497 { 498 if (!memslot->dirty_bitmap) 499 return; 500 501 kvfree(memslot->dirty_bitmap); 502 memslot->dirty_bitmap = NULL; 503 } 504 505 /* 506 * Free any memory in @free but not in @dont. 
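 *
 * For example, kvm_free_memslot(kvm, &old, &new) releases only what the
 * updated slot no longer needs, while kvm_free_memslot(kvm, memslot, NULL)
 * releases everything, as done from kvm_free_memslots().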
507 */ 508 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 509 struct kvm_memory_slot *dont) 510 { 511 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 512 kvm_destroy_dirty_bitmap(free); 513 514 kvm_arch_free_memslot(kvm, free, dont); 515 516 free->npages = 0; 517 } 518 519 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 520 { 521 struct kvm_memory_slot *memslot; 522 523 if (!slots) 524 return; 525 526 kvm_for_each_memslot(memslot, slots) 527 kvm_free_memslot(kvm, memslot, NULL); 528 529 kvfree(slots); 530 } 531 532 static struct kvm *kvm_create_vm(unsigned long type) 533 { 534 int r, i; 535 struct kvm *kvm = kvm_arch_alloc_vm(); 536 537 if (!kvm) 538 return ERR_PTR(-ENOMEM); 539 540 r = kvm_arch_init_vm(kvm, type); 541 if (r) 542 goto out_err_no_disable; 543 544 r = hardware_enable_all(); 545 if (r) 546 goto out_err_no_disable; 547 548 #ifdef CONFIG_HAVE_KVM_IRQFD 549 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 550 #endif 551 552 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 553 554 r = -ENOMEM; 555 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 556 kvm->memslots[i] = kvm_alloc_memslots(); 557 if (!kvm->memslots[i]) 558 goto out_err_no_srcu; 559 } 560 561 if (init_srcu_struct(&kvm->srcu)) 562 goto out_err_no_srcu; 563 if (init_srcu_struct(&kvm->irq_srcu)) 564 goto out_err_no_irq_srcu; 565 for (i = 0; i < KVM_NR_BUSES; i++) { 566 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 567 GFP_KERNEL); 568 if (!kvm->buses[i]) 569 goto out_err; 570 } 571 572 spin_lock_init(&kvm->mmu_lock); 573 kvm->mm = current->mm; 574 atomic_inc(&kvm->mm->mm_count); 575 kvm_eventfd_init(kvm); 576 mutex_init(&kvm->lock); 577 mutex_init(&kvm->irq_lock); 578 mutex_init(&kvm->slots_lock); 579 atomic_set(&kvm->users_count, 1); 580 INIT_LIST_HEAD(&kvm->devices); 581 582 r = kvm_init_mmu_notifier(kvm); 583 if (r) 584 goto out_err; 585 586 spin_lock(&kvm_lock); 587 list_add(&kvm->vm_list, &vm_list); 588 spin_unlock(&kvm_lock); 589 590 preempt_notifier_inc(); 591 592 return kvm; 593 594 out_err: 595 cleanup_srcu_struct(&kvm->irq_srcu); 596 out_err_no_irq_srcu: 597 cleanup_srcu_struct(&kvm->srcu); 598 out_err_no_srcu: 599 hardware_disable_all(); 600 out_err_no_disable: 601 for (i = 0; i < KVM_NR_BUSES; i++) 602 kfree(kvm->buses[i]); 603 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 604 kvm_free_memslots(kvm, kvm->memslots[i]); 605 kvm_arch_free_vm(kvm); 606 return ERR_PTR(r); 607 } 608 609 /* 610 * Avoid using vmalloc for a small buffer. 611 * Should not be used when the size is statically known. 
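 *
 * The buffer must be released with kvfree(), which copes with both the
 * kzalloc() and the vzalloc() case, e.g. as kvm_create_dirty_bitmap() and
 * kvm_destroy_dirty_bitmap() do:
 *
 *	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
 *	...
 *	kvfree(memslot->dirty_bitmap);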
612 */ 613 void *kvm_kvzalloc(unsigned long size) 614 { 615 if (size > PAGE_SIZE) 616 return vzalloc(size); 617 else 618 return kzalloc(size, GFP_KERNEL); 619 } 620 621 static void kvm_destroy_devices(struct kvm *kvm) 622 { 623 struct list_head *node, *tmp; 624 625 list_for_each_safe(node, tmp, &kvm->devices) { 626 struct kvm_device *dev = 627 list_entry(node, struct kvm_device, vm_node); 628 629 list_del(node); 630 dev->ops->destroy(dev); 631 } 632 } 633 634 static void kvm_destroy_vm(struct kvm *kvm) 635 { 636 int i; 637 struct mm_struct *mm = kvm->mm; 638 639 kvm_arch_sync_events(kvm); 640 spin_lock(&kvm_lock); 641 list_del(&kvm->vm_list); 642 spin_unlock(&kvm_lock); 643 kvm_free_irq_routing(kvm); 644 for (i = 0; i < KVM_NR_BUSES; i++) 645 kvm_io_bus_destroy(kvm->buses[i]); 646 kvm_coalesced_mmio_free(kvm); 647 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 648 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 649 #else 650 kvm_arch_flush_shadow_all(kvm); 651 #endif 652 kvm_arch_destroy_vm(kvm); 653 kvm_destroy_devices(kvm); 654 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 655 kvm_free_memslots(kvm, kvm->memslots[i]); 656 cleanup_srcu_struct(&kvm->irq_srcu); 657 cleanup_srcu_struct(&kvm->srcu); 658 kvm_arch_free_vm(kvm); 659 preempt_notifier_dec(); 660 hardware_disable_all(); 661 mmdrop(mm); 662 } 663 664 void kvm_get_kvm(struct kvm *kvm) 665 { 666 atomic_inc(&kvm->users_count); 667 } 668 EXPORT_SYMBOL_GPL(kvm_get_kvm); 669 670 void kvm_put_kvm(struct kvm *kvm) 671 { 672 if (atomic_dec_and_test(&kvm->users_count)) 673 kvm_destroy_vm(kvm); 674 } 675 EXPORT_SYMBOL_GPL(kvm_put_kvm); 676 677 678 static int kvm_vm_release(struct inode *inode, struct file *filp) 679 { 680 struct kvm *kvm = filp->private_data; 681 682 kvm_irqfd_release(kvm); 683 684 kvm_put_kvm(kvm); 685 return 0; 686 } 687 688 /* 689 * Allocation size is twice as large as the actual dirty bitmap size. 690 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 691 */ 692 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 693 { 694 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 695 696 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 697 if (!memslot->dirty_bitmap) 698 return -ENOMEM; 699 700 return 0; 701 } 702 703 /* 704 * Insert memslot and re-sort memslots based on their GFN, 705 * so binary search could be used to lookup GFN. 706 * Sorting algorithm takes advantage of having initially 707 * sorted array and known changed memslot position. 708 */ 709 static void update_memslots(struct kvm_memslots *slots, 710 struct kvm_memory_slot *new) 711 { 712 int id = new->id; 713 int i = slots->id_to_index[id]; 714 struct kvm_memory_slot *mslots = slots->memslots; 715 716 WARN_ON(mslots[i].id != id); 717 if (!new->npages) { 718 WARN_ON(!mslots[i].npages); 719 if (mslots[i].npages) 720 slots->used_slots--; 721 } else { 722 if (!mslots[i].npages) 723 slots->used_slots++; 724 } 725 726 while (i < KVM_MEM_SLOTS_NUM - 1 && 727 new->base_gfn <= mslots[i + 1].base_gfn) { 728 if (!mslots[i + 1].npages) 729 break; 730 mslots[i] = mslots[i + 1]; 731 slots->id_to_index[mslots[i].id] = i; 732 i++; 733 } 734 735 /* 736 * The ">=" is needed when creating a slot with base_gfn == 0, 737 * so that it moves before all those with base_gfn == npages == 0. 
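 * (As a concrete example of the ordering: mslots[] is kept sorted by
 * base_gfn in decreasing order, so with slots whose base_gfn are
 * 0x100000, 0x1000 and 0x0 at indexes 0..2, inserting a slot with
 * base_gfn 0x8000 places it at index 1 and shifts the smaller ones
 * toward higher indexes.)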
738 * 739 * On the other hand, if new->npages is zero, the above loop has 740 * already left i pointing to the beginning of the empty part of 741 * mslots, and the ">=" would move the hole backwards in this 742 * case---which is wrong. So skip the loop when deleting a slot. 743 */ 744 if (new->npages) { 745 while (i > 0 && 746 new->base_gfn >= mslots[i - 1].base_gfn) { 747 mslots[i] = mslots[i - 1]; 748 slots->id_to_index[mslots[i].id] = i; 749 i--; 750 } 751 } else 752 WARN_ON_ONCE(i != slots->used_slots); 753 754 mslots[i] = *new; 755 slots->id_to_index[mslots[i].id] = i; 756 } 757 758 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 759 { 760 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 761 762 #ifdef __KVM_HAVE_READONLY_MEM 763 valid_flags |= KVM_MEM_READONLY; 764 #endif 765 766 if (mem->flags & ~valid_flags) 767 return -EINVAL; 768 769 return 0; 770 } 771 772 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 773 int as_id, struct kvm_memslots *slots) 774 { 775 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 776 777 /* 778 * Set the low bit in the generation, which disables SPTE caching 779 * until the end of synchronize_srcu_expedited. 780 */ 781 WARN_ON(old_memslots->generation & 1); 782 slots->generation = old_memslots->generation + 1; 783 784 rcu_assign_pointer(kvm->memslots[as_id], slots); 785 synchronize_srcu_expedited(&kvm->srcu); 786 787 /* 788 * Increment the new memslot generation a second time. This prevents 789 * vm exits that race with memslot updates from caching a memslot 790 * generation that will (potentially) be valid forever. 791 */ 792 slots->generation++; 793 794 kvm_arch_memslots_updated(kvm, slots); 795 796 return old_memslots; 797 } 798 799 /* 800 * Allocate some memory and give it an address in the guest physical address 801 * space. 802 * 803 * Discontiguous memory is allowed, mostly for framebuffers. 804 * 805 * Must be called holding kvm->slots_lock for write. 806 */ 807 int __kvm_set_memory_region(struct kvm *kvm, 808 const struct kvm_userspace_memory_region *mem) 809 { 810 int r; 811 gfn_t base_gfn; 812 unsigned long npages; 813 struct kvm_memory_slot *slot; 814 struct kvm_memory_slot old, new; 815 struct kvm_memslots *slots = NULL, *old_memslots; 816 int as_id, id; 817 enum kvm_mr_change change; 818 819 r = check_memory_region_flags(mem); 820 if (r) 821 goto out; 822 823 r = -EINVAL; 824 as_id = mem->slot >> 16; 825 id = (u16)mem->slot; 826 827 /* General sanity checks */ 828 if (mem->memory_size & (PAGE_SIZE - 1)) 829 goto out; 830 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 831 goto out; 832 /* We can read the guest memory with __xxx_user() later on. */ 833 if ((id < KVM_USER_MEM_SLOTS) && 834 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 835 !access_ok(VERIFY_WRITE, 836 (void __user *)(unsigned long)mem->userspace_addr, 837 mem->memory_size))) 838 goto out; 839 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 840 goto out; 841 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 842 goto out; 843 844 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); 845 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 846 npages = mem->memory_size >> PAGE_SHIFT; 847 848 if (npages > KVM_MEM_MAX_NR_PAGES) 849 goto out; 850 851 new = old = *slot; 852 853 new.id = id; 854 new.base_gfn = base_gfn; 855 new.npages = npages; 856 new.flags = mem->flags; 857 858 if (npages) { 859 if (!old.npages) 860 change = KVM_MR_CREATE; 861 else { /* Modify an existing slot. 
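		 * Only a change of base_gfn (KVM_MR_MOVE) or of flags other
		 * than KVM_MEM_READONLY (KVM_MR_FLAGS_ONLY) is accepted here;
		 * an existing slot's size and userspace_addr must not change,
		 * as checked just below.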
*/ 862 if ((mem->userspace_addr != old.userspace_addr) || 863 (npages != old.npages) || 864 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 865 goto out; 866 867 if (base_gfn != old.base_gfn) 868 change = KVM_MR_MOVE; 869 else if (new.flags != old.flags) 870 change = KVM_MR_FLAGS_ONLY; 871 else { /* Nothing to change. */ 872 r = 0; 873 goto out; 874 } 875 } 876 } else { 877 if (!old.npages) 878 goto out; 879 880 change = KVM_MR_DELETE; 881 new.base_gfn = 0; 882 new.flags = 0; 883 } 884 885 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 886 /* Check for overlaps */ 887 r = -EEXIST; 888 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 889 if ((slot->id >= KVM_USER_MEM_SLOTS) || 890 (slot->id == id)) 891 continue; 892 if (!((base_gfn + npages <= slot->base_gfn) || 893 (base_gfn >= slot->base_gfn + slot->npages))) 894 goto out; 895 } 896 } 897 898 /* Free page dirty bitmap if unneeded */ 899 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 900 new.dirty_bitmap = NULL; 901 902 r = -ENOMEM; 903 if (change == KVM_MR_CREATE) { 904 new.userspace_addr = mem->userspace_addr; 905 906 if (kvm_arch_create_memslot(kvm, &new, npages)) 907 goto out_free; 908 } 909 910 /* Allocate page dirty bitmap if needed */ 911 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 912 if (kvm_create_dirty_bitmap(&new) < 0) 913 goto out_free; 914 } 915 916 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 917 if (!slots) 918 goto out_free; 919 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 920 921 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 922 slot = id_to_memslot(slots, id); 923 slot->flags |= KVM_MEMSLOT_INVALID; 924 925 old_memslots = install_new_memslots(kvm, as_id, slots); 926 927 /* slot was deleted or moved, clear iommu mapping */ 928 kvm_iommu_unmap_pages(kvm, &old); 929 /* From this point no new shadow pages pointing to a deleted, 930 * or moved, memslot will be created. 931 * 932 * validation of sp->gfn happens in: 933 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 934 * - kvm_is_visible_gfn (mmu_check_roots) 935 */ 936 kvm_arch_flush_shadow_memslot(kvm, slot); 937 938 /* 939 * We can re-use the old_memslots from above, the only difference 940 * from the currently installed memslots is the invalid flag. This 941 * will get overwritten by update_memslots anyway. 942 */ 943 slots = old_memslots; 944 } 945 946 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 947 if (r) 948 goto out_slots; 949 950 /* actual memory is freed via old in kvm_free_memslot below */ 951 if (change == KVM_MR_DELETE) { 952 new.dirty_bitmap = NULL; 953 memset(&new.arch, 0, sizeof(new.arch)); 954 } 955 956 update_memslots(slots, &new); 957 old_memslots = install_new_memslots(kvm, as_id, slots); 958 959 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); 960 961 kvm_free_memslot(kvm, &old, &new); 962 kvfree(old_memslots); 963 964 /* 965 * IOMMU mapping: New slots need to be mapped. Old slots need to be 966 * un-mapped and re-mapped if their base changes. Since base change 967 * unmapping is handled above with slot deletion, mapping alone is 968 * needed here. Anything else the iommu might care about for existing 969 * slots (size changes, userspace addr changes and read-only flag 970 * changes) is disallowed above, so any other attribute changes getting 971 * here can be skipped. 
972 */ 973 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 974 r = kvm_iommu_map_pages(kvm, &new); 975 return r; 976 } 977 978 return 0; 979 980 out_slots: 981 kvfree(slots); 982 out_free: 983 kvm_free_memslot(kvm, &new, &old); 984 out: 985 return r; 986 } 987 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 988 989 int kvm_set_memory_region(struct kvm *kvm, 990 const struct kvm_userspace_memory_region *mem) 991 { 992 int r; 993 994 mutex_lock(&kvm->slots_lock); 995 r = __kvm_set_memory_region(kvm, mem); 996 mutex_unlock(&kvm->slots_lock); 997 return r; 998 } 999 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1000 1001 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1002 struct kvm_userspace_memory_region *mem) 1003 { 1004 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1005 return -EINVAL; 1006 1007 return kvm_set_memory_region(kvm, mem); 1008 } 1009 1010 int kvm_get_dirty_log(struct kvm *kvm, 1011 struct kvm_dirty_log *log, int *is_dirty) 1012 { 1013 struct kvm_memslots *slots; 1014 struct kvm_memory_slot *memslot; 1015 int r, i, as_id, id; 1016 unsigned long n; 1017 unsigned long any = 0; 1018 1019 r = -EINVAL; 1020 as_id = log->slot >> 16; 1021 id = (u16)log->slot; 1022 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1023 goto out; 1024 1025 slots = __kvm_memslots(kvm, as_id); 1026 memslot = id_to_memslot(slots, id); 1027 r = -ENOENT; 1028 if (!memslot->dirty_bitmap) 1029 goto out; 1030 1031 n = kvm_dirty_bitmap_bytes(memslot); 1032 1033 for (i = 0; !any && i < n/sizeof(long); ++i) 1034 any = memslot->dirty_bitmap[i]; 1035 1036 r = -EFAULT; 1037 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 1038 goto out; 1039 1040 if (any) 1041 *is_dirty = 1; 1042 1043 r = 0; 1044 out: 1045 return r; 1046 } 1047 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1048 1049 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1050 /** 1051 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages 1052 * are dirty write protect them for next write. 1053 * @kvm: pointer to kvm instance 1054 * @log: slot id and address to which we copy the log 1055 * @is_dirty: flag set if any page is dirty 1056 * 1057 * We need to keep it in mind that VCPU threads can write to the bitmap 1058 * concurrently. So, to avoid losing track of dirty pages we keep the 1059 * following order: 1060 * 1061 * 1. Take a snapshot of the bit and clear it if needed. 1062 * 2. Write protect the corresponding page. 1063 * 3. Copy the snapshot to the userspace. 1064 * 4. Upon return caller flushes TLB's if needed. 1065 * 1066 * Between 2 and 4, the guest may write to the page using the remaining TLB 1067 * entry. This is not a problem because the page is reported dirty using 1068 * the snapshot taken before and step 4 ensures that writes done after 1069 * exiting to userspace will be logged for the next call. 
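 *
 * A typical arch-level caller therefore looks roughly like this sketch:
 *
 *	bool is_dirty = false;
 *
 *	mutex_lock(&kvm->slots_lock);
 *	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 *	if (is_dirty)
 *		kvm_flush_remote_tlbs(kvm);
 *	mutex_unlock(&kvm->slots_lock);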
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct
kvm_memory_slot *slot, gfn_t gfn, 1214 gfn_t *nr_pages) 1215 { 1216 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1217 } 1218 1219 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1220 gfn_t gfn) 1221 { 1222 return gfn_to_hva_many(slot, gfn, NULL); 1223 } 1224 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1225 1226 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1227 { 1228 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1229 } 1230 EXPORT_SYMBOL_GPL(gfn_to_hva); 1231 1232 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1233 { 1234 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1235 } 1236 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1237 1238 /* 1239 * If writable is set to false, the hva returned by this function is only 1240 * allowed to be read. 1241 */ 1242 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1243 gfn_t gfn, bool *writable) 1244 { 1245 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1246 1247 if (!kvm_is_error_hva(hva) && writable) 1248 *writable = !memslot_is_readonly(slot); 1249 1250 return hva; 1251 } 1252 1253 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1254 { 1255 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1256 1257 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1258 } 1259 1260 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 1261 { 1262 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1263 1264 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1265 } 1266 1267 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, 1268 unsigned long start, int write, struct page **page) 1269 { 1270 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1271 1272 if (write) 1273 flags |= FOLL_WRITE; 1274 1275 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1276 } 1277 1278 static inline int check_user_page_hwpoison(unsigned long addr) 1279 { 1280 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1281 1282 rc = __get_user_pages(current, current->mm, addr, 1, 1283 flags, NULL, NULL, NULL); 1284 return rc == -EHWPOISON; 1285 } 1286 1287 /* 1288 * The atomic path to get the writable pfn which will be stored in @pfn, 1289 * true indicates success, otherwise false is returned. 1290 */ 1291 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1292 bool write_fault, bool *writable, kvm_pfn_t *pfn) 1293 { 1294 struct page *page[1]; 1295 int npages; 1296 1297 if (!(async || atomic)) 1298 return false; 1299 1300 /* 1301 * Fast pin a writable pfn only if it is a write fault request 1302 * or the caller allows to map a writable pfn for a read fault 1303 * request. 1304 */ 1305 if (!(write_fault || writable)) 1306 return false; 1307 1308 npages = __get_user_pages_fast(addr, 1, 1, page); 1309 if (npages == 1) { 1310 *pfn = page_to_pfn(page[0]); 1311 1312 if (writable) 1313 *writable = true; 1314 return true; 1315 } 1316 1317 return false; 1318 } 1319 1320 /* 1321 * The slow path to get the pfn of the specified host virtual address, 1322 * 1 indicates success, -errno is returned if error is detected. 
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep and fails fast instead
 * @async: if non-NULL, don't wait for IO to bring the host page into
 *	memory; *async is set when the page could be faulted in later
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
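 *
 * For example, gfn_to_pfn_memslot() below passes (atomic=false,
 * async=NULL, write_fault=true, writable=NULL) and so may sleep until a
 * writable page is pinned, while gfn_to_pfn_memslot_atomic() passes
 * atomic=true and simply returns KVM_PFN_ERR_FAULT when the fast path
 * cannot pin the page.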
 */
static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	kvm_pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
gfn_t entry; 1506 1507 addr = gfn_to_hva_many(slot, gfn, &entry); 1508 if (kvm_is_error_hva(addr)) 1509 return -1; 1510 1511 if (entry < nr_pages) 1512 return 0; 1513 1514 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1515 } 1516 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1517 1518 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 1519 { 1520 if (is_error_noslot_pfn(pfn)) 1521 return KVM_ERR_PTR_BAD_PAGE; 1522 1523 if (kvm_is_reserved_pfn(pfn)) { 1524 WARN_ON(1); 1525 return KVM_ERR_PTR_BAD_PAGE; 1526 } 1527 1528 return pfn_to_page(pfn); 1529 } 1530 1531 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1532 { 1533 kvm_pfn_t pfn; 1534 1535 pfn = gfn_to_pfn(kvm, gfn); 1536 1537 return kvm_pfn_to_page(pfn); 1538 } 1539 EXPORT_SYMBOL_GPL(gfn_to_page); 1540 1541 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1542 { 1543 kvm_pfn_t pfn; 1544 1545 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 1546 1547 return kvm_pfn_to_page(pfn); 1548 } 1549 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 1550 1551 void kvm_release_page_clean(struct page *page) 1552 { 1553 WARN_ON(is_error_page(page)); 1554 1555 kvm_release_pfn_clean(page_to_pfn(page)); 1556 } 1557 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1558 1559 void kvm_release_pfn_clean(kvm_pfn_t pfn) 1560 { 1561 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1562 put_page(pfn_to_page(pfn)); 1563 } 1564 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1565 1566 void kvm_release_page_dirty(struct page *page) 1567 { 1568 WARN_ON(is_error_page(page)); 1569 1570 kvm_release_pfn_dirty(page_to_pfn(page)); 1571 } 1572 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1573 1574 static void kvm_release_pfn_dirty(kvm_pfn_t pfn) 1575 { 1576 kvm_set_pfn_dirty(pfn); 1577 kvm_release_pfn_clean(pfn); 1578 } 1579 1580 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 1581 { 1582 if (!kvm_is_reserved_pfn(pfn)) { 1583 struct page *page = pfn_to_page(pfn); 1584 1585 if (!PageReserved(page)) 1586 SetPageDirty(page); 1587 } 1588 } 1589 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1590 1591 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 1592 { 1593 if (!kvm_is_reserved_pfn(pfn)) 1594 mark_page_accessed(pfn_to_page(pfn)); 1595 } 1596 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1597 1598 void kvm_get_pfn(kvm_pfn_t pfn) 1599 { 1600 if (!kvm_is_reserved_pfn(pfn)) 1601 get_page(pfn_to_page(pfn)); 1602 } 1603 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1604 1605 static int next_segment(unsigned long len, int offset) 1606 { 1607 if (len > PAGE_SIZE - offset) 1608 return PAGE_SIZE - offset; 1609 else 1610 return len; 1611 } 1612 1613 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 1614 void *data, int offset, int len) 1615 { 1616 int r; 1617 unsigned long addr; 1618 1619 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1620 if (kvm_is_error_hva(addr)) 1621 return -EFAULT; 1622 r = __copy_from_user(data, (void __user *)addr + offset, len); 1623 if (r) 1624 return -EFAULT; 1625 return 0; 1626 } 1627 1628 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1629 int len) 1630 { 1631 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1632 1633 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1634 } 1635 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1636 1637 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 1638 int offset, int len) 1639 { 1640 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1641 1642 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1643 } 1644 
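/*
 * Note: the *_page variants above never cross a page boundary; callers
 * with an arbitrary gpa/len use kvm_read_guest()/kvm_write_guest() below,
 * which split the access with next_segment(), e.g. (sketch):
 *
 *	u64 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */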
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 1645 1646 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1647 { 1648 gfn_t gfn = gpa >> PAGE_SHIFT; 1649 int seg; 1650 int offset = offset_in_page(gpa); 1651 int ret; 1652 1653 while ((seg = next_segment(len, offset)) != 0) { 1654 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1655 if (ret < 0) 1656 return ret; 1657 offset = 0; 1658 len -= seg; 1659 data += seg; 1660 ++gfn; 1661 } 1662 return 0; 1663 } 1664 EXPORT_SYMBOL_GPL(kvm_read_guest); 1665 1666 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 1667 { 1668 gfn_t gfn = gpa >> PAGE_SHIFT; 1669 int seg; 1670 int offset = offset_in_page(gpa); 1671 int ret; 1672 1673 while ((seg = next_segment(len, offset)) != 0) { 1674 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 1675 if (ret < 0) 1676 return ret; 1677 offset = 0; 1678 len -= seg; 1679 data += seg; 1680 ++gfn; 1681 } 1682 return 0; 1683 } 1684 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 1685 1686 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1687 void *data, int offset, unsigned long len) 1688 { 1689 int r; 1690 unsigned long addr; 1691 1692 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1693 if (kvm_is_error_hva(addr)) 1694 return -EFAULT; 1695 pagefault_disable(); 1696 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1697 pagefault_enable(); 1698 if (r) 1699 return -EFAULT; 1700 return 0; 1701 } 1702 1703 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1704 unsigned long len) 1705 { 1706 gfn_t gfn = gpa >> PAGE_SHIFT; 1707 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1708 int offset = offset_in_page(gpa); 1709 1710 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1711 } 1712 EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); 1713 1714 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 1715 void *data, unsigned long len) 1716 { 1717 gfn_t gfn = gpa >> PAGE_SHIFT; 1718 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1719 int offset = offset_in_page(gpa); 1720 1721 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1722 } 1723 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 1724 1725 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, 1726 const void *data, int offset, int len) 1727 { 1728 int r; 1729 unsigned long addr; 1730 1731 addr = gfn_to_hva_memslot(memslot, gfn); 1732 if (kvm_is_error_hva(addr)) 1733 return -EFAULT; 1734 r = __copy_to_user((void __user *)addr + offset, data, len); 1735 if (r) 1736 return -EFAULT; 1737 mark_page_dirty_in_slot(memslot, gfn); 1738 return 0; 1739 } 1740 1741 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 1742 const void *data, int offset, int len) 1743 { 1744 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1745 1746 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1747 } 1748 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1749 1750 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 1751 const void *data, int offset, int len) 1752 { 1753 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1754 1755 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1756 } 1757 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 1758 1759 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1760 unsigned long len) 1761 { 1762 gfn_t gfn = gpa >> PAGE_SHIFT; 1763 int seg; 1764 int offset = offset_in_page(gpa); 1765 
int ret; 1766 1767 while ((seg = next_segment(len, offset)) != 0) { 1768 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1769 if (ret < 0) 1770 return ret; 1771 offset = 0; 1772 len -= seg; 1773 data += seg; 1774 ++gfn; 1775 } 1776 return 0; 1777 } 1778 EXPORT_SYMBOL_GPL(kvm_write_guest); 1779 1780 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 1781 unsigned long len) 1782 { 1783 gfn_t gfn = gpa >> PAGE_SHIFT; 1784 int seg; 1785 int offset = offset_in_page(gpa); 1786 int ret; 1787 1788 while ((seg = next_segment(len, offset)) != 0) { 1789 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 1790 if (ret < 0) 1791 return ret; 1792 offset = 0; 1793 len -= seg; 1794 data += seg; 1795 ++gfn; 1796 } 1797 return 0; 1798 } 1799 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 1800 1801 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1802 gpa_t gpa, unsigned long len) 1803 { 1804 struct kvm_memslots *slots = kvm_memslots(kvm); 1805 int offset = offset_in_page(gpa); 1806 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1807 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1808 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1809 gfn_t nr_pages_avail; 1810 1811 ghc->gpa = gpa; 1812 ghc->generation = slots->generation; 1813 ghc->len = len; 1814 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1815 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); 1816 if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { 1817 ghc->hva += offset; 1818 } else { 1819 /* 1820 * If the requested region crosses two memslots, we still 1821 * verify that the entire region is valid here. 1822 */ 1823 while (start_gfn <= end_gfn) { 1824 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1825 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1826 &nr_pages_avail); 1827 if (kvm_is_error_hva(ghc->hva)) 1828 return -EFAULT; 1829 start_gfn += nr_pages_avail; 1830 } 1831 /* Use the slow path for cross page reads and writes. 
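 * A typical user initializes the cache once and then uses the cached
 * accessors on its hot path, e.g. (sketch, with an illustrative guest
 * structure st):
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(st));
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st));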
*/ 1832 ghc->memslot = NULL; 1833 } 1834 return 0; 1835 } 1836 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1837 1838 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1839 void *data, unsigned long len) 1840 { 1841 struct kvm_memslots *slots = kvm_memslots(kvm); 1842 int r; 1843 1844 BUG_ON(len > ghc->len); 1845 1846 if (slots->generation != ghc->generation) 1847 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1848 1849 if (unlikely(!ghc->memslot)) 1850 return kvm_write_guest(kvm, ghc->gpa, data, len); 1851 1852 if (kvm_is_error_hva(ghc->hva)) 1853 return -EFAULT; 1854 1855 r = __copy_to_user((void __user *)ghc->hva, data, len); 1856 if (r) 1857 return -EFAULT; 1858 mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1859 1860 return 0; 1861 } 1862 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1863 1864 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1865 void *data, unsigned long len) 1866 { 1867 struct kvm_memslots *slots = kvm_memslots(kvm); 1868 int r; 1869 1870 BUG_ON(len > ghc->len); 1871 1872 if (slots->generation != ghc->generation) 1873 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1874 1875 if (unlikely(!ghc->memslot)) 1876 return kvm_read_guest(kvm, ghc->gpa, data, len); 1877 1878 if (kvm_is_error_hva(ghc->hva)) 1879 return -EFAULT; 1880 1881 r = __copy_from_user(data, (void __user *)ghc->hva, len); 1882 if (r) 1883 return -EFAULT; 1884 1885 return 0; 1886 } 1887 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 1888 1889 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1890 { 1891 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 1892 1893 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 1894 } 1895 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1896 1897 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1898 { 1899 gfn_t gfn = gpa >> PAGE_SHIFT; 1900 int seg; 1901 int offset = offset_in_page(gpa); 1902 int ret; 1903 1904 while ((seg = next_segment(len, offset)) != 0) { 1905 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1906 if (ret < 0) 1907 return ret; 1908 offset = 0; 1909 len -= seg; 1910 ++gfn; 1911 } 1912 return 0; 1913 } 1914 EXPORT_SYMBOL_GPL(kvm_clear_guest); 1915 1916 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 1917 gfn_t gfn) 1918 { 1919 if (memslot && memslot->dirty_bitmap) { 1920 unsigned long rel_gfn = gfn - memslot->base_gfn; 1921 1922 set_bit_le(rel_gfn, memslot->dirty_bitmap); 1923 } 1924 } 1925 1926 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1927 { 1928 struct kvm_memory_slot *memslot; 1929 1930 memslot = gfn_to_memslot(kvm, gfn); 1931 mark_page_dirty_in_slot(memslot, gfn); 1932 } 1933 EXPORT_SYMBOL_GPL(mark_page_dirty); 1934 1935 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 1936 { 1937 struct kvm_memory_slot *memslot; 1938 1939 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1940 mark_page_dirty_in_slot(memslot, gfn); 1941 } 1942 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 1943 1944 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 1945 { 1946 int old, val; 1947 1948 old = val = vcpu->halt_poll_ns; 1949 /* 10us base */ 1950 if (val == 0 && halt_poll_ns_grow) 1951 val = 10000; 1952 else 1953 val *= halt_poll_ns_grow; 1954 1955 vcpu->halt_poll_ns = val; 1956 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 1957 } 1958 1959 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 1960 { 1961 int old, val; 1962 1963 old = val = vcpu->halt_poll_ns; 1964 if 
(halt_poll_ns_shrink == 0) 1965 val = 0; 1966 else 1967 val /= halt_poll_ns_shrink; 1968 1969 vcpu->halt_poll_ns = val; 1970 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 1971 } 1972 1973 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 1974 { 1975 if (kvm_arch_vcpu_runnable(vcpu)) { 1976 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1977 return -EINTR; 1978 } 1979 if (kvm_cpu_has_pending_timer(vcpu)) 1980 return -EINTR; 1981 if (signal_pending(current)) 1982 return -EINTR; 1983 1984 return 0; 1985 } 1986 1987 /* 1988 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 1989 */ 1990 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 1991 { 1992 ktime_t start, cur; 1993 DEFINE_WAIT(wait); 1994 bool waited = false; 1995 u64 block_ns; 1996 1997 start = cur = ktime_get(); 1998 if (vcpu->halt_poll_ns) { 1999 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2000 2001 ++vcpu->stat.halt_attempted_poll; 2002 do { 2003 /* 2004 * This sets KVM_REQ_UNHALT if an interrupt 2005 * arrives. 2006 */ 2007 if (kvm_vcpu_check_block(vcpu) < 0) { 2008 ++vcpu->stat.halt_successful_poll; 2009 goto out; 2010 } 2011 cur = ktime_get(); 2012 } while (single_task_running() && ktime_before(cur, stop)); 2013 } 2014 2015 kvm_arch_vcpu_blocking(vcpu); 2016 2017 for (;;) { 2018 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2019 2020 if (kvm_vcpu_check_block(vcpu) < 0) 2021 break; 2022 2023 waited = true; 2024 schedule(); 2025 } 2026 2027 finish_wait(&vcpu->wq, &wait); 2028 cur = ktime_get(); 2029 2030 kvm_arch_vcpu_unblocking(vcpu); 2031 out: 2032 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2033 2034 if (halt_poll_ns) { 2035 if (block_ns <= vcpu->halt_poll_ns) 2036 ; 2037 /* we had a long block, shrink polling */ 2038 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2039 shrink_halt_poll_ns(vcpu); 2040 /* we had a short halt and our poll time is too small */ 2041 else if (vcpu->halt_poll_ns < halt_poll_ns && 2042 block_ns < halt_poll_ns) 2043 grow_halt_poll_ns(vcpu); 2044 } else 2045 vcpu->halt_poll_ns = 0; 2046 2047 trace_kvm_vcpu_wakeup(block_ns, waited); 2048 } 2049 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2050 2051 #ifndef CONFIG_S390 2052 /* 2053 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 2054 */ 2055 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2056 { 2057 int me; 2058 int cpu = vcpu->cpu; 2059 wait_queue_head_t *wqp; 2060 2061 wqp = kvm_arch_vcpu_wq(vcpu); 2062 if (waitqueue_active(wqp)) { 2063 wake_up_interruptible(wqp); 2064 ++vcpu->stat.halt_wakeup; 2065 } 2066 2067 me = get_cpu(); 2068 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2069 if (kvm_arch_vcpu_should_kick(vcpu)) 2070 smp_send_reschedule(cpu); 2071 put_cpu(); 2072 } 2073 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2074 #endif /* !CONFIG_S390 */ 2075 2076 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2077 { 2078 struct pid *pid; 2079 struct task_struct *task = NULL; 2080 int ret = 0; 2081 2082 rcu_read_lock(); 2083 pid = rcu_dereference(target->pid); 2084 if (pid) 2085 task = get_pid_task(pid, PIDTYPE_PID); 2086 rcu_read_unlock(); 2087 if (!task) 2088 return ret; 2089 ret = yield_to(task, 1); 2090 put_task_struct(task); 2091 2092 return ret; 2093 } 2094 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2095 2096 /* 2097 * Helper that checks whether a VCPU is eligible for directed yield. 
2098 * The most eligible candidate to yield to is decided by the following heuristics: 2099 * 2100 * (a) A VCPU which has not done a PLE exit or had CPU relax intercepted recently 2101 * (a preempted lock holder), indicated by @in_spin_loop. 2102 * This is set at the beginning and cleared at the end of the interception/PLE handler. 2103 * 2104 * (b) A VCPU which has done a PLE exit/CPU relax intercept but did not get a 2105 * chance last time (it has most likely become eligible now, since we probably 2106 * yielded to the lock holder in the last iteration). This is tracked by toggling 2107 * @dy_eligible each time a VCPU is checked for eligibility. 2108 * 2109 * Yielding to a recently PLE-exited/CPU-relax-intercepted VCPU before yielding 2110 * to the preempted lock holder could result in the wrong VCPU being selected and in CPU 2111 * burning. Giving priority to a potential lock holder increases lock 2112 * progress. 2113 * 2114 * Since the algorithm is based on heuristics, accessing another VCPU's data without 2115 * locking does no harm. At worst we try to yield to the same VCPU, fail, 2116 * and continue with the next VCPU, and so on. 2117 */ 2118 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2119 { 2120 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2121 bool eligible; 2122 2123 eligible = !vcpu->spin_loop.in_spin_loop || 2124 vcpu->spin_loop.dy_eligible; 2125 2126 if (vcpu->spin_loop.in_spin_loop) 2127 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2128 2129 return eligible; 2130 #else 2131 return true; 2132 #endif 2133 } 2134 2135 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2136 { 2137 struct kvm *kvm = me->kvm; 2138 struct kvm_vcpu *vcpu; 2139 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2140 int yielded = 0; 2141 int try = 3; 2142 int pass; 2143 int i; 2144 2145 kvm_vcpu_set_in_spin_loop(me, true); 2146 /* 2147 * We boost the priority of a VCPU that is runnable but not 2148 * currently running, because it got preempted by something 2149 * else and called schedule in __vcpu_run. Hopefully that 2150 * VCPU is holding the lock that we need and will release it. 2151 * We approximate round-robin by starting at the last boosted VCPU.
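 * For example, with four VCPUs and last_boosted_vcpu == 1, the first pass scans VCPUs 2 and 3 and the second pass scans VCPUs 0 and 1, skipping ourselves and stopping early on a successful yield.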
2152 */ 2153 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2154 kvm_for_each_vcpu(i, vcpu, kvm) { 2155 if (!pass && i <= last_boosted_vcpu) { 2156 i = last_boosted_vcpu; 2157 continue; 2158 } else if (pass && i > last_boosted_vcpu) 2159 break; 2160 if (!ACCESS_ONCE(vcpu->preempted)) 2161 continue; 2162 if (vcpu == me) 2163 continue; 2164 if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2165 continue; 2166 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2167 continue; 2168 2169 yielded = kvm_vcpu_yield_to(vcpu); 2170 if (yielded > 0) { 2171 kvm->last_boosted_vcpu = i; 2172 break; 2173 } else if (yielded < 0) { 2174 try--; 2175 if (!try) 2176 break; 2177 } 2178 } 2179 } 2180 kvm_vcpu_set_in_spin_loop(me, false); 2181 2182 /* Ensure vcpu is not eligible during next spinloop */ 2183 kvm_vcpu_set_dy_eligible(me, false); 2184 } 2185 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2186 2187 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2188 { 2189 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 2190 struct page *page; 2191 2192 if (vmf->pgoff == 0) 2193 page = virt_to_page(vcpu->run); 2194 #ifdef CONFIG_X86 2195 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2196 page = virt_to_page(vcpu->arch.pio_data); 2197 #endif 2198 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2199 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2200 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2201 #endif 2202 else 2203 return kvm_arch_vcpu_fault(vcpu, vmf); 2204 get_page(page); 2205 vmf->page = page; 2206 return 0; 2207 } 2208 2209 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2210 .fault = kvm_vcpu_fault, 2211 }; 2212 2213 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2214 { 2215 vma->vm_ops = &kvm_vcpu_vm_ops; 2216 return 0; 2217 } 2218 2219 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2220 { 2221 struct kvm_vcpu *vcpu = filp->private_data; 2222 2223 kvm_put_kvm(vcpu->kvm); 2224 return 0; 2225 } 2226 2227 static struct file_operations kvm_vcpu_fops = { 2228 .release = kvm_vcpu_release, 2229 .unlocked_ioctl = kvm_vcpu_ioctl, 2230 #ifdef CONFIG_KVM_COMPAT 2231 .compat_ioctl = kvm_vcpu_compat_ioctl, 2232 #endif 2233 .mmap = kvm_vcpu_mmap, 2234 .llseek = noop_llseek, 2235 }; 2236 2237 /* 2238 * Allocates an inode for the vcpu. 2239 */ 2240 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2241 { 2242 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2243 } 2244 2245 /* 2246 * Creates some virtual cpus. Good luck creating more than one. 
2247 */ 2248 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2249 { 2250 int r; 2251 struct kvm_vcpu *vcpu; 2252 2253 if (id >= KVM_MAX_VCPUS) 2254 return -EINVAL; 2255 2256 vcpu = kvm_arch_vcpu_create(kvm, id); 2257 if (IS_ERR(vcpu)) 2258 return PTR_ERR(vcpu); 2259 2260 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2261 2262 r = kvm_arch_vcpu_setup(vcpu); 2263 if (r) 2264 goto vcpu_destroy; 2265 2266 mutex_lock(&kvm->lock); 2267 if (!kvm_vcpu_compatible(vcpu)) { 2268 r = -EINVAL; 2269 goto unlock_vcpu_destroy; 2270 } 2271 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 2272 r = -EINVAL; 2273 goto unlock_vcpu_destroy; 2274 } 2275 if (kvm_get_vcpu_by_id(kvm, id)) { 2276 r = -EEXIST; 2277 goto unlock_vcpu_destroy; 2278 } 2279 2280 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2281 2282 /* Now it's all set up, let userspace reach it */ 2283 kvm_get_kvm(kvm); 2284 r = create_vcpu_fd(vcpu); 2285 if (r < 0) { 2286 kvm_put_kvm(kvm); 2287 goto unlock_vcpu_destroy; 2288 } 2289 2290 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2291 2292 /* 2293 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2294 * before kvm->online_vcpu's incremented value. 2295 */ 2296 smp_wmb(); 2297 atomic_inc(&kvm->online_vcpus); 2298 2299 mutex_unlock(&kvm->lock); 2300 kvm_arch_vcpu_postcreate(vcpu); 2301 return r; 2302 2303 unlock_vcpu_destroy: 2304 mutex_unlock(&kvm->lock); 2305 vcpu_destroy: 2306 kvm_arch_vcpu_destroy(vcpu); 2307 return r; 2308 } 2309 2310 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2311 { 2312 if (sigset) { 2313 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2314 vcpu->sigset_active = 1; 2315 vcpu->sigset = *sigset; 2316 } else 2317 vcpu->sigset_active = 0; 2318 return 0; 2319 } 2320 2321 static long kvm_vcpu_ioctl(struct file *filp, 2322 unsigned int ioctl, unsigned long arg) 2323 { 2324 struct kvm_vcpu *vcpu = filp->private_data; 2325 void __user *argp = (void __user *)arg; 2326 int r; 2327 struct kvm_fpu *fpu = NULL; 2328 struct kvm_sregs *kvm_sregs = NULL; 2329 2330 if (vcpu->kvm->mm != current->mm) 2331 return -EIO; 2332 2333 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2334 return -EINVAL; 2335 2336 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2337 /* 2338 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2339 * so vcpu_load() would break it. 2340 */ 2341 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2342 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2343 #endif 2344 2345 2346 r = vcpu_load(vcpu); 2347 if (r) 2348 return r; 2349 switch (ioctl) { 2350 case KVM_RUN: 2351 r = -EINVAL; 2352 if (arg) 2353 goto out; 2354 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2355 /* The thread running this VCPU changed. 
*/ 2356 struct pid *oldpid = vcpu->pid; 2357 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2358 2359 rcu_assign_pointer(vcpu->pid, newpid); 2360 if (oldpid) 2361 synchronize_rcu(); 2362 put_pid(oldpid); 2363 } 2364 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2365 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2366 break; 2367 case KVM_GET_REGS: { 2368 struct kvm_regs *kvm_regs; 2369 2370 r = -ENOMEM; 2371 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2372 if (!kvm_regs) 2373 goto out; 2374 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2375 if (r) 2376 goto out_free1; 2377 r = -EFAULT; 2378 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2379 goto out_free1; 2380 r = 0; 2381 out_free1: 2382 kfree(kvm_regs); 2383 break; 2384 } 2385 case KVM_SET_REGS: { 2386 struct kvm_regs *kvm_regs; 2387 2388 r = -ENOMEM; 2389 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2390 if (IS_ERR(kvm_regs)) { 2391 r = PTR_ERR(kvm_regs); 2392 goto out; 2393 } 2394 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2395 kfree(kvm_regs); 2396 break; 2397 } 2398 case KVM_GET_SREGS: { 2399 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2400 r = -ENOMEM; 2401 if (!kvm_sregs) 2402 goto out; 2403 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2404 if (r) 2405 goto out; 2406 r = -EFAULT; 2407 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2408 goto out; 2409 r = 0; 2410 break; 2411 } 2412 case KVM_SET_SREGS: { 2413 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2414 if (IS_ERR(kvm_sregs)) { 2415 r = PTR_ERR(kvm_sregs); 2416 kvm_sregs = NULL; 2417 goto out; 2418 } 2419 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2420 break; 2421 } 2422 case KVM_GET_MP_STATE: { 2423 struct kvm_mp_state mp_state; 2424 2425 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2426 if (r) 2427 goto out; 2428 r = -EFAULT; 2429 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2430 goto out; 2431 r = 0; 2432 break; 2433 } 2434 case KVM_SET_MP_STATE: { 2435 struct kvm_mp_state mp_state; 2436 2437 r = -EFAULT; 2438 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2439 goto out; 2440 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2441 break; 2442 } 2443 case KVM_TRANSLATE: { 2444 struct kvm_translation tr; 2445 2446 r = -EFAULT; 2447 if (copy_from_user(&tr, argp, sizeof(tr))) 2448 goto out; 2449 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2450 if (r) 2451 goto out; 2452 r = -EFAULT; 2453 if (copy_to_user(argp, &tr, sizeof(tr))) 2454 goto out; 2455 r = 0; 2456 break; 2457 } 2458 case KVM_SET_GUEST_DEBUG: { 2459 struct kvm_guest_debug dbg; 2460 2461 r = -EFAULT; 2462 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2463 goto out; 2464 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2465 break; 2466 } 2467 case KVM_SET_SIGNAL_MASK: { 2468 struct kvm_signal_mask __user *sigmask_arg = argp; 2469 struct kvm_signal_mask kvm_sigmask; 2470 sigset_t sigset, *p; 2471 2472 p = NULL; 2473 if (argp) { 2474 r = -EFAULT; 2475 if (copy_from_user(&kvm_sigmask, argp, 2476 sizeof(kvm_sigmask))) 2477 goto out; 2478 r = -EINVAL; 2479 if (kvm_sigmask.len != sizeof(sigset)) 2480 goto out; 2481 r = -EFAULT; 2482 if (copy_from_user(&sigset, sigmask_arg->sigset, 2483 sizeof(sigset))) 2484 goto out; 2485 p = &sigset; 2486 } 2487 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2488 break; 2489 } 2490 case KVM_GET_FPU: { 2491 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2492 r = -ENOMEM; 2493 if (!fpu) 2494 goto out; 2495 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2496 if 
(r) 2497 goto out; 2498 r = -EFAULT; 2499 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2500 goto out; 2501 r = 0; 2502 break; 2503 } 2504 case KVM_SET_FPU: { 2505 fpu = memdup_user(argp, sizeof(*fpu)); 2506 if (IS_ERR(fpu)) { 2507 r = PTR_ERR(fpu); 2508 fpu = NULL; 2509 goto out; 2510 } 2511 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2512 break; 2513 } 2514 default: 2515 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2516 } 2517 out: 2518 vcpu_put(vcpu); 2519 kfree(fpu); 2520 kfree(kvm_sregs); 2521 return r; 2522 } 2523 2524 #ifdef CONFIG_KVM_COMPAT 2525 static long kvm_vcpu_compat_ioctl(struct file *filp, 2526 unsigned int ioctl, unsigned long arg) 2527 { 2528 struct kvm_vcpu *vcpu = filp->private_data; 2529 void __user *argp = compat_ptr(arg); 2530 int r; 2531 2532 if (vcpu->kvm->mm != current->mm) 2533 return -EIO; 2534 2535 switch (ioctl) { 2536 case KVM_SET_SIGNAL_MASK: { 2537 struct kvm_signal_mask __user *sigmask_arg = argp; 2538 struct kvm_signal_mask kvm_sigmask; 2539 compat_sigset_t csigset; 2540 sigset_t sigset; 2541 2542 if (argp) { 2543 r = -EFAULT; 2544 if (copy_from_user(&kvm_sigmask, argp, 2545 sizeof(kvm_sigmask))) 2546 goto out; 2547 r = -EINVAL; 2548 if (kvm_sigmask.len != sizeof(csigset)) 2549 goto out; 2550 r = -EFAULT; 2551 if (copy_from_user(&csigset, sigmask_arg->sigset, 2552 sizeof(csigset))) 2553 goto out; 2554 sigset_from_compat(&sigset, &csigset); 2555 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2556 } else 2557 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2558 break; 2559 } 2560 default: 2561 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2562 } 2563 2564 out: 2565 return r; 2566 } 2567 #endif 2568 2569 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2570 int (*accessor)(struct kvm_device *dev, 2571 struct kvm_device_attr *attr), 2572 unsigned long arg) 2573 { 2574 struct kvm_device_attr attr; 2575 2576 if (!accessor) 2577 return -EPERM; 2578 2579 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2580 return -EFAULT; 2581 2582 return accessor(dev, &attr); 2583 } 2584 2585 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2586 unsigned long arg) 2587 { 2588 struct kvm_device *dev = filp->private_data; 2589 2590 switch (ioctl) { 2591 case KVM_SET_DEVICE_ATTR: 2592 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2593 case KVM_GET_DEVICE_ATTR: 2594 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2595 case KVM_HAS_DEVICE_ATTR: 2596 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2597 default: 2598 if (dev->ops->ioctl) 2599 return dev->ops->ioctl(dev, ioctl, arg); 2600 2601 return -ENOTTY; 2602 } 2603 } 2604 2605 static int kvm_device_release(struct inode *inode, struct file *filp) 2606 { 2607 struct kvm_device *dev = filp->private_data; 2608 struct kvm *kvm = dev->kvm; 2609 2610 kvm_put_kvm(kvm); 2611 return 0; 2612 } 2613 2614 static const struct file_operations kvm_device_fops = { 2615 .unlocked_ioctl = kvm_device_ioctl, 2616 #ifdef CONFIG_KVM_COMPAT 2617 .compat_ioctl = kvm_device_ioctl, 2618 #endif 2619 .release = kvm_device_release, 2620 }; 2621 2622 struct kvm_device *kvm_device_from_filp(struct file *filp) 2623 { 2624 if (filp->f_op != &kvm_device_fops) 2625 return NULL; 2626 2627 return filp->private_data; 2628 } 2629 2630 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2631 #ifdef CONFIG_KVM_MPIC 2632 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2633 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2634 #endif 2635 2636 #ifdef CONFIG_KVM_XICS 2637 [KVM_DEV_TYPE_XICS] 
= &kvm_xics_ops, 2638 #endif 2639 }; 2640 2641 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2642 { 2643 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2644 return -ENOSPC; 2645 2646 if (kvm_device_ops_table[type] != NULL) 2647 return -EEXIST; 2648 2649 kvm_device_ops_table[type] = ops; 2650 return 0; 2651 } 2652 2653 void kvm_unregister_device_ops(u32 type) 2654 { 2655 if (kvm_device_ops_table[type] != NULL) 2656 kvm_device_ops_table[type] = NULL; 2657 } 2658 2659 static int kvm_ioctl_create_device(struct kvm *kvm, 2660 struct kvm_create_device *cd) 2661 { 2662 struct kvm_device_ops *ops = NULL; 2663 struct kvm_device *dev; 2664 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2665 int ret; 2666 2667 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2668 return -ENODEV; 2669 2670 ops = kvm_device_ops_table[cd->type]; 2671 if (ops == NULL) 2672 return -ENODEV; 2673 2674 if (test) 2675 return 0; 2676 2677 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2678 if (!dev) 2679 return -ENOMEM; 2680 2681 dev->ops = ops; 2682 dev->kvm = kvm; 2683 2684 ret = ops->create(dev, cd->type); 2685 if (ret < 0) { 2686 kfree(dev); 2687 return ret; 2688 } 2689 2690 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2691 if (ret < 0) { 2692 ops->destroy(dev); 2693 return ret; 2694 } 2695 2696 list_add(&dev->vm_node, &kvm->devices); 2697 kvm_get_kvm(kvm); 2698 cd->fd = ret; 2699 return 0; 2700 } 2701 2702 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2703 { 2704 switch (arg) { 2705 case KVM_CAP_USER_MEMORY: 2706 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2707 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2708 case KVM_CAP_INTERNAL_ERROR_DATA: 2709 #ifdef CONFIG_HAVE_KVM_MSI 2710 case KVM_CAP_SIGNAL_MSI: 2711 #endif 2712 #ifdef CONFIG_HAVE_KVM_IRQFD 2713 case KVM_CAP_IRQFD: 2714 case KVM_CAP_IRQFD_RESAMPLE: 2715 #endif 2716 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2717 case KVM_CAP_CHECK_EXTENSION_VM: 2718 return 1; 2719 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2720 case KVM_CAP_IRQ_ROUTING: 2721 return KVM_MAX_IRQ_ROUTES; 2722 #endif 2723 #if KVM_ADDRESS_SPACE_NUM > 1 2724 case KVM_CAP_MULTI_ADDRESS_SPACE: 2725 return KVM_ADDRESS_SPACE_NUM; 2726 #endif 2727 default: 2728 break; 2729 } 2730 return kvm_vm_ioctl_check_extension(kvm, arg); 2731 } 2732 2733 static long kvm_vm_ioctl(struct file *filp, 2734 unsigned int ioctl, unsigned long arg) 2735 { 2736 struct kvm *kvm = filp->private_data; 2737 void __user *argp = (void __user *)arg; 2738 int r; 2739 2740 if (kvm->mm != current->mm) 2741 return -EIO; 2742 switch (ioctl) { 2743 case KVM_CREATE_VCPU: 2744 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2745 break; 2746 case KVM_SET_USER_MEMORY_REGION: { 2747 struct kvm_userspace_memory_region kvm_userspace_mem; 2748 2749 r = -EFAULT; 2750 if (copy_from_user(&kvm_userspace_mem, argp, 2751 sizeof(kvm_userspace_mem))) 2752 goto out; 2753 2754 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2755 break; 2756 } 2757 case KVM_GET_DIRTY_LOG: { 2758 struct kvm_dirty_log log; 2759 2760 r = -EFAULT; 2761 if (copy_from_user(&log, argp, sizeof(log))) 2762 goto out; 2763 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2764 break; 2765 } 2766 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2767 case KVM_REGISTER_COALESCED_MMIO: { 2768 struct kvm_coalesced_mmio_zone zone; 2769 2770 r = -EFAULT; 2771 if (copy_from_user(&zone, argp, sizeof(zone))) 2772 goto out; 2773 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2774 break; 2775 } 2776 case KVM_UNREGISTER_COALESCED_MMIO: { 2777 struct 
kvm_coalesced_mmio_zone zone; 2778 2779 r = -EFAULT; 2780 if (copy_from_user(&zone, argp, sizeof(zone))) 2781 goto out; 2782 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2783 break; 2784 } 2785 #endif 2786 case KVM_IRQFD: { 2787 struct kvm_irqfd data; 2788 2789 r = -EFAULT; 2790 if (copy_from_user(&data, argp, sizeof(data))) 2791 goto out; 2792 r = kvm_irqfd(kvm, &data); 2793 break; 2794 } 2795 case KVM_IOEVENTFD: { 2796 struct kvm_ioeventfd data; 2797 2798 r = -EFAULT; 2799 if (copy_from_user(&data, argp, sizeof(data))) 2800 goto out; 2801 r = kvm_ioeventfd(kvm, &data); 2802 break; 2803 } 2804 #ifdef CONFIG_HAVE_KVM_MSI 2805 case KVM_SIGNAL_MSI: { 2806 struct kvm_msi msi; 2807 2808 r = -EFAULT; 2809 if (copy_from_user(&msi, argp, sizeof(msi))) 2810 goto out; 2811 r = kvm_send_userspace_msi(kvm, &msi); 2812 break; 2813 } 2814 #endif 2815 #ifdef __KVM_HAVE_IRQ_LINE 2816 case KVM_IRQ_LINE_STATUS: 2817 case KVM_IRQ_LINE: { 2818 struct kvm_irq_level irq_event; 2819 2820 r = -EFAULT; 2821 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 2822 goto out; 2823 2824 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2825 ioctl == KVM_IRQ_LINE_STATUS); 2826 if (r) 2827 goto out; 2828 2829 r = -EFAULT; 2830 if (ioctl == KVM_IRQ_LINE_STATUS) { 2831 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 2832 goto out; 2833 } 2834 2835 r = 0; 2836 break; 2837 } 2838 #endif 2839 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2840 case KVM_SET_GSI_ROUTING: { 2841 struct kvm_irq_routing routing; 2842 struct kvm_irq_routing __user *urouting; 2843 struct kvm_irq_routing_entry *entries; 2844 2845 r = -EFAULT; 2846 if (copy_from_user(&routing, argp, sizeof(routing))) 2847 goto out; 2848 r = -EINVAL; 2849 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2850 goto out; 2851 if (routing.flags) 2852 goto out; 2853 r = -ENOMEM; 2854 entries = vmalloc(routing.nr * sizeof(*entries)); 2855 if (!entries) 2856 goto out; 2857 r = -EFAULT; 2858 urouting = argp; 2859 if (copy_from_user(entries, urouting->entries, 2860 routing.nr * sizeof(*entries))) 2861 goto out_free_irq_routing; 2862 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2863 routing.flags); 2864 out_free_irq_routing: 2865 vfree(entries); 2866 break; 2867 } 2868 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2869 case KVM_CREATE_DEVICE: { 2870 struct kvm_create_device cd; 2871 2872 r = -EFAULT; 2873 if (copy_from_user(&cd, argp, sizeof(cd))) 2874 goto out; 2875 2876 r = kvm_ioctl_create_device(kvm, &cd); 2877 if (r) 2878 goto out; 2879 2880 r = -EFAULT; 2881 if (copy_to_user(argp, &cd, sizeof(cd))) 2882 goto out; 2883 2884 r = 0; 2885 break; 2886 } 2887 case KVM_CHECK_EXTENSION: 2888 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 2889 break; 2890 default: 2891 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2892 } 2893 out: 2894 return r; 2895 } 2896 2897 #ifdef CONFIG_KVM_COMPAT 2898 struct compat_kvm_dirty_log { 2899 __u32 slot; 2900 __u32 padding1; 2901 union { 2902 compat_uptr_t dirty_bitmap; /* one bit per page */ 2903 __u64 padding2; 2904 }; 2905 }; 2906 2907 static long kvm_vm_compat_ioctl(struct file *filp, 2908 unsigned int ioctl, unsigned long arg) 2909 { 2910 struct kvm *kvm = filp->private_data; 2911 int r; 2912 2913 if (kvm->mm != current->mm) 2914 return -EIO; 2915 switch (ioctl) { 2916 case KVM_GET_DIRTY_LOG: { 2917 struct compat_kvm_dirty_log compat_log; 2918 struct kvm_dirty_log log; 2919 2920 r = -EFAULT; 2921 if (copy_from_user(&compat_log, (void __user *)arg, 2922 sizeof(compat_log))) 2923 goto out; 2924 log.slot = compat_log.slot; 2925 log.padding1 = 
compat_log.padding1; 2926 log.padding2 = compat_log.padding2; 2927 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2928 2929 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2930 break; 2931 } 2932 default: 2933 r = kvm_vm_ioctl(filp, ioctl, arg); 2934 } 2935 2936 out: 2937 return r; 2938 } 2939 #endif 2940 2941 static struct file_operations kvm_vm_fops = { 2942 .release = kvm_vm_release, 2943 .unlocked_ioctl = kvm_vm_ioctl, 2944 #ifdef CONFIG_KVM_COMPAT 2945 .compat_ioctl = kvm_vm_compat_ioctl, 2946 #endif 2947 .llseek = noop_llseek, 2948 }; 2949 2950 static int kvm_dev_ioctl_create_vm(unsigned long type) 2951 { 2952 int r; 2953 struct kvm *kvm; 2954 2955 kvm = kvm_create_vm(type); 2956 if (IS_ERR(kvm)) 2957 return PTR_ERR(kvm); 2958 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2959 r = kvm_coalesced_mmio_init(kvm); 2960 if (r < 0) { 2961 kvm_put_kvm(kvm); 2962 return r; 2963 } 2964 #endif 2965 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2966 if (r < 0) 2967 kvm_put_kvm(kvm); 2968 2969 return r; 2970 } 2971 2972 static long kvm_dev_ioctl(struct file *filp, 2973 unsigned int ioctl, unsigned long arg) 2974 { 2975 long r = -EINVAL; 2976 2977 switch (ioctl) { 2978 case KVM_GET_API_VERSION: 2979 if (arg) 2980 goto out; 2981 r = KVM_API_VERSION; 2982 break; 2983 case KVM_CREATE_VM: 2984 r = kvm_dev_ioctl_create_vm(arg); 2985 break; 2986 case KVM_CHECK_EXTENSION: 2987 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 2988 break; 2989 case KVM_GET_VCPU_MMAP_SIZE: 2990 if (arg) 2991 goto out; 2992 r = PAGE_SIZE; /* struct kvm_run */ 2993 #ifdef CONFIG_X86 2994 r += PAGE_SIZE; /* pio data page */ 2995 #endif 2996 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2997 r += PAGE_SIZE; /* coalesced mmio ring page */ 2998 #endif 2999 break; 3000 case KVM_TRACE_ENABLE: 3001 case KVM_TRACE_PAUSE: 3002 case KVM_TRACE_DISABLE: 3003 r = -EOPNOTSUPP; 3004 break; 3005 default: 3006 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3007 } 3008 out: 3009 return r; 3010 } 3011 3012 static struct file_operations kvm_chardev_ops = { 3013 .unlocked_ioctl = kvm_dev_ioctl, 3014 .compat_ioctl = kvm_dev_ioctl, 3015 .llseek = noop_llseek, 3016 }; 3017 3018 static struct miscdevice kvm_dev = { 3019 KVM_MINOR, 3020 "kvm", 3021 &kvm_chardev_ops, 3022 }; 3023 3024 static void hardware_enable_nolock(void *junk) 3025 { 3026 int cpu = raw_smp_processor_id(); 3027 int r; 3028 3029 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3030 return; 3031 3032 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3033 3034 r = kvm_arch_hardware_enable(); 3035 3036 if (r) { 3037 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3038 atomic_inc(&hardware_enable_failed); 3039 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3040 } 3041 } 3042 3043 static void hardware_enable(void) 3044 { 3045 raw_spin_lock(&kvm_count_lock); 3046 if (kvm_usage_count) 3047 hardware_enable_nolock(NULL); 3048 raw_spin_unlock(&kvm_count_lock); 3049 } 3050 3051 static void hardware_disable_nolock(void *junk) 3052 { 3053 int cpu = raw_smp_processor_id(); 3054 3055 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3056 return; 3057 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3058 kvm_arch_hardware_disable(); 3059 } 3060 3061 static void hardware_disable(void) 3062 { 3063 raw_spin_lock(&kvm_count_lock); 3064 if (kvm_usage_count) 3065 hardware_disable_nolock(NULL); 3066 raw_spin_unlock(&kvm_count_lock); 3067 } 3068 3069 static void hardware_disable_all_nolock(void) 3070 { 3071 BUG_ON(!kvm_usage_count); 3072 3073 kvm_usage_count--; 3074 if 
(!kvm_usage_count) 3075 on_each_cpu(hardware_disable_nolock, NULL, 1); 3076 } 3077 3078 static void hardware_disable_all(void) 3079 { 3080 raw_spin_lock(&kvm_count_lock); 3081 hardware_disable_all_nolock(); 3082 raw_spin_unlock(&kvm_count_lock); 3083 } 3084 3085 static int hardware_enable_all(void) 3086 { 3087 int r = 0; 3088 3089 raw_spin_lock(&kvm_count_lock); 3090 3091 kvm_usage_count++; 3092 if (kvm_usage_count == 1) { 3093 atomic_set(&hardware_enable_failed, 0); 3094 on_each_cpu(hardware_enable_nolock, NULL, 1); 3095 3096 if (atomic_read(&hardware_enable_failed)) { 3097 hardware_disable_all_nolock(); 3098 r = -EBUSY; 3099 } 3100 } 3101 3102 raw_spin_unlock(&kvm_count_lock); 3103 3104 return r; 3105 } 3106 3107 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 3108 void *v) 3109 { 3110 val &= ~CPU_TASKS_FROZEN; 3111 switch (val) { 3112 case CPU_DYING: 3113 hardware_disable(); 3114 break; 3115 case CPU_STARTING: 3116 hardware_enable(); 3117 break; 3118 } 3119 return NOTIFY_OK; 3120 } 3121 3122 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 3123 void *v) 3124 { 3125 /* 3126 * Some (well, at least mine) BIOSes hang on reboot if 3127 * in VMX root mode. 3128 * 3129 * In addition, Intel TXT requires VMX to be off on all CPUs when the system shuts down. 3130 */ 3131 pr_info("kvm: exiting hardware virtualization\n"); 3132 kvm_rebooting = true; 3133 on_each_cpu(hardware_disable_nolock, NULL, 1); 3134 return NOTIFY_OK; 3135 } 3136 3137 static struct notifier_block kvm_reboot_notifier = { 3138 .notifier_call = kvm_reboot, 3139 .priority = 0, 3140 }; 3141 3142 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 3143 { 3144 int i; 3145 3146 for (i = 0; i < bus->dev_count; i++) { 3147 struct kvm_io_device *pos = bus->range[i].dev; 3148 3149 kvm_iodevice_destructor(pos); 3150 } 3151 kfree(bus); 3152 } 3153 3154 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3155 const struct kvm_io_range *r2) 3156 { 3157 gpa_t addr1 = r1->addr; 3158 gpa_t addr2 = r2->addr; 3159 3160 if (addr1 < addr2) 3161 return -1; 3162 3163 /* If r2->len == 0, match the exact address. If r2->len != 0, 3164 * accept any overlapping write. Any order is acceptable for 3165 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 3166 * we process all of them.
3167 */ 3168 if (r2->len) { 3169 addr1 += r1->len; 3170 addr2 += r2->len; 3171 } 3172 3173 if (addr1 > addr2) 3174 return 1; 3175 3176 return 0; 3177 } 3178 3179 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3180 { 3181 return kvm_io_bus_cmp(p1, p2); 3182 } 3183 3184 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3185 gpa_t addr, int len) 3186 { 3187 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3188 .addr = addr, 3189 .len = len, 3190 .dev = dev, 3191 }; 3192 3193 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3194 kvm_io_bus_sort_cmp, NULL); 3195 3196 return 0; 3197 } 3198 3199 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3200 gpa_t addr, int len) 3201 { 3202 struct kvm_io_range *range, key; 3203 int off; 3204 3205 key = (struct kvm_io_range) { 3206 .addr = addr, 3207 .len = len, 3208 }; 3209 3210 range = bsearch(&key, bus->range, bus->dev_count, 3211 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3212 if (range == NULL) 3213 return -ENOENT; 3214 3215 off = range - bus->range; 3216 3217 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3218 off--; 3219 3220 return off; 3221 } 3222 3223 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3224 struct kvm_io_range *range, const void *val) 3225 { 3226 int idx; 3227 3228 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3229 if (idx < 0) 3230 return -EOPNOTSUPP; 3231 3232 while (idx < bus->dev_count && 3233 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3234 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3235 range->len, val)) 3236 return idx; 3237 idx++; 3238 } 3239 3240 return -EOPNOTSUPP; 3241 } 3242 3243 /* kvm_io_bus_write - called under kvm->slots_lock */ 3244 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3245 int len, const void *val) 3246 { 3247 struct kvm_io_bus *bus; 3248 struct kvm_io_range range; 3249 int r; 3250 3251 range = (struct kvm_io_range) { 3252 .addr = addr, 3253 .len = len, 3254 }; 3255 3256 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3257 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3258 return r < 0 ? r : 0; 3259 } 3260 3261 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3262 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3263 gpa_t addr, int len, const void *val, long cookie) 3264 { 3265 struct kvm_io_bus *bus; 3266 struct kvm_io_range range; 3267 3268 range = (struct kvm_io_range) { 3269 .addr = addr, 3270 .len = len, 3271 }; 3272 3273 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3274 3275 /* First try the device referenced by cookie. */ 3276 if ((cookie >= 0) && (cookie < bus->dev_count) && 3277 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3278 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3279 val)) 3280 return cookie; 3281 3282 /* 3283 * cookie contained garbage; fall back to search and return the 3284 * correct cookie value. 
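 * The device index returned by __kvm_io_bus_write() can be used as the cookie for subsequent writes to the same range.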
3285 */ 3286 return __kvm_io_bus_write(vcpu, bus, &range, val); 3287 } 3288 3289 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3290 struct kvm_io_range *range, void *val) 3291 { 3292 int idx; 3293 3294 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3295 if (idx < 0) 3296 return -EOPNOTSUPP; 3297 3298 while (idx < bus->dev_count && 3299 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3300 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3301 range->len, val)) 3302 return idx; 3303 idx++; 3304 } 3305 3306 return -EOPNOTSUPP; 3307 } 3308 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3309 3310 /* kvm_io_bus_read - called under kvm->slots_lock */ 3311 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3312 int len, void *val) 3313 { 3314 struct kvm_io_bus *bus; 3315 struct kvm_io_range range; 3316 int r; 3317 3318 range = (struct kvm_io_range) { 3319 .addr = addr, 3320 .len = len, 3321 }; 3322 3323 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3324 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3325 return r < 0 ? r : 0; 3326 } 3327 3328 3329 /* Caller must hold slots_lock. */ 3330 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3331 int len, struct kvm_io_device *dev) 3332 { 3333 struct kvm_io_bus *new_bus, *bus; 3334 3335 bus = kvm->buses[bus_idx]; 3336 /* exclude ioeventfd which is limited by maximum fd */ 3337 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3338 return -ENOSPC; 3339 3340 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3341 sizeof(struct kvm_io_range)), GFP_KERNEL); 3342 if (!new_bus) 3343 return -ENOMEM; 3344 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3345 sizeof(struct kvm_io_range))); 3346 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3347 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3348 synchronize_srcu_expedited(&kvm->srcu); 3349 kfree(bus); 3350 3351 return 0; 3352 } 3353 3354 /* Caller must hold slots_lock. 
*/ 3355 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3356 struct kvm_io_device *dev) 3357 { 3358 int i, r; 3359 struct kvm_io_bus *new_bus, *bus; 3360 3361 bus = kvm->buses[bus_idx]; 3362 r = -ENOENT; 3363 for (i = 0; i < bus->dev_count; i++) 3364 if (bus->range[i].dev == dev) { 3365 r = 0; 3366 break; 3367 } 3368 3369 if (r) 3370 return r; 3371 3372 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3373 sizeof(struct kvm_io_range)), GFP_KERNEL); 3374 if (!new_bus) 3375 return -ENOMEM; 3376 3377 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3378 new_bus->dev_count--; 3379 memcpy(new_bus->range + i, bus->range + i + 1, 3380 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3381 3382 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3383 synchronize_srcu_expedited(&kvm->srcu); 3384 kfree(bus); 3385 return r; 3386 } 3387 3388 static struct notifier_block kvm_cpu_notifier = { 3389 .notifier_call = kvm_cpu_hotplug, 3390 }; 3391 3392 static int vm_stat_get(void *_offset, u64 *val) 3393 { 3394 unsigned offset = (long)_offset; 3395 struct kvm *kvm; 3396 3397 *val = 0; 3398 spin_lock(&kvm_lock); 3399 list_for_each_entry(kvm, &vm_list, vm_list) 3400 *val += *(u32 *)((void *)kvm + offset); 3401 spin_unlock(&kvm_lock); 3402 return 0; 3403 } 3404 3405 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3406 3407 static int vcpu_stat_get(void *_offset, u64 *val) 3408 { 3409 unsigned offset = (long)_offset; 3410 struct kvm *kvm; 3411 struct kvm_vcpu *vcpu; 3412 int i; 3413 3414 *val = 0; 3415 spin_lock(&kvm_lock); 3416 list_for_each_entry(kvm, &vm_list, vm_list) 3417 kvm_for_each_vcpu(i, vcpu, kvm) 3418 *val += *(u32 *)((void *)vcpu + offset); 3419 3420 spin_unlock(&kvm_lock); 3421 return 0; 3422 } 3423 3424 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3425 3426 static const struct file_operations *stat_fops[] = { 3427 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3428 [KVM_STAT_VM] = &vm_stat_fops, 3429 }; 3430 3431 static int kvm_init_debug(void) 3432 { 3433 int r = -EEXIST; 3434 struct kvm_stats_debugfs_item *p; 3435 3436 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3437 if (kvm_debugfs_dir == NULL) 3438 goto out; 3439 3440 for (p = debugfs_entries; p->name; ++p) { 3441 if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3442 (void *)(long)p->offset, 3443 stat_fops[p->kind])) 3444 goto out_dir; 3445 } 3446 3447 return 0; 3448 3449 out_dir: 3450 debugfs_remove_recursive(kvm_debugfs_dir); 3451 out: 3452 return r; 3453 } 3454 3455 static int kvm_suspend(void) 3456 { 3457 if (kvm_usage_count) 3458 hardware_disable_nolock(NULL); 3459 return 0; 3460 } 3461 3462 static void kvm_resume(void) 3463 { 3464 if (kvm_usage_count) { 3465 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3466 hardware_enable_nolock(NULL); 3467 } 3468 } 3469 3470 static struct syscore_ops kvm_syscore_ops = { 3471 .suspend = kvm_suspend, 3472 .resume = kvm_resume, 3473 }; 3474 3475 static inline 3476 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3477 { 3478 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3479 } 3480 3481 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3482 { 3483 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3484 3485 if (vcpu->preempted) 3486 vcpu->preempted = false; 3487 3488 kvm_arch_sched_in(vcpu, cpu); 3489 3490 kvm_arch_vcpu_load(vcpu, cpu); 3491 } 3492 3493 static void kvm_sched_out(struct preempt_notifier *pn, 3494 struct task_struct *next) 3495 { 3496 
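/* Mark the vcpu preempted only if its task was still runnable; a voluntary sleep is not preemption. */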
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3497 3498 if (current->state == TASK_RUNNING) 3499 vcpu->preempted = true; 3500 kvm_arch_vcpu_put(vcpu); 3501 } 3502 3503 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3504 struct module *module) 3505 { 3506 int r; 3507 int cpu; 3508 3509 r = kvm_arch_init(opaque); 3510 if (r) 3511 goto out_fail; 3512 3513 /* 3514 * kvm_arch_init makes sure there's at most one caller 3515 * for architectures that support multiple implementations, 3516 * like intel and amd on x86. 3517 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3518 * conflicts in case kvm is already setup for another implementation. 3519 */ 3520 r = kvm_irqfd_init(); 3521 if (r) 3522 goto out_irqfd; 3523 3524 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3525 r = -ENOMEM; 3526 goto out_free_0; 3527 } 3528 3529 r = kvm_arch_hardware_setup(); 3530 if (r < 0) 3531 goto out_free_0a; 3532 3533 for_each_online_cpu(cpu) { 3534 smp_call_function_single(cpu, 3535 kvm_arch_check_processor_compat, 3536 &r, 1); 3537 if (r < 0) 3538 goto out_free_1; 3539 } 3540 3541 r = register_cpu_notifier(&kvm_cpu_notifier); 3542 if (r) 3543 goto out_free_2; 3544 register_reboot_notifier(&kvm_reboot_notifier); 3545 3546 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 3547 if (!vcpu_align) 3548 vcpu_align = __alignof__(struct kvm_vcpu); 3549 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3550 0, NULL); 3551 if (!kvm_vcpu_cache) { 3552 r = -ENOMEM; 3553 goto out_free_3; 3554 } 3555 3556 r = kvm_async_pf_init(); 3557 if (r) 3558 goto out_free; 3559 3560 kvm_chardev_ops.owner = module; 3561 kvm_vm_fops.owner = module; 3562 kvm_vcpu_fops.owner = module; 3563 3564 r = misc_register(&kvm_dev); 3565 if (r) { 3566 pr_err("kvm: misc device register failed\n"); 3567 goto out_unreg; 3568 } 3569 3570 register_syscore_ops(&kvm_syscore_ops); 3571 3572 kvm_preempt_ops.sched_in = kvm_sched_in; 3573 kvm_preempt_ops.sched_out = kvm_sched_out; 3574 3575 r = kvm_init_debug(); 3576 if (r) { 3577 pr_err("kvm: create debugfs files failed\n"); 3578 goto out_undebugfs; 3579 } 3580 3581 r = kvm_vfio_ops_init(); 3582 WARN_ON(r); 3583 3584 return 0; 3585 3586 out_undebugfs: 3587 unregister_syscore_ops(&kvm_syscore_ops); 3588 misc_deregister(&kvm_dev); 3589 out_unreg: 3590 kvm_async_pf_deinit(); 3591 out_free: 3592 kmem_cache_destroy(kvm_vcpu_cache); 3593 out_free_3: 3594 unregister_reboot_notifier(&kvm_reboot_notifier); 3595 unregister_cpu_notifier(&kvm_cpu_notifier); 3596 out_free_2: 3597 out_free_1: 3598 kvm_arch_hardware_unsetup(); 3599 out_free_0a: 3600 free_cpumask_var(cpus_hardware_enabled); 3601 out_free_0: 3602 kvm_irqfd_exit(); 3603 out_irqfd: 3604 kvm_arch_exit(); 3605 out_fail: 3606 return r; 3607 } 3608 EXPORT_SYMBOL_GPL(kvm_init); 3609 3610 void kvm_exit(void) 3611 { 3612 debugfs_remove_recursive(kvm_debugfs_dir); 3613 misc_deregister(&kvm_dev); 3614 kmem_cache_destroy(kvm_vcpu_cache); 3615 kvm_async_pf_deinit(); 3616 unregister_syscore_ops(&kvm_syscore_ops); 3617 unregister_reboot_notifier(&kvm_reboot_notifier); 3618 unregister_cpu_notifier(&kvm_cpu_notifier); 3619 on_each_cpu(hardware_disable_nolock, NULL, 1); 3620 kvm_arch_hardware_unsetup(); 3621 kvm_arch_exit(); 3622 kvm_irqfd_exit(); 3623 free_cpumask_var(cpus_hardware_enabled); 3624 kvm_vfio_ops_exit(); 3625 } 3626 EXPORT_SYMBOL_GPL(kvm_exit); 3627
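For reference, the control flow implemented above is driven from userspace through the three file descriptors this file creates: /dev/kvm (kvm_dev_ioctl()), the VM fd (kvm_vm_ioctl()) and the vCPU fd (kvm_vcpu_ioctl()/kvm_vcpu_mmap()). The sketch below is illustrative only and is not part of this module: error handling is omitted, the memory slot is left empty and no registers are initialized, so a real client would also load guest code and issue KVM_SET_SREGS/KVM_SET_REGS before KVM_RUN.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_userspace_memory_region region;
	struct kvm_run *run;
	long mmap_size;
	int kvm, vm, vcpu;
	void *mem;

	kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);		/* handled by kvm_dev_ioctl() */
	printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));

	vm = ioctl(kvm, KVM_CREATE_VM, 0);			/* kvm_dev_ioctl_create_vm() */

	/* Back 64 KiB of guest physical memory at GPA 0 with anonymous memory. */
	mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	memset(&region, 0, sizeof(region));
	region.slot = 0;
	region.guest_phys_addr = 0;
	region.memory_size = 0x10000;
	region.userspace_addr = (uintptr_t)mem;
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);		/* kvm_vm_ioctl() */

	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);			/* kvm_vm_ioctl_create_vcpu() */

	/* struct kvm_run lives at page offset 0 of the vCPU fd (kvm_vcpu_fault()). */
	mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu, 0);

	/* A real client loads guest code and sets registers before this point. */
	ioctl(vcpu, KVM_RUN, 0);				/* kvm_vcpu_ioctl() */
	printf("exit_reason: %u\n", run->exit_reason);

	close(vcpu);
	close(vm);
	close(kvm);
	return 0;
}

Since no guest code is installed, KVM_RUN here simply returns with whatever exit the empty guest produces; the point is the fd and mmap plumbing, which maps one-to-one onto the handlers above.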