1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include <kvm/iodev.h> 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched.h> 36 #include <linux/cpumask.h> 37 #include <linux/smp.h> 38 #include <linux/anon_inodes.h> 39 #include <linux/profile.h> 40 #include <linux/kvm_para.h> 41 #include <linux/pagemap.h> 42 #include <linux/mman.h> 43 #include <linux/swap.h> 44 #include <linux/bitops.h> 45 #include <linux/spinlock.h> 46 #include <linux/compat.h> 47 #include <linux/srcu.h> 48 #include <linux/hugetlb.h> 49 #include <linux/slab.h> 50 #include <linux/sort.h> 51 #include <linux/bsearch.h> 52 53 #include <asm/processor.h> 54 #include <asm/io.h> 55 #include <asm/ioctl.h> 56 #include <asm/uaccess.h> 57 #include <asm/pgtable.h> 58 59 #include "coalesced_mmio.h" 60 #include "async_pf.h" 61 #include "vfio.h" 62 63 #define CREATE_TRACE_POINTS 64 #include <trace/events/kvm.h> 65 66 MODULE_AUTHOR("Qumranet"); 67 MODULE_LICENSE("GPL"); 68 69 /* Architectures should define their poll value according to the halt latency */ 70 static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 71 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); 72 73 /* Default doubles per-vcpu halt_poll_ns. */ 74 static unsigned int halt_poll_ns_grow = 2; 75 module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR); 76 77 /* Default resets per-vcpu halt_poll_ns . 
*/ 78 static unsigned int halt_poll_ns_shrink; 79 module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR); 80 81 /* 82 * Ordering of locks: 83 * 84 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 85 */ 86 87 DEFINE_SPINLOCK(kvm_lock); 88 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 89 LIST_HEAD(vm_list); 90 91 static cpumask_var_t cpus_hardware_enabled; 92 static int kvm_usage_count; 93 static atomic_t hardware_enable_failed; 94 95 struct kmem_cache *kvm_vcpu_cache; 96 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 97 98 static __read_mostly struct preempt_ops kvm_preempt_ops; 99 100 struct dentry *kvm_debugfs_dir; 101 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 102 103 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 104 unsigned long arg); 105 #ifdef CONFIG_KVM_COMPAT 106 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 107 unsigned long arg); 108 #endif 109 static int hardware_enable_all(void); 110 static void hardware_disable_all(void); 111 112 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 113 114 static void kvm_release_pfn_dirty(kvm_pfn_t pfn); 115 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); 116 117 __visible bool kvm_rebooting; 118 EXPORT_SYMBOL_GPL(kvm_rebooting); 119 120 static bool largepages_enabled = true; 121 122 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 123 { 124 if (pfn_valid(pfn)) 125 return PageReserved(pfn_to_page(pfn)); 126 127 return true; 128 } 129 130 /* 131 * Switches to specified vcpu, until a matching vcpu_put() 132 */ 133 int vcpu_load(struct kvm_vcpu *vcpu) 134 { 135 int cpu; 136 137 if (mutex_lock_killable(&vcpu->mutex)) 138 return -EINTR; 139 cpu = get_cpu(); 140 preempt_notifier_register(&vcpu->preempt_notifier); 141 kvm_arch_vcpu_load(vcpu, cpu); 142 put_cpu(); 143 return 0; 144 } 145 146 void vcpu_put(struct kvm_vcpu *vcpu) 147 { 148 preempt_disable(); 149 kvm_arch_vcpu_put(vcpu); 150 preempt_notifier_unregister(&vcpu->preempt_notifier); 151 preempt_enable(); 152 mutex_unlock(&vcpu->mutex); 153 } 154 155 static void ack_flush(void *_completed) 156 { 157 } 158 159 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 160 { 161 int i, cpu, me; 162 cpumask_var_t cpus; 163 bool called = true; 164 struct kvm_vcpu *vcpu; 165 166 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 167 168 me = get_cpu(); 169 kvm_for_each_vcpu(i, vcpu, kvm) { 170 kvm_make_request(req, vcpu); 171 cpu = vcpu->cpu; 172 173 /* Set ->requests bit before we read ->mode */ 174 smp_mb(); 175 176 if (cpus != NULL && cpu != -1 && cpu != me && 177 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 178 cpumask_set_cpu(cpu, cpus); 179 } 180 if (unlikely(cpus == NULL)) 181 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 182 else if (!cpumask_empty(cpus)) 183 smp_call_function_many(cpus, ack_flush, NULL, 1); 184 else 185 called = false; 186 put_cpu(); 187 free_cpumask_var(cpus); 188 return called; 189 } 190 191 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 192 void kvm_flush_remote_tlbs(struct kvm *kvm) 193 { 194 long dirty_count = kvm->tlbs_dirty; 195 196 smp_mb(); 197 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 198 ++kvm->stat.remote_tlb_flush; 199 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 200 } 201 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 202 #endif 203 204 void kvm_reload_remote_mmus(struct kvm *kvm) 205 { 206 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 207 } 208 209 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 210 { 211 struct page *page; 212 int r; 213 214 
mutex_init(&vcpu->mutex); 215 vcpu->cpu = -1; 216 vcpu->kvm = kvm; 217 vcpu->vcpu_id = id; 218 vcpu->pid = NULL; 219 init_swait_queue_head(&vcpu->wq); 220 kvm_async_pf_vcpu_init(vcpu); 221 222 vcpu->pre_pcpu = -1; 223 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 224 225 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 226 if (!page) { 227 r = -ENOMEM; 228 goto fail; 229 } 230 vcpu->run = page_address(page); 231 232 kvm_vcpu_set_in_spin_loop(vcpu, false); 233 kvm_vcpu_set_dy_eligible(vcpu, false); 234 vcpu->preempted = false; 235 236 r = kvm_arch_vcpu_init(vcpu); 237 if (r < 0) 238 goto fail_free_run; 239 return 0; 240 241 fail_free_run: 242 free_page((unsigned long)vcpu->run); 243 fail: 244 return r; 245 } 246 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 247 248 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 249 { 250 put_pid(vcpu->pid); 251 kvm_arch_vcpu_uninit(vcpu); 252 free_page((unsigned long)vcpu->run); 253 } 254 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 255 256 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 257 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 258 { 259 return container_of(mn, struct kvm, mmu_notifier); 260 } 261 262 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 263 struct mm_struct *mm, 264 unsigned long address) 265 { 266 struct kvm *kvm = mmu_notifier_to_kvm(mn); 267 int need_tlb_flush, idx; 268 269 /* 270 * When ->invalidate_page runs, the linux pte has been zapped 271 * already but the page is still allocated until 272 * ->invalidate_page returns. So if we increase the sequence 273 * here the kvm page fault will notice if the spte can't be 274 * established because the page is going to be freed. If 275 * instead the kvm page fault establishes the spte before 276 * ->invalidate_page runs, kvm_unmap_hva will release it 277 * before returning. 278 * 279 * The sequence increase only need to be seen at spin_unlock 280 * time, and not at spin_lock time. 281 * 282 * Increasing the sequence after the spin_unlock would be 283 * unsafe because the kvm page fault could then establish the 284 * pte after kvm_unmap_hva returned, without noticing the page 285 * is going to be freed. 
286 */ 287 idx = srcu_read_lock(&kvm->srcu); 288 spin_lock(&kvm->mmu_lock); 289 290 kvm->mmu_notifier_seq++; 291 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 292 /* we've to flush the tlb before the pages can be freed */ 293 if (need_tlb_flush) 294 kvm_flush_remote_tlbs(kvm); 295 296 spin_unlock(&kvm->mmu_lock); 297 298 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 299 300 srcu_read_unlock(&kvm->srcu, idx); 301 } 302 303 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 304 struct mm_struct *mm, 305 unsigned long address, 306 pte_t pte) 307 { 308 struct kvm *kvm = mmu_notifier_to_kvm(mn); 309 int idx; 310 311 idx = srcu_read_lock(&kvm->srcu); 312 spin_lock(&kvm->mmu_lock); 313 kvm->mmu_notifier_seq++; 314 kvm_set_spte_hva(kvm, address, pte); 315 spin_unlock(&kvm->mmu_lock); 316 srcu_read_unlock(&kvm->srcu, idx); 317 } 318 319 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 320 struct mm_struct *mm, 321 unsigned long start, 322 unsigned long end) 323 { 324 struct kvm *kvm = mmu_notifier_to_kvm(mn); 325 int need_tlb_flush = 0, idx; 326 327 idx = srcu_read_lock(&kvm->srcu); 328 spin_lock(&kvm->mmu_lock); 329 /* 330 * The count increase must become visible at unlock time as no 331 * spte can be established without taking the mmu_lock and 332 * count is also read inside the mmu_lock critical section. 333 */ 334 kvm->mmu_notifier_count++; 335 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 336 need_tlb_flush |= kvm->tlbs_dirty; 337 /* we've to flush the tlb before the pages can be freed */ 338 if (need_tlb_flush) 339 kvm_flush_remote_tlbs(kvm); 340 341 spin_unlock(&kvm->mmu_lock); 342 srcu_read_unlock(&kvm->srcu, idx); 343 } 344 345 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 346 struct mm_struct *mm, 347 unsigned long start, 348 unsigned long end) 349 { 350 struct kvm *kvm = mmu_notifier_to_kvm(mn); 351 352 spin_lock(&kvm->mmu_lock); 353 /* 354 * This sequence increase will notify the kvm page fault that 355 * the page that is going to be mapped in the spte could have 356 * been freed. 357 */ 358 kvm->mmu_notifier_seq++; 359 smp_wmb(); 360 /* 361 * The above sequence increase must be visible before the 362 * below count decrease, which is ensured by the smp_wmb above 363 * in conjunction with the smp_rmb in mmu_notifier_retry(). 364 */ 365 kvm->mmu_notifier_count--; 366 spin_unlock(&kvm->mmu_lock); 367 368 BUG_ON(kvm->mmu_notifier_count < 0); 369 } 370 371 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 372 struct mm_struct *mm, 373 unsigned long start, 374 unsigned long end) 375 { 376 struct kvm *kvm = mmu_notifier_to_kvm(mn); 377 int young, idx; 378 379 idx = srcu_read_lock(&kvm->srcu); 380 spin_lock(&kvm->mmu_lock); 381 382 young = kvm_age_hva(kvm, start, end); 383 if (young) 384 kvm_flush_remote_tlbs(kvm); 385 386 spin_unlock(&kvm->mmu_lock); 387 srcu_read_unlock(&kvm->srcu, idx); 388 389 return young; 390 } 391 392 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 393 struct mm_struct *mm, 394 unsigned long start, 395 unsigned long end) 396 { 397 struct kvm *kvm = mmu_notifier_to_kvm(mn); 398 int young, idx; 399 400 idx = srcu_read_lock(&kvm->srcu); 401 spin_lock(&kvm->mmu_lock); 402 /* 403 * Even though we do not flush TLB, this will still adversely 404 * affect performance on pre-Haswell Intel EPT, where there is 405 * no EPT Access Bit to clear so that we have to tear down EPT 406 * tables instead. 
If we find this unacceptable, we can always 407 * add a parameter to kvm_age_hva so that it effectively doesn't 408 * do anything on clear_young. 409 * 410 * Also note that currently we never issue secondary TLB flushes 411 * from clear_young, leaving this job up to the regular system 412 * cadence. If we find this inaccurate, we might come up with a 413 * more sophisticated heuristic later. 414 */ 415 young = kvm_age_hva(kvm, start, end); 416 spin_unlock(&kvm->mmu_lock); 417 srcu_read_unlock(&kvm->srcu, idx); 418 419 return young; 420 } 421 422 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 423 struct mm_struct *mm, 424 unsigned long address) 425 { 426 struct kvm *kvm = mmu_notifier_to_kvm(mn); 427 int young, idx; 428 429 idx = srcu_read_lock(&kvm->srcu); 430 spin_lock(&kvm->mmu_lock); 431 young = kvm_test_age_hva(kvm, address); 432 spin_unlock(&kvm->mmu_lock); 433 srcu_read_unlock(&kvm->srcu, idx); 434 435 return young; 436 } 437 438 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 439 struct mm_struct *mm) 440 { 441 struct kvm *kvm = mmu_notifier_to_kvm(mn); 442 int idx; 443 444 idx = srcu_read_lock(&kvm->srcu); 445 kvm_arch_flush_shadow_all(kvm); 446 srcu_read_unlock(&kvm->srcu, idx); 447 } 448 449 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 450 .invalidate_page = kvm_mmu_notifier_invalidate_page, 451 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 452 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 453 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 454 .clear_young = kvm_mmu_notifier_clear_young, 455 .test_young = kvm_mmu_notifier_test_young, 456 .change_pte = kvm_mmu_notifier_change_pte, 457 .release = kvm_mmu_notifier_release, 458 }; 459 460 static int kvm_init_mmu_notifier(struct kvm *kvm) 461 { 462 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 463 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 464 } 465 466 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 467 468 static int kvm_init_mmu_notifier(struct kvm *kvm) 469 { 470 return 0; 471 } 472 473 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 474 475 static struct kvm_memslots *kvm_alloc_memslots(void) 476 { 477 int i; 478 struct kvm_memslots *slots; 479 480 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 481 if (!slots) 482 return NULL; 483 484 /* 485 * Init kvm generation close to the maximum to easily test the 486 * code of handling generation number wrap-around. 487 */ 488 slots->generation = -150; 489 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 490 slots->id_to_index[i] = slots->memslots[i].id = i; 491 492 return slots; 493 } 494 495 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 496 { 497 if (!memslot->dirty_bitmap) 498 return; 499 500 kvfree(memslot->dirty_bitmap); 501 memslot->dirty_bitmap = NULL; 502 } 503 504 /* 505 * Free any memory in @free but not in @dont. 
506 */ 507 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 508 struct kvm_memory_slot *dont) 509 { 510 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 511 kvm_destroy_dirty_bitmap(free); 512 513 kvm_arch_free_memslot(kvm, free, dont); 514 515 free->npages = 0; 516 } 517 518 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 519 { 520 struct kvm_memory_slot *memslot; 521 522 if (!slots) 523 return; 524 525 kvm_for_each_memslot(memslot, slots) 526 kvm_free_memslot(kvm, memslot, NULL); 527 528 kvfree(slots); 529 } 530 531 static struct kvm *kvm_create_vm(unsigned long type) 532 { 533 int r, i; 534 struct kvm *kvm = kvm_arch_alloc_vm(); 535 536 if (!kvm) 537 return ERR_PTR(-ENOMEM); 538 539 r = kvm_arch_init_vm(kvm, type); 540 if (r) 541 goto out_err_no_disable; 542 543 r = hardware_enable_all(); 544 if (r) 545 goto out_err_no_disable; 546 547 #ifdef CONFIG_HAVE_KVM_IRQFD 548 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 549 #endif 550 551 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 552 553 r = -ENOMEM; 554 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 555 kvm->memslots[i] = kvm_alloc_memslots(); 556 if (!kvm->memslots[i]) 557 goto out_err_no_srcu; 558 } 559 560 if (init_srcu_struct(&kvm->srcu)) 561 goto out_err_no_srcu; 562 if (init_srcu_struct(&kvm->irq_srcu)) 563 goto out_err_no_irq_srcu; 564 for (i = 0; i < KVM_NR_BUSES; i++) { 565 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 566 GFP_KERNEL); 567 if (!kvm->buses[i]) 568 goto out_err; 569 } 570 571 spin_lock_init(&kvm->mmu_lock); 572 kvm->mm = current->mm; 573 atomic_inc(&kvm->mm->mm_count); 574 kvm_eventfd_init(kvm); 575 mutex_init(&kvm->lock); 576 mutex_init(&kvm->irq_lock); 577 mutex_init(&kvm->slots_lock); 578 atomic_set(&kvm->users_count, 1); 579 INIT_LIST_HEAD(&kvm->devices); 580 581 r = kvm_init_mmu_notifier(kvm); 582 if (r) 583 goto out_err; 584 585 spin_lock(&kvm_lock); 586 list_add(&kvm->vm_list, &vm_list); 587 spin_unlock(&kvm_lock); 588 589 preempt_notifier_inc(); 590 591 return kvm; 592 593 out_err: 594 cleanup_srcu_struct(&kvm->irq_srcu); 595 out_err_no_irq_srcu: 596 cleanup_srcu_struct(&kvm->srcu); 597 out_err_no_srcu: 598 hardware_disable_all(); 599 out_err_no_disable: 600 for (i = 0; i < KVM_NR_BUSES; i++) 601 kfree(kvm->buses[i]); 602 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 603 kvm_free_memslots(kvm, kvm->memslots[i]); 604 kvm_arch_free_vm(kvm); 605 return ERR_PTR(r); 606 } 607 608 /* 609 * Avoid using vmalloc for a small buffer. 610 * Should not be used when the size is statically known. 
611 */ 612 void *kvm_kvzalloc(unsigned long size) 613 { 614 if (size > PAGE_SIZE) 615 return vzalloc(size); 616 else 617 return kzalloc(size, GFP_KERNEL); 618 } 619 620 static void kvm_destroy_devices(struct kvm *kvm) 621 { 622 struct kvm_device *dev, *tmp; 623 624 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 625 list_del(&dev->vm_node); 626 dev->ops->destroy(dev); 627 } 628 } 629 630 static void kvm_destroy_vm(struct kvm *kvm) 631 { 632 int i; 633 struct mm_struct *mm = kvm->mm; 634 635 kvm_arch_sync_events(kvm); 636 spin_lock(&kvm_lock); 637 list_del(&kvm->vm_list); 638 spin_unlock(&kvm_lock); 639 kvm_free_irq_routing(kvm); 640 for (i = 0; i < KVM_NR_BUSES; i++) 641 kvm_io_bus_destroy(kvm->buses[i]); 642 kvm_coalesced_mmio_free(kvm); 643 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 644 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 645 #else 646 kvm_arch_flush_shadow_all(kvm); 647 #endif 648 kvm_arch_destroy_vm(kvm); 649 kvm_destroy_devices(kvm); 650 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 651 kvm_free_memslots(kvm, kvm->memslots[i]); 652 cleanup_srcu_struct(&kvm->irq_srcu); 653 cleanup_srcu_struct(&kvm->srcu); 654 kvm_arch_free_vm(kvm); 655 preempt_notifier_dec(); 656 hardware_disable_all(); 657 mmdrop(mm); 658 } 659 660 void kvm_get_kvm(struct kvm *kvm) 661 { 662 atomic_inc(&kvm->users_count); 663 } 664 EXPORT_SYMBOL_GPL(kvm_get_kvm); 665 666 void kvm_put_kvm(struct kvm *kvm) 667 { 668 if (atomic_dec_and_test(&kvm->users_count)) 669 kvm_destroy_vm(kvm); 670 } 671 EXPORT_SYMBOL_GPL(kvm_put_kvm); 672 673 674 static int kvm_vm_release(struct inode *inode, struct file *filp) 675 { 676 struct kvm *kvm = filp->private_data; 677 678 kvm_irqfd_release(kvm); 679 680 kvm_put_kvm(kvm); 681 return 0; 682 } 683 684 /* 685 * Allocation size is twice as large as the actual dirty bitmap size. 686 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 687 */ 688 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 689 { 690 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 691 692 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 693 if (!memslot->dirty_bitmap) 694 return -ENOMEM; 695 696 return 0; 697 } 698 699 /* 700 * Insert memslot and re-sort memslots based on their GFN, 701 * so binary search could be used to lookup GFN. 702 * Sorting algorithm takes advantage of having initially 703 * sorted array and known changed memslot position. 704 */ 705 static void update_memslots(struct kvm_memslots *slots, 706 struct kvm_memory_slot *new) 707 { 708 int id = new->id; 709 int i = slots->id_to_index[id]; 710 struct kvm_memory_slot *mslots = slots->memslots; 711 712 WARN_ON(mslots[i].id != id); 713 if (!new->npages) { 714 WARN_ON(!mslots[i].npages); 715 if (mslots[i].npages) 716 slots->used_slots--; 717 } else { 718 if (!mslots[i].npages) 719 slots->used_slots++; 720 } 721 722 while (i < KVM_MEM_SLOTS_NUM - 1 && 723 new->base_gfn <= mslots[i + 1].base_gfn) { 724 if (!mslots[i + 1].npages) 725 break; 726 mslots[i] = mslots[i + 1]; 727 slots->id_to_index[mslots[i].id] = i; 728 i++; 729 } 730 731 /* 732 * The ">=" is needed when creating a slot with base_gfn == 0, 733 * so that it moves before all those with base_gfn == npages == 0. 734 * 735 * On the other hand, if new->npages is zero, the above loop has 736 * already left i pointing to the beginning of the empty part of 737 * mslots, and the ">=" would move the hole backwards in this 738 * case---which is wrong. So skip the loop when deleting a slot. 
739 */ 740 if (new->npages) { 741 while (i > 0 && 742 new->base_gfn >= mslots[i - 1].base_gfn) { 743 mslots[i] = mslots[i - 1]; 744 slots->id_to_index[mslots[i].id] = i; 745 i--; 746 } 747 } else 748 WARN_ON_ONCE(i != slots->used_slots); 749 750 mslots[i] = *new; 751 slots->id_to_index[mslots[i].id] = i; 752 } 753 754 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 755 { 756 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 757 758 #ifdef __KVM_HAVE_READONLY_MEM 759 valid_flags |= KVM_MEM_READONLY; 760 #endif 761 762 if (mem->flags & ~valid_flags) 763 return -EINVAL; 764 765 return 0; 766 } 767 768 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 769 int as_id, struct kvm_memslots *slots) 770 { 771 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 772 773 /* 774 * Set the low bit in the generation, which disables SPTE caching 775 * until the end of synchronize_srcu_expedited. 776 */ 777 WARN_ON(old_memslots->generation & 1); 778 slots->generation = old_memslots->generation + 1; 779 780 rcu_assign_pointer(kvm->memslots[as_id], slots); 781 synchronize_srcu_expedited(&kvm->srcu); 782 783 /* 784 * Increment the new memslot generation a second time. This prevents 785 * vm exits that race with memslot updates from caching a memslot 786 * generation that will (potentially) be valid forever. 787 */ 788 slots->generation++; 789 790 kvm_arch_memslots_updated(kvm, slots); 791 792 return old_memslots; 793 } 794 795 /* 796 * Allocate some memory and give it an address in the guest physical address 797 * space. 798 * 799 * Discontiguous memory is allowed, mostly for framebuffers. 800 * 801 * Must be called holding kvm->slots_lock for write. 802 */ 803 int __kvm_set_memory_region(struct kvm *kvm, 804 const struct kvm_userspace_memory_region *mem) 805 { 806 int r; 807 gfn_t base_gfn; 808 unsigned long npages; 809 struct kvm_memory_slot *slot; 810 struct kvm_memory_slot old, new; 811 struct kvm_memslots *slots = NULL, *old_memslots; 812 int as_id, id; 813 enum kvm_mr_change change; 814 815 r = check_memory_region_flags(mem); 816 if (r) 817 goto out; 818 819 r = -EINVAL; 820 as_id = mem->slot >> 16; 821 id = (u16)mem->slot; 822 823 /* General sanity checks */ 824 if (mem->memory_size & (PAGE_SIZE - 1)) 825 goto out; 826 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 827 goto out; 828 /* We can read the guest memory with __xxx_user() later on. */ 829 if ((id < KVM_USER_MEM_SLOTS) && 830 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 831 !access_ok(VERIFY_WRITE, 832 (void __user *)(unsigned long)mem->userspace_addr, 833 mem->memory_size))) 834 goto out; 835 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 836 goto out; 837 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 838 goto out; 839 840 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); 841 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 842 npages = mem->memory_size >> PAGE_SHIFT; 843 844 if (npages > KVM_MEM_MAX_NR_PAGES) 845 goto out; 846 847 new = old = *slot; 848 849 new.id = id; 850 new.base_gfn = base_gfn; 851 new.npages = npages; 852 new.flags = mem->flags; 853 854 if (npages) { 855 if (!old.npages) 856 change = KVM_MR_CREATE; 857 else { /* Modify an existing slot. 
*/ 858 if ((mem->userspace_addr != old.userspace_addr) || 859 (npages != old.npages) || 860 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 861 goto out; 862 863 if (base_gfn != old.base_gfn) 864 change = KVM_MR_MOVE; 865 else if (new.flags != old.flags) 866 change = KVM_MR_FLAGS_ONLY; 867 else { /* Nothing to change. */ 868 r = 0; 869 goto out; 870 } 871 } 872 } else { 873 if (!old.npages) 874 goto out; 875 876 change = KVM_MR_DELETE; 877 new.base_gfn = 0; 878 new.flags = 0; 879 } 880 881 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 882 /* Check for overlaps */ 883 r = -EEXIST; 884 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 885 if ((slot->id >= KVM_USER_MEM_SLOTS) || 886 (slot->id == id)) 887 continue; 888 if (!((base_gfn + npages <= slot->base_gfn) || 889 (base_gfn >= slot->base_gfn + slot->npages))) 890 goto out; 891 } 892 } 893 894 /* Free page dirty bitmap if unneeded */ 895 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 896 new.dirty_bitmap = NULL; 897 898 r = -ENOMEM; 899 if (change == KVM_MR_CREATE) { 900 new.userspace_addr = mem->userspace_addr; 901 902 if (kvm_arch_create_memslot(kvm, &new, npages)) 903 goto out_free; 904 } 905 906 /* Allocate page dirty bitmap if needed */ 907 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 908 if (kvm_create_dirty_bitmap(&new) < 0) 909 goto out_free; 910 } 911 912 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 913 if (!slots) 914 goto out_free; 915 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 916 917 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 918 slot = id_to_memslot(slots, id); 919 slot->flags |= KVM_MEMSLOT_INVALID; 920 921 old_memslots = install_new_memslots(kvm, as_id, slots); 922 923 /* slot was deleted or moved, clear iommu mapping */ 924 kvm_iommu_unmap_pages(kvm, &old); 925 /* From this point no new shadow pages pointing to a deleted, 926 * or moved, memslot will be created. 927 * 928 * validation of sp->gfn happens in: 929 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 930 * - kvm_is_visible_gfn (mmu_check_roots) 931 */ 932 kvm_arch_flush_shadow_memslot(kvm, slot); 933 934 /* 935 * We can re-use the old_memslots from above, the only difference 936 * from the currently installed memslots is the invalid flag. This 937 * will get overwritten by update_memslots anyway. 938 */ 939 slots = old_memslots; 940 } 941 942 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 943 if (r) 944 goto out_slots; 945 946 /* actual memory is freed via old in kvm_free_memslot below */ 947 if (change == KVM_MR_DELETE) { 948 new.dirty_bitmap = NULL; 949 memset(&new.arch, 0, sizeof(new.arch)); 950 } 951 952 update_memslots(slots, &new); 953 old_memslots = install_new_memslots(kvm, as_id, slots); 954 955 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); 956 957 kvm_free_memslot(kvm, &old, &new); 958 kvfree(old_memslots); 959 960 /* 961 * IOMMU mapping: New slots need to be mapped. Old slots need to be 962 * un-mapped and re-mapped if their base changes. Since base change 963 * unmapping is handled above with slot deletion, mapping alone is 964 * needed here. Anything else the iommu might care about for existing 965 * slots (size changes, userspace addr changes and read-only flag 966 * changes) is disallowed above, so any other attribute changes getting 967 * here can be skipped. 
968 */ 969 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 970 r = kvm_iommu_map_pages(kvm, &new); 971 return r; 972 } 973 974 return 0; 975 976 out_slots: 977 kvfree(slots); 978 out_free: 979 kvm_free_memslot(kvm, &new, &old); 980 out: 981 return r; 982 } 983 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 984 985 int kvm_set_memory_region(struct kvm *kvm, 986 const struct kvm_userspace_memory_region *mem) 987 { 988 int r; 989 990 mutex_lock(&kvm->slots_lock); 991 r = __kvm_set_memory_region(kvm, mem); 992 mutex_unlock(&kvm->slots_lock); 993 return r; 994 } 995 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 996 997 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 998 struct kvm_userspace_memory_region *mem) 999 { 1000 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1001 return -EINVAL; 1002 1003 return kvm_set_memory_region(kvm, mem); 1004 } 1005 1006 int kvm_get_dirty_log(struct kvm *kvm, 1007 struct kvm_dirty_log *log, int *is_dirty) 1008 { 1009 struct kvm_memslots *slots; 1010 struct kvm_memory_slot *memslot; 1011 int r, i, as_id, id; 1012 unsigned long n; 1013 unsigned long any = 0; 1014 1015 r = -EINVAL; 1016 as_id = log->slot >> 16; 1017 id = (u16)log->slot; 1018 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1019 goto out; 1020 1021 slots = __kvm_memslots(kvm, as_id); 1022 memslot = id_to_memslot(slots, id); 1023 r = -ENOENT; 1024 if (!memslot->dirty_bitmap) 1025 goto out; 1026 1027 n = kvm_dirty_bitmap_bytes(memslot); 1028 1029 for (i = 0; !any && i < n/sizeof(long); ++i) 1030 any = memslot->dirty_bitmap[i]; 1031 1032 r = -EFAULT; 1033 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 1034 goto out; 1035 1036 if (any) 1037 *is_dirty = 1; 1038 1039 r = 0; 1040 out: 1041 return r; 1042 } 1043 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1044 1045 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1046 /** 1047 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages 1048 * are dirty write protect them for next write. 1049 * @kvm: pointer to kvm instance 1050 * @log: slot id and address to which we copy the log 1051 * @is_dirty: flag set if any page is dirty 1052 * 1053 * We need to keep it in mind that VCPU threads can write to the bitmap 1054 * concurrently. So, to avoid losing track of dirty pages we keep the 1055 * following order: 1056 * 1057 * 1. Take a snapshot of the bit and clear it if needed. 1058 * 2. Write protect the corresponding page. 1059 * 3. Copy the snapshot to the userspace. 1060 * 4. Upon return caller flushes TLB's if needed. 1061 * 1062 * Between 2 and 4, the guest may write to the page using the remaining TLB 1063 * entry. This is not a problem because the page is reported dirty using 1064 * the snapshot taken before and step 4 ensures that writes done after 1065 * exiting to userspace will be logged for the next call. 
1066 * 1067 */ 1068 int kvm_get_dirty_log_protect(struct kvm *kvm, 1069 struct kvm_dirty_log *log, bool *is_dirty) 1070 { 1071 struct kvm_memslots *slots; 1072 struct kvm_memory_slot *memslot; 1073 int r, i, as_id, id; 1074 unsigned long n; 1075 unsigned long *dirty_bitmap; 1076 unsigned long *dirty_bitmap_buffer; 1077 1078 r = -EINVAL; 1079 as_id = log->slot >> 16; 1080 id = (u16)log->slot; 1081 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1082 goto out; 1083 1084 slots = __kvm_memslots(kvm, as_id); 1085 memslot = id_to_memslot(slots, id); 1086 1087 dirty_bitmap = memslot->dirty_bitmap; 1088 r = -ENOENT; 1089 if (!dirty_bitmap) 1090 goto out; 1091 1092 n = kvm_dirty_bitmap_bytes(memslot); 1093 1094 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); 1095 memset(dirty_bitmap_buffer, 0, n); 1096 1097 spin_lock(&kvm->mmu_lock); 1098 *is_dirty = false; 1099 for (i = 0; i < n / sizeof(long); i++) { 1100 unsigned long mask; 1101 gfn_t offset; 1102 1103 if (!dirty_bitmap[i]) 1104 continue; 1105 1106 *is_dirty = true; 1107 1108 mask = xchg(&dirty_bitmap[i], 0); 1109 dirty_bitmap_buffer[i] = mask; 1110 1111 if (mask) { 1112 offset = i * BITS_PER_LONG; 1113 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1114 offset, mask); 1115 } 1116 } 1117 1118 spin_unlock(&kvm->mmu_lock); 1119 1120 r = -EFAULT; 1121 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 1122 goto out; 1123 1124 r = 0; 1125 out: 1126 return r; 1127 } 1128 EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect); 1129 #endif 1130 1131 bool kvm_largepages_enabled(void) 1132 { 1133 return largepages_enabled; 1134 } 1135 1136 void kvm_disable_largepages(void) 1137 { 1138 largepages_enabled = false; 1139 } 1140 EXPORT_SYMBOL_GPL(kvm_disable_largepages); 1141 1142 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1143 { 1144 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 1145 } 1146 EXPORT_SYMBOL_GPL(gfn_to_memslot); 1147 1148 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 1149 { 1150 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); 1151 } 1152 1153 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1154 { 1155 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 1156 1157 if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS || 1158 memslot->flags & KVM_MEMSLOT_INVALID) 1159 return false; 1160 1161 return true; 1162 } 1163 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1164 1165 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) 1166 { 1167 struct vm_area_struct *vma; 1168 unsigned long addr, size; 1169 1170 size = PAGE_SIZE; 1171 1172 addr = gfn_to_hva(kvm, gfn); 1173 if (kvm_is_error_hva(addr)) 1174 return PAGE_SIZE; 1175 1176 down_read(¤t->mm->mmap_sem); 1177 vma = find_vma(current->mm, addr); 1178 if (!vma) 1179 goto out; 1180 1181 size = vma_kernel_pagesize(vma); 1182 1183 out: 1184 up_read(¤t->mm->mmap_sem); 1185 1186 return size; 1187 } 1188 1189 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 1190 { 1191 return slot->flags & KVM_MEM_READONLY; 1192 } 1193 1194 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1195 gfn_t *nr_pages, bool write) 1196 { 1197 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 1198 return KVM_HVA_ERR_BAD; 1199 1200 if (memslot_is_readonly(slot) && write) 1201 return KVM_HVA_ERR_RO_BAD; 1202 1203 if (nr_pages) 1204 *nr_pages = slot->npages - (gfn - slot->base_gfn); 1205 1206 return __gfn_to_hva_memslot(slot, gfn); 1207 } 1208 1209 static unsigned long gfn_to_hva_many(struct 
kvm_memory_slot *slot, gfn_t gfn, 1210 gfn_t *nr_pages) 1211 { 1212 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1213 } 1214 1215 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1216 gfn_t gfn) 1217 { 1218 return gfn_to_hva_many(slot, gfn, NULL); 1219 } 1220 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1221 1222 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1223 { 1224 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1225 } 1226 EXPORT_SYMBOL_GPL(gfn_to_hva); 1227 1228 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1229 { 1230 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1231 } 1232 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1233 1234 /* 1235 * If writable is set to false, the hva returned by this function is only 1236 * allowed to be read. 1237 */ 1238 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1239 gfn_t gfn, bool *writable) 1240 { 1241 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1242 1243 if (!kvm_is_error_hva(hva) && writable) 1244 *writable = !memslot_is_readonly(slot); 1245 1246 return hva; 1247 } 1248 1249 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1250 { 1251 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1252 1253 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1254 } 1255 1256 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 1257 { 1258 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1259 1260 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1261 } 1262 1263 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, 1264 unsigned long start, int write, struct page **page) 1265 { 1266 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1267 1268 if (write) 1269 flags |= FOLL_WRITE; 1270 1271 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1272 } 1273 1274 static inline int check_user_page_hwpoison(unsigned long addr) 1275 { 1276 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1277 1278 rc = __get_user_pages(current, current->mm, addr, 1, 1279 flags, NULL, NULL, NULL); 1280 return rc == -EHWPOISON; 1281 } 1282 1283 /* 1284 * The atomic path to get the writable pfn which will be stored in @pfn, 1285 * true indicates success, otherwise false is returned. 1286 */ 1287 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1288 bool write_fault, bool *writable, kvm_pfn_t *pfn) 1289 { 1290 struct page *page[1]; 1291 int npages; 1292 1293 if (!(async || atomic)) 1294 return false; 1295 1296 /* 1297 * Fast pin a writable pfn only if it is a write fault request 1298 * or the caller allows to map a writable pfn for a read fault 1299 * request. 1300 */ 1301 if (!(write_fault || writable)) 1302 return false; 1303 1304 npages = __get_user_pages_fast(addr, 1, 1, page); 1305 if (npages == 1) { 1306 *pfn = page_to_pfn(page[0]); 1307 1308 if (writable) 1309 *writable = true; 1310 return true; 1311 } 1312 1313 return false; 1314 } 1315 1316 /* 1317 * The slow path to get the pfn of the specified host virtual address, 1318 * 1 indicates success, -errno is returned if error is detected. 
1319 */ 1320 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 1321 bool *writable, kvm_pfn_t *pfn) 1322 { 1323 struct page *page[1]; 1324 int npages = 0; 1325 1326 might_sleep(); 1327 1328 if (writable) 1329 *writable = write_fault; 1330 1331 if (async) { 1332 down_read(¤t->mm->mmap_sem); 1333 npages = get_user_page_nowait(current, current->mm, 1334 addr, write_fault, page); 1335 up_read(¤t->mm->mmap_sem); 1336 } else 1337 npages = __get_user_pages_unlocked(current, current->mm, addr, 1, 1338 write_fault, 0, page, 1339 FOLL_TOUCH|FOLL_HWPOISON); 1340 if (npages != 1) 1341 return npages; 1342 1343 /* map read fault as writable if possible */ 1344 if (unlikely(!write_fault) && writable) { 1345 struct page *wpage[1]; 1346 1347 npages = __get_user_pages_fast(addr, 1, 1, wpage); 1348 if (npages == 1) { 1349 *writable = true; 1350 put_page(page[0]); 1351 page[0] = wpage[0]; 1352 } 1353 1354 npages = 1; 1355 } 1356 *pfn = page_to_pfn(page[0]); 1357 return npages; 1358 } 1359 1360 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 1361 { 1362 if (unlikely(!(vma->vm_flags & VM_READ))) 1363 return false; 1364 1365 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 1366 return false; 1367 1368 return true; 1369 } 1370 1371 /* 1372 * Pin guest page in memory and return its pfn. 1373 * @addr: host virtual address which maps memory to the guest 1374 * @atomic: whether this function can sleep 1375 * @async: whether this function need to wait IO complete if the 1376 * host page is not in the memory 1377 * @write_fault: whether we should get a writable host page 1378 * @writable: whether it allows to map a writable host page for !@write_fault 1379 * 1380 * The function will map a writable host page for these two cases: 1381 * 1): @write_fault = true 1382 * 2): @write_fault = false && @writable, @writable will tell the caller 1383 * whether the mapping is writable. 
1384 */ 1385 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 1386 bool write_fault, bool *writable) 1387 { 1388 struct vm_area_struct *vma; 1389 kvm_pfn_t pfn = 0; 1390 int npages; 1391 1392 /* we can do it either atomically or asynchronously, not both */ 1393 BUG_ON(atomic && async); 1394 1395 if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) 1396 return pfn; 1397 1398 if (atomic) 1399 return KVM_PFN_ERR_FAULT; 1400 1401 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 1402 if (npages == 1) 1403 return pfn; 1404 1405 down_read(¤t->mm->mmap_sem); 1406 if (npages == -EHWPOISON || 1407 (!async && check_user_page_hwpoison(addr))) { 1408 pfn = KVM_PFN_ERR_HWPOISON; 1409 goto exit; 1410 } 1411 1412 vma = find_vma_intersection(current->mm, addr, addr + 1); 1413 1414 if (vma == NULL) 1415 pfn = KVM_PFN_ERR_FAULT; 1416 else if ((vma->vm_flags & VM_PFNMAP)) { 1417 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + 1418 vma->vm_pgoff; 1419 BUG_ON(!kvm_is_reserved_pfn(pfn)); 1420 } else { 1421 if (async && vma_is_valid(vma, write_fault)) 1422 *async = true; 1423 pfn = KVM_PFN_ERR_FAULT; 1424 } 1425 exit: 1426 up_read(¤t->mm->mmap_sem); 1427 return pfn; 1428 } 1429 1430 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, 1431 bool atomic, bool *async, bool write_fault, 1432 bool *writable) 1433 { 1434 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 1435 1436 if (addr == KVM_HVA_ERR_RO_BAD) { 1437 if (writable) 1438 *writable = false; 1439 return KVM_PFN_ERR_RO_FAULT; 1440 } 1441 1442 if (kvm_is_error_hva(addr)) { 1443 if (writable) 1444 *writable = false; 1445 return KVM_PFN_NOSLOT; 1446 } 1447 1448 /* Do not map writable pfn in the readonly memslot. */ 1449 if (writable && memslot_is_readonly(slot)) { 1450 *writable = false; 1451 writable = NULL; 1452 } 1453 1454 return hva_to_pfn(addr, atomic, async, write_fault, 1455 writable); 1456 } 1457 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 1458 1459 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1460 bool *writable) 1461 { 1462 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 1463 write_fault, writable); 1464 } 1465 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 1466 1467 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1468 { 1469 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); 1470 } 1471 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 1472 1473 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 1474 { 1475 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); 1476 } 1477 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 1478 1479 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) 1480 { 1481 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); 1482 } 1483 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); 1484 1485 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 1486 { 1487 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1488 } 1489 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 1490 1491 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 1492 { 1493 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 1494 } 1495 EXPORT_SYMBOL_GPL(gfn_to_pfn); 1496 1497 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 1498 { 1499 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1500 } 1501 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 1502 1503 int gfn_to_page_many_atomic(struct 
kvm_memory_slot *slot, gfn_t gfn, 1504 struct page **pages, int nr_pages) 1505 { 1506 unsigned long addr; 1507 gfn_t entry; 1508 1509 addr = gfn_to_hva_many(slot, gfn, &entry); 1510 if (kvm_is_error_hva(addr)) 1511 return -1; 1512 1513 if (entry < nr_pages) 1514 return 0; 1515 1516 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1517 } 1518 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1519 1520 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 1521 { 1522 if (is_error_noslot_pfn(pfn)) 1523 return KVM_ERR_PTR_BAD_PAGE; 1524 1525 if (kvm_is_reserved_pfn(pfn)) { 1526 WARN_ON(1); 1527 return KVM_ERR_PTR_BAD_PAGE; 1528 } 1529 1530 return pfn_to_page(pfn); 1531 } 1532 1533 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1534 { 1535 kvm_pfn_t pfn; 1536 1537 pfn = gfn_to_pfn(kvm, gfn); 1538 1539 return kvm_pfn_to_page(pfn); 1540 } 1541 EXPORT_SYMBOL_GPL(gfn_to_page); 1542 1543 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1544 { 1545 kvm_pfn_t pfn; 1546 1547 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 1548 1549 return kvm_pfn_to_page(pfn); 1550 } 1551 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 1552 1553 void kvm_release_page_clean(struct page *page) 1554 { 1555 WARN_ON(is_error_page(page)); 1556 1557 kvm_release_pfn_clean(page_to_pfn(page)); 1558 } 1559 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1560 1561 void kvm_release_pfn_clean(kvm_pfn_t pfn) 1562 { 1563 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1564 put_page(pfn_to_page(pfn)); 1565 } 1566 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1567 1568 void kvm_release_page_dirty(struct page *page) 1569 { 1570 WARN_ON(is_error_page(page)); 1571 1572 kvm_release_pfn_dirty(page_to_pfn(page)); 1573 } 1574 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1575 1576 static void kvm_release_pfn_dirty(kvm_pfn_t pfn) 1577 { 1578 kvm_set_pfn_dirty(pfn); 1579 kvm_release_pfn_clean(pfn); 1580 } 1581 1582 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 1583 { 1584 if (!kvm_is_reserved_pfn(pfn)) { 1585 struct page *page = pfn_to_page(pfn); 1586 1587 if (!PageReserved(page)) 1588 SetPageDirty(page); 1589 } 1590 } 1591 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1592 1593 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 1594 { 1595 if (!kvm_is_reserved_pfn(pfn)) 1596 mark_page_accessed(pfn_to_page(pfn)); 1597 } 1598 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1599 1600 void kvm_get_pfn(kvm_pfn_t pfn) 1601 { 1602 if (!kvm_is_reserved_pfn(pfn)) 1603 get_page(pfn_to_page(pfn)); 1604 } 1605 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1606 1607 static int next_segment(unsigned long len, int offset) 1608 { 1609 if (len > PAGE_SIZE - offset) 1610 return PAGE_SIZE - offset; 1611 else 1612 return len; 1613 } 1614 1615 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 1616 void *data, int offset, int len) 1617 { 1618 int r; 1619 unsigned long addr; 1620 1621 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1622 if (kvm_is_error_hva(addr)) 1623 return -EFAULT; 1624 r = __copy_from_user(data, (void __user *)addr + offset, len); 1625 if (r) 1626 return -EFAULT; 1627 return 0; 1628 } 1629 1630 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1631 int len) 1632 { 1633 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1634 1635 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1636 } 1637 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1638 1639 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 1640 int offset, int len) 1641 { 1642 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1643 
1644 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1645 } 1646 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 1647 1648 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1649 { 1650 gfn_t gfn = gpa >> PAGE_SHIFT; 1651 int seg; 1652 int offset = offset_in_page(gpa); 1653 int ret; 1654 1655 while ((seg = next_segment(len, offset)) != 0) { 1656 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1657 if (ret < 0) 1658 return ret; 1659 offset = 0; 1660 len -= seg; 1661 data += seg; 1662 ++gfn; 1663 } 1664 return 0; 1665 } 1666 EXPORT_SYMBOL_GPL(kvm_read_guest); 1667 1668 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 1669 { 1670 gfn_t gfn = gpa >> PAGE_SHIFT; 1671 int seg; 1672 int offset = offset_in_page(gpa); 1673 int ret; 1674 1675 while ((seg = next_segment(len, offset)) != 0) { 1676 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 1677 if (ret < 0) 1678 return ret; 1679 offset = 0; 1680 len -= seg; 1681 data += seg; 1682 ++gfn; 1683 } 1684 return 0; 1685 } 1686 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 1687 1688 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1689 void *data, int offset, unsigned long len) 1690 { 1691 int r; 1692 unsigned long addr; 1693 1694 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1695 if (kvm_is_error_hva(addr)) 1696 return -EFAULT; 1697 pagefault_disable(); 1698 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1699 pagefault_enable(); 1700 if (r) 1701 return -EFAULT; 1702 return 0; 1703 } 1704 1705 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1706 unsigned long len) 1707 { 1708 gfn_t gfn = gpa >> PAGE_SHIFT; 1709 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1710 int offset = offset_in_page(gpa); 1711 1712 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1713 } 1714 EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); 1715 1716 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 1717 void *data, unsigned long len) 1718 { 1719 gfn_t gfn = gpa >> PAGE_SHIFT; 1720 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1721 int offset = offset_in_page(gpa); 1722 1723 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1724 } 1725 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 1726 1727 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, 1728 const void *data, int offset, int len) 1729 { 1730 int r; 1731 unsigned long addr; 1732 1733 addr = gfn_to_hva_memslot(memslot, gfn); 1734 if (kvm_is_error_hva(addr)) 1735 return -EFAULT; 1736 r = __copy_to_user((void __user *)addr + offset, data, len); 1737 if (r) 1738 return -EFAULT; 1739 mark_page_dirty_in_slot(memslot, gfn); 1740 return 0; 1741 } 1742 1743 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 1744 const void *data, int offset, int len) 1745 { 1746 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1747 1748 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1749 } 1750 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1751 1752 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 1753 const void *data, int offset, int len) 1754 { 1755 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1756 1757 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1758 } 1759 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 1760 1761 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1762 unsigned long len) 1763 { 1764 gfn_t gfn = 
gpa >> PAGE_SHIFT; 1765 int seg; 1766 int offset = offset_in_page(gpa); 1767 int ret; 1768 1769 while ((seg = next_segment(len, offset)) != 0) { 1770 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1771 if (ret < 0) 1772 return ret; 1773 offset = 0; 1774 len -= seg; 1775 data += seg; 1776 ++gfn; 1777 } 1778 return 0; 1779 } 1780 EXPORT_SYMBOL_GPL(kvm_write_guest); 1781 1782 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 1783 unsigned long len) 1784 { 1785 gfn_t gfn = gpa >> PAGE_SHIFT; 1786 int seg; 1787 int offset = offset_in_page(gpa); 1788 int ret; 1789 1790 while ((seg = next_segment(len, offset)) != 0) { 1791 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 1792 if (ret < 0) 1793 return ret; 1794 offset = 0; 1795 len -= seg; 1796 data += seg; 1797 ++gfn; 1798 } 1799 return 0; 1800 } 1801 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 1802 1803 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1804 gpa_t gpa, unsigned long len) 1805 { 1806 struct kvm_memslots *slots = kvm_memslots(kvm); 1807 int offset = offset_in_page(gpa); 1808 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1809 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1810 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1811 gfn_t nr_pages_avail; 1812 1813 ghc->gpa = gpa; 1814 ghc->generation = slots->generation; 1815 ghc->len = len; 1816 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1817 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); 1818 if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { 1819 ghc->hva += offset; 1820 } else { 1821 /* 1822 * If the requested region crosses two memslots, we still 1823 * verify that the entire region is valid here. 1824 */ 1825 while (start_gfn <= end_gfn) { 1826 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1827 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1828 &nr_pages_avail); 1829 if (kvm_is_error_hva(ghc->hva)) 1830 return -EFAULT; 1831 start_gfn += nr_pages_avail; 1832 } 1833 /* Use the slow path for cross page reads and writes. 
*/ 1834 ghc->memslot = NULL; 1835 } 1836 return 0; 1837 } 1838 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1839 1840 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1841 void *data, unsigned long len) 1842 { 1843 struct kvm_memslots *slots = kvm_memslots(kvm); 1844 int r; 1845 1846 BUG_ON(len > ghc->len); 1847 1848 if (slots->generation != ghc->generation) 1849 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1850 1851 if (unlikely(!ghc->memslot)) 1852 return kvm_write_guest(kvm, ghc->gpa, data, len); 1853 1854 if (kvm_is_error_hva(ghc->hva)) 1855 return -EFAULT; 1856 1857 r = __copy_to_user((void __user *)ghc->hva, data, len); 1858 if (r) 1859 return -EFAULT; 1860 mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1861 1862 return 0; 1863 } 1864 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1865 1866 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1867 void *data, unsigned long len) 1868 { 1869 struct kvm_memslots *slots = kvm_memslots(kvm); 1870 int r; 1871 1872 BUG_ON(len > ghc->len); 1873 1874 if (slots->generation != ghc->generation) 1875 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1876 1877 if (unlikely(!ghc->memslot)) 1878 return kvm_read_guest(kvm, ghc->gpa, data, len); 1879 1880 if (kvm_is_error_hva(ghc->hva)) 1881 return -EFAULT; 1882 1883 r = __copy_from_user(data, (void __user *)ghc->hva, len); 1884 if (r) 1885 return -EFAULT; 1886 1887 return 0; 1888 } 1889 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 1890 1891 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1892 { 1893 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 1894 1895 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 1896 } 1897 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1898 1899 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1900 { 1901 gfn_t gfn = gpa >> PAGE_SHIFT; 1902 int seg; 1903 int offset = offset_in_page(gpa); 1904 int ret; 1905 1906 while ((seg = next_segment(len, offset)) != 0) { 1907 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1908 if (ret < 0) 1909 return ret; 1910 offset = 0; 1911 len -= seg; 1912 ++gfn; 1913 } 1914 return 0; 1915 } 1916 EXPORT_SYMBOL_GPL(kvm_clear_guest); 1917 1918 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 1919 gfn_t gfn) 1920 { 1921 if (memslot && memslot->dirty_bitmap) { 1922 unsigned long rel_gfn = gfn - memslot->base_gfn; 1923 1924 set_bit_le(rel_gfn, memslot->dirty_bitmap); 1925 } 1926 } 1927 1928 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1929 { 1930 struct kvm_memory_slot *memslot; 1931 1932 memslot = gfn_to_memslot(kvm, gfn); 1933 mark_page_dirty_in_slot(memslot, gfn); 1934 } 1935 EXPORT_SYMBOL_GPL(mark_page_dirty); 1936 1937 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 1938 { 1939 struct kvm_memory_slot *memslot; 1940 1941 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1942 mark_page_dirty_in_slot(memslot, gfn); 1943 } 1944 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 1945 1946 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 1947 { 1948 unsigned int old, val, grow; 1949 1950 old = val = vcpu->halt_poll_ns; 1951 grow = READ_ONCE(halt_poll_ns_grow); 1952 /* 10us base */ 1953 if (val == 0 && grow) 1954 val = 10000; 1955 else 1956 val *= grow; 1957 1958 if (val > halt_poll_ns) 1959 val = halt_poll_ns; 1960 1961 vcpu->halt_poll_ns = val; 1962 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 1963 } 1964 1965 static void shrink_halt_poll_ns(struct kvm_vcpu 
*vcpu) 1966 { 1967 unsigned int old, val, shrink; 1968 1969 old = val = vcpu->halt_poll_ns; 1970 shrink = READ_ONCE(halt_poll_ns_shrink); 1971 if (shrink == 0) 1972 val = 0; 1973 else 1974 val /= shrink; 1975 1976 vcpu->halt_poll_ns = val; 1977 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 1978 } 1979 1980 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 1981 { 1982 if (kvm_arch_vcpu_runnable(vcpu)) { 1983 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1984 return -EINTR; 1985 } 1986 if (kvm_cpu_has_pending_timer(vcpu)) 1987 return -EINTR; 1988 if (signal_pending(current)) 1989 return -EINTR; 1990 1991 return 0; 1992 } 1993 1994 /* 1995 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 1996 */ 1997 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 1998 { 1999 ktime_t start, cur; 2000 DECLARE_SWAITQUEUE(wait); 2001 bool waited = false; 2002 u64 block_ns; 2003 2004 start = cur = ktime_get(); 2005 if (vcpu->halt_poll_ns) { 2006 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2007 2008 ++vcpu->stat.halt_attempted_poll; 2009 do { 2010 /* 2011 * This sets KVM_REQ_UNHALT if an interrupt 2012 * arrives. 2013 */ 2014 if (kvm_vcpu_check_block(vcpu) < 0) { 2015 ++vcpu->stat.halt_successful_poll; 2016 goto out; 2017 } 2018 cur = ktime_get(); 2019 } while (single_task_running() && ktime_before(cur, stop)); 2020 } 2021 2022 kvm_arch_vcpu_blocking(vcpu); 2023 2024 for (;;) { 2025 prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2026 2027 if (kvm_vcpu_check_block(vcpu) < 0) 2028 break; 2029 2030 waited = true; 2031 schedule(); 2032 } 2033 2034 finish_swait(&vcpu->wq, &wait); 2035 cur = ktime_get(); 2036 2037 kvm_arch_vcpu_unblocking(vcpu); 2038 out: 2039 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2040 2041 if (halt_poll_ns) { 2042 if (block_ns <= vcpu->halt_poll_ns) 2043 ; 2044 /* we had a long block, shrink polling */ 2045 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2046 shrink_halt_poll_ns(vcpu); 2047 /* we had a short halt and our poll time is too small */ 2048 else if (vcpu->halt_poll_ns < halt_poll_ns && 2049 block_ns < halt_poll_ns) 2050 grow_halt_poll_ns(vcpu); 2051 } else 2052 vcpu->halt_poll_ns = 0; 2053 2054 trace_kvm_vcpu_wakeup(block_ns, waited); 2055 } 2056 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2057 2058 #ifndef CONFIG_S390 2059 /* 2060 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 2061 */ 2062 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2063 { 2064 int me; 2065 int cpu = vcpu->cpu; 2066 struct swait_queue_head *wqp; 2067 2068 wqp = kvm_arch_vcpu_wq(vcpu); 2069 if (swait_active(wqp)) { 2070 swake_up(wqp); 2071 ++vcpu->stat.halt_wakeup; 2072 } 2073 2074 me = get_cpu(); 2075 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2076 if (kvm_arch_vcpu_should_kick(vcpu)) 2077 smp_send_reschedule(cpu); 2078 put_cpu(); 2079 } 2080 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2081 #endif /* !CONFIG_S390 */ 2082 2083 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2084 { 2085 struct pid *pid; 2086 struct task_struct *task = NULL; 2087 int ret = 0; 2088 2089 rcu_read_lock(); 2090 pid = rcu_dereference(target->pid); 2091 if (pid) 2092 task = get_pid_task(pid, PIDTYPE_PID); 2093 rcu_read_unlock(); 2094 if (!task) 2095 return ret; 2096 ret = yield_to(task, 1); 2097 put_task_struct(task); 2098 2099 return ret; 2100 } 2101 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2102 2103 /* 2104 * Helper that checks whether a VCPU is eligible for directed yield. 
2105 * The most eligible candidate to yield to is decided by the following heuristics: 2106 * 2107 * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted recently 2108 * (a preempted lock holder), indicated by @in_spin_loop. 2109 * Set at the beginning and cleared at the end of the interception/PLE handler. 2110 * 2111 * (b) A VCPU which has done a pl-exit/cpu relax intercept but did not get a 2112 * chance last time (mostly it has become eligible now since we have probably 2113 * yielded to the lock holder in the last iteration. This is done by toggling 2114 * @dy_eligible each time a VCPU is checked for eligibility.) 2115 * 2116 * Yielding to a recently pl-exited/cpu-relax-intercepted VCPU before yielding 2117 * to a preempted lock holder could result in wrong VCPU selection and CPU 2118 * burning. Giving priority to a potential lock holder increases lock 2119 * progress. 2120 * 2121 * Since the algorithm is based on heuristics, accessing another VCPU's data without 2122 * locking does no harm. It may result in trying to yield to the same VCPU, failing, 2123 * and continuing with the next VCPU, and so on. 2124 */ 2125 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2126 { 2127 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2128 bool eligible; 2129 2130 eligible = !vcpu->spin_loop.in_spin_loop || 2131 vcpu->spin_loop.dy_eligible; 2132 2133 if (vcpu->spin_loop.in_spin_loop) 2134 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2135 2136 return eligible; 2137 #else 2138 return true; 2139 #endif 2140 } 2141 2142 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2143 { 2144 struct kvm *kvm = me->kvm; 2145 struct kvm_vcpu *vcpu; 2146 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2147 int yielded = 0; 2148 int try = 3; 2149 int pass; 2150 int i; 2151 2152 kvm_vcpu_set_in_spin_loop(me, true); 2153 /* 2154 * We boost the priority of a VCPU that is runnable but not 2155 * currently running, because it got preempted by something 2156 * else and called schedule in __vcpu_run. Hopefully that 2157 * VCPU is holding the lock that we need and will release it. 2158 * We approximate round-robin by starting at the last boosted VCPU.
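*
* For example (illustrative numbers only): with four online vCPUs and
* last_boosted_vcpu == 2, pass 0 scans only vCPU 3 and pass 1 scans
* vCPUs 0-2, so every other vCPU is considered at most once per call.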
2159 */ 2160 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2161 kvm_for_each_vcpu(i, vcpu, kvm) { 2162 if (!pass && i <= last_boosted_vcpu) { 2163 i = last_boosted_vcpu; 2164 continue; 2165 } else if (pass && i > last_boosted_vcpu) 2166 break; 2167 if (!ACCESS_ONCE(vcpu->preempted)) 2168 continue; 2169 if (vcpu == me) 2170 continue; 2171 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2172 continue; 2173 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2174 continue; 2175 2176 yielded = kvm_vcpu_yield_to(vcpu); 2177 if (yielded > 0) { 2178 kvm->last_boosted_vcpu = i; 2179 break; 2180 } else if (yielded < 0) { 2181 try--; 2182 if (!try) 2183 break; 2184 } 2185 } 2186 } 2187 kvm_vcpu_set_in_spin_loop(me, false); 2188 2189 /* Ensure vcpu is not eligible during next spinloop */ 2190 kvm_vcpu_set_dy_eligible(me, false); 2191 } 2192 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2193 2194 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2195 { 2196 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 2197 struct page *page; 2198 2199 if (vmf->pgoff == 0) 2200 page = virt_to_page(vcpu->run); 2201 #ifdef CONFIG_X86 2202 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2203 page = virt_to_page(vcpu->arch.pio_data); 2204 #endif 2205 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2206 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2207 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2208 #endif 2209 else 2210 return kvm_arch_vcpu_fault(vcpu, vmf); 2211 get_page(page); 2212 vmf->page = page; 2213 return 0; 2214 } 2215 2216 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2217 .fault = kvm_vcpu_fault, 2218 }; 2219 2220 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2221 { 2222 vma->vm_ops = &kvm_vcpu_vm_ops; 2223 return 0; 2224 } 2225 2226 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2227 { 2228 struct kvm_vcpu *vcpu = filp->private_data; 2229 2230 kvm_put_kvm(vcpu->kvm); 2231 return 0; 2232 } 2233 2234 static struct file_operations kvm_vcpu_fops = { 2235 .release = kvm_vcpu_release, 2236 .unlocked_ioctl = kvm_vcpu_ioctl, 2237 #ifdef CONFIG_KVM_COMPAT 2238 .compat_ioctl = kvm_vcpu_compat_ioctl, 2239 #endif 2240 .mmap = kvm_vcpu_mmap, 2241 .llseek = noop_llseek, 2242 }; 2243 2244 /* 2245 * Allocates an inode for the vcpu. 2246 */ 2247 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2248 { 2249 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2250 } 2251 2252 /* 2253 * Creates some virtual cpus. Good luck creating more than one. 
2254 */ 2255 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2256 { 2257 int r; 2258 struct kvm_vcpu *vcpu; 2259 2260 if (id >= KVM_MAX_VCPUS) 2261 return -EINVAL; 2262 2263 vcpu = kvm_arch_vcpu_create(kvm, id); 2264 if (IS_ERR(vcpu)) 2265 return PTR_ERR(vcpu); 2266 2267 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2268 2269 r = kvm_arch_vcpu_setup(vcpu); 2270 if (r) 2271 goto vcpu_destroy; 2272 2273 mutex_lock(&kvm->lock); 2274 if (!kvm_vcpu_compatible(vcpu)) { 2275 r = -EINVAL; 2276 goto unlock_vcpu_destroy; 2277 } 2278 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 2279 r = -EINVAL; 2280 goto unlock_vcpu_destroy; 2281 } 2282 if (kvm_get_vcpu_by_id(kvm, id)) { 2283 r = -EEXIST; 2284 goto unlock_vcpu_destroy; 2285 } 2286 2287 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2288 2289 /* Now it's all set up, let userspace reach it */ 2290 kvm_get_kvm(kvm); 2291 r = create_vcpu_fd(vcpu); 2292 if (r < 0) { 2293 kvm_put_kvm(kvm); 2294 goto unlock_vcpu_destroy; 2295 } 2296 2297 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2298 2299 /* 2300 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2301 * before kvm->online_vcpu's incremented value. 2302 */ 2303 smp_wmb(); 2304 atomic_inc(&kvm->online_vcpus); 2305 2306 mutex_unlock(&kvm->lock); 2307 kvm_arch_vcpu_postcreate(vcpu); 2308 return r; 2309 2310 unlock_vcpu_destroy: 2311 mutex_unlock(&kvm->lock); 2312 vcpu_destroy: 2313 kvm_arch_vcpu_destroy(vcpu); 2314 return r; 2315 } 2316 2317 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2318 { 2319 if (sigset) { 2320 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2321 vcpu->sigset_active = 1; 2322 vcpu->sigset = *sigset; 2323 } else 2324 vcpu->sigset_active = 0; 2325 return 0; 2326 } 2327 2328 static long kvm_vcpu_ioctl(struct file *filp, 2329 unsigned int ioctl, unsigned long arg) 2330 { 2331 struct kvm_vcpu *vcpu = filp->private_data; 2332 void __user *argp = (void __user *)arg; 2333 int r; 2334 struct kvm_fpu *fpu = NULL; 2335 struct kvm_sregs *kvm_sregs = NULL; 2336 2337 if (vcpu->kvm->mm != current->mm) 2338 return -EIO; 2339 2340 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2341 return -EINVAL; 2342 2343 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2344 /* 2345 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2346 * so vcpu_load() would break it. 2347 */ 2348 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2349 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2350 #endif 2351 2352 2353 r = vcpu_load(vcpu); 2354 if (r) 2355 return r; 2356 switch (ioctl) { 2357 case KVM_RUN: 2358 r = -EINVAL; 2359 if (arg) 2360 goto out; 2361 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2362 /* The thread running this VCPU changed. 
*/ 2363 struct pid *oldpid = vcpu->pid; 2364 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2365 2366 rcu_assign_pointer(vcpu->pid, newpid); 2367 if (oldpid) 2368 synchronize_rcu(); 2369 put_pid(oldpid); 2370 } 2371 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2372 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2373 break; 2374 case KVM_GET_REGS: { 2375 struct kvm_regs *kvm_regs; 2376 2377 r = -ENOMEM; 2378 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2379 if (!kvm_regs) 2380 goto out; 2381 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2382 if (r) 2383 goto out_free1; 2384 r = -EFAULT; 2385 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2386 goto out_free1; 2387 r = 0; 2388 out_free1: 2389 kfree(kvm_regs); 2390 break; 2391 } 2392 case KVM_SET_REGS: { 2393 struct kvm_regs *kvm_regs; 2394 2395 r = -ENOMEM; 2396 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2397 if (IS_ERR(kvm_regs)) { 2398 r = PTR_ERR(kvm_regs); 2399 goto out; 2400 } 2401 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2402 kfree(kvm_regs); 2403 break; 2404 } 2405 case KVM_GET_SREGS: { 2406 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2407 r = -ENOMEM; 2408 if (!kvm_sregs) 2409 goto out; 2410 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2411 if (r) 2412 goto out; 2413 r = -EFAULT; 2414 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2415 goto out; 2416 r = 0; 2417 break; 2418 } 2419 case KVM_SET_SREGS: { 2420 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2421 if (IS_ERR(kvm_sregs)) { 2422 r = PTR_ERR(kvm_sregs); 2423 kvm_sregs = NULL; 2424 goto out; 2425 } 2426 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2427 break; 2428 } 2429 case KVM_GET_MP_STATE: { 2430 struct kvm_mp_state mp_state; 2431 2432 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2433 if (r) 2434 goto out; 2435 r = -EFAULT; 2436 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2437 goto out; 2438 r = 0; 2439 break; 2440 } 2441 case KVM_SET_MP_STATE: { 2442 struct kvm_mp_state mp_state; 2443 2444 r = -EFAULT; 2445 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2446 goto out; 2447 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2448 break; 2449 } 2450 case KVM_TRANSLATE: { 2451 struct kvm_translation tr; 2452 2453 r = -EFAULT; 2454 if (copy_from_user(&tr, argp, sizeof(tr))) 2455 goto out; 2456 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2457 if (r) 2458 goto out; 2459 r = -EFAULT; 2460 if (copy_to_user(argp, &tr, sizeof(tr))) 2461 goto out; 2462 r = 0; 2463 break; 2464 } 2465 case KVM_SET_GUEST_DEBUG: { 2466 struct kvm_guest_debug dbg; 2467 2468 r = -EFAULT; 2469 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2470 goto out; 2471 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2472 break; 2473 } 2474 case KVM_SET_SIGNAL_MASK: { 2475 struct kvm_signal_mask __user *sigmask_arg = argp; 2476 struct kvm_signal_mask kvm_sigmask; 2477 sigset_t sigset, *p; 2478 2479 p = NULL; 2480 if (argp) { 2481 r = -EFAULT; 2482 if (copy_from_user(&kvm_sigmask, argp, 2483 sizeof(kvm_sigmask))) 2484 goto out; 2485 r = -EINVAL; 2486 if (kvm_sigmask.len != sizeof(sigset)) 2487 goto out; 2488 r = -EFAULT; 2489 if (copy_from_user(&sigset, sigmask_arg->sigset, 2490 sizeof(sigset))) 2491 goto out; 2492 p = &sigset; 2493 } 2494 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2495 break; 2496 } 2497 case KVM_GET_FPU: { 2498 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2499 r = -ENOMEM; 2500 if (!fpu) 2501 goto out; 2502 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2503 if 
(r) 2504 goto out; 2505 r = -EFAULT; 2506 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2507 goto out; 2508 r = 0; 2509 break; 2510 } 2511 case KVM_SET_FPU: { 2512 fpu = memdup_user(argp, sizeof(*fpu)); 2513 if (IS_ERR(fpu)) { 2514 r = PTR_ERR(fpu); 2515 fpu = NULL; 2516 goto out; 2517 } 2518 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2519 break; 2520 } 2521 default: 2522 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2523 } 2524 out: 2525 vcpu_put(vcpu); 2526 kfree(fpu); 2527 kfree(kvm_sregs); 2528 return r; 2529 } 2530 2531 #ifdef CONFIG_KVM_COMPAT 2532 static long kvm_vcpu_compat_ioctl(struct file *filp, 2533 unsigned int ioctl, unsigned long arg) 2534 { 2535 struct kvm_vcpu *vcpu = filp->private_data; 2536 void __user *argp = compat_ptr(arg); 2537 int r; 2538 2539 if (vcpu->kvm->mm != current->mm) 2540 return -EIO; 2541 2542 switch (ioctl) { 2543 case KVM_SET_SIGNAL_MASK: { 2544 struct kvm_signal_mask __user *sigmask_arg = argp; 2545 struct kvm_signal_mask kvm_sigmask; 2546 compat_sigset_t csigset; 2547 sigset_t sigset; 2548 2549 if (argp) { 2550 r = -EFAULT; 2551 if (copy_from_user(&kvm_sigmask, argp, 2552 sizeof(kvm_sigmask))) 2553 goto out; 2554 r = -EINVAL; 2555 if (kvm_sigmask.len != sizeof(csigset)) 2556 goto out; 2557 r = -EFAULT; 2558 if (copy_from_user(&csigset, sigmask_arg->sigset, 2559 sizeof(csigset))) 2560 goto out; 2561 sigset_from_compat(&sigset, &csigset); 2562 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2563 } else 2564 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2565 break; 2566 } 2567 default: 2568 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2569 } 2570 2571 out: 2572 return r; 2573 } 2574 #endif 2575 2576 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2577 int (*accessor)(struct kvm_device *dev, 2578 struct kvm_device_attr *attr), 2579 unsigned long arg) 2580 { 2581 struct kvm_device_attr attr; 2582 2583 if (!accessor) 2584 return -EPERM; 2585 2586 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2587 return -EFAULT; 2588 2589 return accessor(dev, &attr); 2590 } 2591 2592 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2593 unsigned long arg) 2594 { 2595 struct kvm_device *dev = filp->private_data; 2596 2597 switch (ioctl) { 2598 case KVM_SET_DEVICE_ATTR: 2599 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2600 case KVM_GET_DEVICE_ATTR: 2601 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2602 case KVM_HAS_DEVICE_ATTR: 2603 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2604 default: 2605 if (dev->ops->ioctl) 2606 return dev->ops->ioctl(dev, ioctl, arg); 2607 2608 return -ENOTTY; 2609 } 2610 } 2611 2612 static int kvm_device_release(struct inode *inode, struct file *filp) 2613 { 2614 struct kvm_device *dev = filp->private_data; 2615 struct kvm *kvm = dev->kvm; 2616 2617 kvm_put_kvm(kvm); 2618 return 0; 2619 } 2620 2621 static const struct file_operations kvm_device_fops = { 2622 .unlocked_ioctl = kvm_device_ioctl, 2623 #ifdef CONFIG_KVM_COMPAT 2624 .compat_ioctl = kvm_device_ioctl, 2625 #endif 2626 .release = kvm_device_release, 2627 }; 2628 2629 struct kvm_device *kvm_device_from_filp(struct file *filp) 2630 { 2631 if (filp->f_op != &kvm_device_fops) 2632 return NULL; 2633 2634 return filp->private_data; 2635 } 2636 2637 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2638 #ifdef CONFIG_KVM_MPIC 2639 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2640 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2641 #endif 2642 2643 #ifdef CONFIG_KVM_XICS 2644 [KVM_DEV_TYPE_XICS] 
= &kvm_xics_ops, 2645 #endif 2646 }; 2647 2648 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2649 { 2650 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2651 return -ENOSPC; 2652 2653 if (kvm_device_ops_table[type] != NULL) 2654 return -EEXIST; 2655 2656 kvm_device_ops_table[type] = ops; 2657 return 0; 2658 } 2659 2660 void kvm_unregister_device_ops(u32 type) 2661 { 2662 if (kvm_device_ops_table[type] != NULL) 2663 kvm_device_ops_table[type] = NULL; 2664 } 2665 2666 static int kvm_ioctl_create_device(struct kvm *kvm, 2667 struct kvm_create_device *cd) 2668 { 2669 struct kvm_device_ops *ops = NULL; 2670 struct kvm_device *dev; 2671 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2672 int ret; 2673 2674 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2675 return -ENODEV; 2676 2677 ops = kvm_device_ops_table[cd->type]; 2678 if (ops == NULL) 2679 return -ENODEV; 2680 2681 if (test) 2682 return 0; 2683 2684 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2685 if (!dev) 2686 return -ENOMEM; 2687 2688 dev->ops = ops; 2689 dev->kvm = kvm; 2690 2691 ret = ops->create(dev, cd->type); 2692 if (ret < 0) { 2693 kfree(dev); 2694 return ret; 2695 } 2696 2697 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2698 if (ret < 0) { 2699 ops->destroy(dev); 2700 return ret; 2701 } 2702 2703 list_add(&dev->vm_node, &kvm->devices); 2704 kvm_get_kvm(kvm); 2705 cd->fd = ret; 2706 return 0; 2707 } 2708 2709 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2710 { 2711 switch (arg) { 2712 case KVM_CAP_USER_MEMORY: 2713 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2714 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2715 case KVM_CAP_INTERNAL_ERROR_DATA: 2716 #ifdef CONFIG_HAVE_KVM_MSI 2717 case KVM_CAP_SIGNAL_MSI: 2718 #endif 2719 #ifdef CONFIG_HAVE_KVM_IRQFD 2720 case KVM_CAP_IRQFD: 2721 case KVM_CAP_IRQFD_RESAMPLE: 2722 #endif 2723 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2724 case KVM_CAP_CHECK_EXTENSION_VM: 2725 return 1; 2726 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2727 case KVM_CAP_IRQ_ROUTING: 2728 return KVM_MAX_IRQ_ROUTES; 2729 #endif 2730 #if KVM_ADDRESS_SPACE_NUM > 1 2731 case KVM_CAP_MULTI_ADDRESS_SPACE: 2732 return KVM_ADDRESS_SPACE_NUM; 2733 #endif 2734 default: 2735 break; 2736 } 2737 return kvm_vm_ioctl_check_extension(kvm, arg); 2738 } 2739 2740 static long kvm_vm_ioctl(struct file *filp, 2741 unsigned int ioctl, unsigned long arg) 2742 { 2743 struct kvm *kvm = filp->private_data; 2744 void __user *argp = (void __user *)arg; 2745 int r; 2746 2747 if (kvm->mm != current->mm) 2748 return -EIO; 2749 switch (ioctl) { 2750 case KVM_CREATE_VCPU: 2751 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2752 break; 2753 case KVM_SET_USER_MEMORY_REGION: { 2754 struct kvm_userspace_memory_region kvm_userspace_mem; 2755 2756 r = -EFAULT; 2757 if (copy_from_user(&kvm_userspace_mem, argp, 2758 sizeof(kvm_userspace_mem))) 2759 goto out; 2760 2761 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2762 break; 2763 } 2764 case KVM_GET_DIRTY_LOG: { 2765 struct kvm_dirty_log log; 2766 2767 r = -EFAULT; 2768 if (copy_from_user(&log, argp, sizeof(log))) 2769 goto out; 2770 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2771 break; 2772 } 2773 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2774 case KVM_REGISTER_COALESCED_MMIO: { 2775 struct kvm_coalesced_mmio_zone zone; 2776 2777 r = -EFAULT; 2778 if (copy_from_user(&zone, argp, sizeof(zone))) 2779 goto out; 2780 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2781 break; 2782 } 2783 case KVM_UNREGISTER_COALESCED_MMIO: { 2784 struct 
kvm_coalesced_mmio_zone zone; 2785 2786 r = -EFAULT; 2787 if (copy_from_user(&zone, argp, sizeof(zone))) 2788 goto out; 2789 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2790 break; 2791 } 2792 #endif 2793 case KVM_IRQFD: { 2794 struct kvm_irqfd data; 2795 2796 r = -EFAULT; 2797 if (copy_from_user(&data, argp, sizeof(data))) 2798 goto out; 2799 r = kvm_irqfd(kvm, &data); 2800 break; 2801 } 2802 case KVM_IOEVENTFD: { 2803 struct kvm_ioeventfd data; 2804 2805 r = -EFAULT; 2806 if (copy_from_user(&data, argp, sizeof(data))) 2807 goto out; 2808 r = kvm_ioeventfd(kvm, &data); 2809 break; 2810 } 2811 #ifdef CONFIG_HAVE_KVM_MSI 2812 case KVM_SIGNAL_MSI: { 2813 struct kvm_msi msi; 2814 2815 r = -EFAULT; 2816 if (copy_from_user(&msi, argp, sizeof(msi))) 2817 goto out; 2818 r = kvm_send_userspace_msi(kvm, &msi); 2819 break; 2820 } 2821 #endif 2822 #ifdef __KVM_HAVE_IRQ_LINE 2823 case KVM_IRQ_LINE_STATUS: 2824 case KVM_IRQ_LINE: { 2825 struct kvm_irq_level irq_event; 2826 2827 r = -EFAULT; 2828 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 2829 goto out; 2830 2831 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2832 ioctl == KVM_IRQ_LINE_STATUS); 2833 if (r) 2834 goto out; 2835 2836 r = -EFAULT; 2837 if (ioctl == KVM_IRQ_LINE_STATUS) { 2838 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 2839 goto out; 2840 } 2841 2842 r = 0; 2843 break; 2844 } 2845 #endif 2846 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2847 case KVM_SET_GSI_ROUTING: { 2848 struct kvm_irq_routing routing; 2849 struct kvm_irq_routing __user *urouting; 2850 struct kvm_irq_routing_entry *entries; 2851 2852 r = -EFAULT; 2853 if (copy_from_user(&routing, argp, sizeof(routing))) 2854 goto out; 2855 r = -EINVAL; 2856 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2857 goto out; 2858 if (routing.flags) 2859 goto out; 2860 r = -ENOMEM; 2861 entries = vmalloc(routing.nr * sizeof(*entries)); 2862 if (!entries) 2863 goto out; 2864 r = -EFAULT; 2865 urouting = argp; 2866 if (copy_from_user(entries, urouting->entries, 2867 routing.nr * sizeof(*entries))) 2868 goto out_free_irq_routing; 2869 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2870 routing.flags); 2871 out_free_irq_routing: 2872 vfree(entries); 2873 break; 2874 } 2875 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2876 case KVM_CREATE_DEVICE: { 2877 struct kvm_create_device cd; 2878 2879 r = -EFAULT; 2880 if (copy_from_user(&cd, argp, sizeof(cd))) 2881 goto out; 2882 2883 r = kvm_ioctl_create_device(kvm, &cd); 2884 if (r) 2885 goto out; 2886 2887 r = -EFAULT; 2888 if (copy_to_user(argp, &cd, sizeof(cd))) 2889 goto out; 2890 2891 r = 0; 2892 break; 2893 } 2894 case KVM_CHECK_EXTENSION: 2895 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 2896 break; 2897 default: 2898 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2899 } 2900 out: 2901 return r; 2902 } 2903 2904 #ifdef CONFIG_KVM_COMPAT 2905 struct compat_kvm_dirty_log { 2906 __u32 slot; 2907 __u32 padding1; 2908 union { 2909 compat_uptr_t dirty_bitmap; /* one bit per page */ 2910 __u64 padding2; 2911 }; 2912 }; 2913 2914 static long kvm_vm_compat_ioctl(struct file *filp, 2915 unsigned int ioctl, unsigned long arg) 2916 { 2917 struct kvm *kvm = filp->private_data; 2918 int r; 2919 2920 if (kvm->mm != current->mm) 2921 return -EIO; 2922 switch (ioctl) { 2923 case KVM_GET_DIRTY_LOG: { 2924 struct compat_kvm_dirty_log compat_log; 2925 struct kvm_dirty_log log; 2926 2927 r = -EFAULT; 2928 if (copy_from_user(&compat_log, (void __user *)arg, 2929 sizeof(compat_log))) 2930 goto out; 2931 log.slot = compat_log.slot; 2932 log.padding1 = 
compat_log.padding1; 2933 log.padding2 = compat_log.padding2; 2934 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2935 2936 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2937 break; 2938 } 2939 default: 2940 r = kvm_vm_ioctl(filp, ioctl, arg); 2941 } 2942 2943 out: 2944 return r; 2945 } 2946 #endif 2947 2948 static struct file_operations kvm_vm_fops = { 2949 .release = kvm_vm_release, 2950 .unlocked_ioctl = kvm_vm_ioctl, 2951 #ifdef CONFIG_KVM_COMPAT 2952 .compat_ioctl = kvm_vm_compat_ioctl, 2953 #endif 2954 .llseek = noop_llseek, 2955 }; 2956 2957 static int kvm_dev_ioctl_create_vm(unsigned long type) 2958 { 2959 int r; 2960 struct kvm *kvm; 2961 2962 kvm = kvm_create_vm(type); 2963 if (IS_ERR(kvm)) 2964 return PTR_ERR(kvm); 2965 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2966 r = kvm_coalesced_mmio_init(kvm); 2967 if (r < 0) { 2968 kvm_put_kvm(kvm); 2969 return r; 2970 } 2971 #endif 2972 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2973 if (r < 0) 2974 kvm_put_kvm(kvm); 2975 2976 return r; 2977 } 2978 2979 static long kvm_dev_ioctl(struct file *filp, 2980 unsigned int ioctl, unsigned long arg) 2981 { 2982 long r = -EINVAL; 2983 2984 switch (ioctl) { 2985 case KVM_GET_API_VERSION: 2986 if (arg) 2987 goto out; 2988 r = KVM_API_VERSION; 2989 break; 2990 case KVM_CREATE_VM: 2991 r = kvm_dev_ioctl_create_vm(arg); 2992 break; 2993 case KVM_CHECK_EXTENSION: 2994 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 2995 break; 2996 case KVM_GET_VCPU_MMAP_SIZE: 2997 if (arg) 2998 goto out; 2999 r = PAGE_SIZE; /* struct kvm_run */ 3000 #ifdef CONFIG_X86 3001 r += PAGE_SIZE; /* pio data page */ 3002 #endif 3003 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3004 r += PAGE_SIZE; /* coalesced mmio ring page */ 3005 #endif 3006 break; 3007 case KVM_TRACE_ENABLE: 3008 case KVM_TRACE_PAUSE: 3009 case KVM_TRACE_DISABLE: 3010 r = -EOPNOTSUPP; 3011 break; 3012 default: 3013 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3014 } 3015 out: 3016 return r; 3017 } 3018 3019 static struct file_operations kvm_chardev_ops = { 3020 .unlocked_ioctl = kvm_dev_ioctl, 3021 .compat_ioctl = kvm_dev_ioctl, 3022 .llseek = noop_llseek, 3023 }; 3024 3025 static struct miscdevice kvm_dev = { 3026 KVM_MINOR, 3027 "kvm", 3028 &kvm_chardev_ops, 3029 }; 3030 3031 static void hardware_enable_nolock(void *junk) 3032 { 3033 int cpu = raw_smp_processor_id(); 3034 int r; 3035 3036 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3037 return; 3038 3039 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3040 3041 r = kvm_arch_hardware_enable(); 3042 3043 if (r) { 3044 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3045 atomic_inc(&hardware_enable_failed); 3046 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3047 } 3048 } 3049 3050 static void hardware_enable(void) 3051 { 3052 raw_spin_lock(&kvm_count_lock); 3053 if (kvm_usage_count) 3054 hardware_enable_nolock(NULL); 3055 raw_spin_unlock(&kvm_count_lock); 3056 } 3057 3058 static void hardware_disable_nolock(void *junk) 3059 { 3060 int cpu = raw_smp_processor_id(); 3061 3062 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3063 return; 3064 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3065 kvm_arch_hardware_disable(); 3066 } 3067 3068 static void hardware_disable(void) 3069 { 3070 raw_spin_lock(&kvm_count_lock); 3071 if (kvm_usage_count) 3072 hardware_disable_nolock(NULL); 3073 raw_spin_unlock(&kvm_count_lock); 3074 } 3075 3076 static void hardware_disable_all_nolock(void) 3077 { 3078 BUG_ON(!kvm_usage_count); 3079 3080 kvm_usage_count--; 3081 if 
(!kvm_usage_count) 3082 on_each_cpu(hardware_disable_nolock, NULL, 1); 3083 } 3084 3085 static void hardware_disable_all(void) 3086 { 3087 raw_spin_lock(&kvm_count_lock); 3088 hardware_disable_all_nolock(); 3089 raw_spin_unlock(&kvm_count_lock); 3090 } 3091 3092 static int hardware_enable_all(void) 3093 { 3094 int r = 0; 3095 3096 raw_spin_lock(&kvm_count_lock); 3097 3098 kvm_usage_count++; 3099 if (kvm_usage_count == 1) { 3100 atomic_set(&hardware_enable_failed, 0); 3101 on_each_cpu(hardware_enable_nolock, NULL, 1); 3102 3103 if (atomic_read(&hardware_enable_failed)) { 3104 hardware_disable_all_nolock(); 3105 r = -EBUSY; 3106 } 3107 } 3108 3109 raw_spin_unlock(&kvm_count_lock); 3110 3111 return r; 3112 } 3113 3114 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 3115 void *v) 3116 { 3117 val &= ~CPU_TASKS_FROZEN; 3118 switch (val) { 3119 case CPU_DYING: 3120 hardware_disable(); 3121 break; 3122 case CPU_STARTING: 3123 hardware_enable(); 3124 break; 3125 } 3126 return NOTIFY_OK; 3127 } 3128 3129 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 3130 void *v) 3131 { 3132 /* 3133 * Some (well, at least mine) BIOSes hang on reboot if 3134 * in vmx root mode. 3135 * 3136 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down. 3137 */ 3138 pr_info("kvm: exiting hardware virtualization\n"); 3139 kvm_rebooting = true; 3140 on_each_cpu(hardware_disable_nolock, NULL, 1); 3141 return NOTIFY_OK; 3142 } 3143 3144 static struct notifier_block kvm_reboot_notifier = { 3145 .notifier_call = kvm_reboot, 3146 .priority = 0, 3147 }; 3148 3149 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 3150 { 3151 int i; 3152 3153 for (i = 0; i < bus->dev_count; i++) { 3154 struct kvm_io_device *pos = bus->range[i].dev; 3155 3156 kvm_iodevice_destructor(pos); 3157 } 3158 kfree(bus); 3159 } 3160 3161 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3162 const struct kvm_io_range *r2) 3163 { 3164 gpa_t addr1 = r1->addr; 3165 gpa_t addr2 = r2->addr; 3166 3167 if (addr1 < addr2) 3168 return -1; 3169 3170 /* If r2->len == 0, match the exact address. If r2->len != 0, 3171 * accept any overlapping write. Any order is acceptable for 3172 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 3173 * we process all of them.
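*
* For example (illustrative values): an access r1 = { .addr = 0x104,
* .len = 4 } compares equal to a registered range r2 = { .addr = 0x100,
* .len = 8 }, since it starts inside r2 and ends at or before r2's end.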
3174 */ 3175 if (r2->len) { 3176 addr1 += r1->len; 3177 addr2 += r2->len; 3178 } 3179 3180 if (addr1 > addr2) 3181 return 1; 3182 3183 return 0; 3184 } 3185 3186 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3187 { 3188 return kvm_io_bus_cmp(p1, p2); 3189 } 3190 3191 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3192 gpa_t addr, int len) 3193 { 3194 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3195 .addr = addr, 3196 .len = len, 3197 .dev = dev, 3198 }; 3199 3200 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3201 kvm_io_bus_sort_cmp, NULL); 3202 3203 return 0; 3204 } 3205 3206 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3207 gpa_t addr, int len) 3208 { 3209 struct kvm_io_range *range, key; 3210 int off; 3211 3212 key = (struct kvm_io_range) { 3213 .addr = addr, 3214 .len = len, 3215 }; 3216 3217 range = bsearch(&key, bus->range, bus->dev_count, 3218 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3219 if (range == NULL) 3220 return -ENOENT; 3221 3222 off = range - bus->range; 3223 3224 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3225 off--; 3226 3227 return off; 3228 } 3229 3230 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3231 struct kvm_io_range *range, const void *val) 3232 { 3233 int idx; 3234 3235 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3236 if (idx < 0) 3237 return -EOPNOTSUPP; 3238 3239 while (idx < bus->dev_count && 3240 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3241 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3242 range->len, val)) 3243 return idx; 3244 idx++; 3245 } 3246 3247 return -EOPNOTSUPP; 3248 } 3249 3250 /* kvm_io_bus_write - called under kvm->slots_lock */ 3251 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3252 int len, const void *val) 3253 { 3254 struct kvm_io_bus *bus; 3255 struct kvm_io_range range; 3256 int r; 3257 3258 range = (struct kvm_io_range) { 3259 .addr = addr, 3260 .len = len, 3261 }; 3262 3263 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3264 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3265 return r < 0 ? r : 0; 3266 } 3267 3268 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3269 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3270 gpa_t addr, int len, const void *val, long cookie) 3271 { 3272 struct kvm_io_bus *bus; 3273 struct kvm_io_range range; 3274 3275 range = (struct kvm_io_range) { 3276 .addr = addr, 3277 .len = len, 3278 }; 3279 3280 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3281 3282 /* First try the device referenced by cookie. */ 3283 if ((cookie >= 0) && (cookie < bus->dev_count) && 3284 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3285 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3286 val)) 3287 return cookie; 3288 3289 /* 3290 * cookie contained garbage; fall back to search and return the 3291 * correct cookie value. 
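* Callers may cache the non-negative index returned here and pass it
* back as the cookie on a later write to skip the binary search.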
3292 */ 3293 return __kvm_io_bus_write(vcpu, bus, &range, val); 3294 } 3295 3296 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3297 struct kvm_io_range *range, void *val) 3298 { 3299 int idx; 3300 3301 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3302 if (idx < 0) 3303 return -EOPNOTSUPP; 3304 3305 while (idx < bus->dev_count && 3306 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3307 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3308 range->len, val)) 3309 return idx; 3310 idx++; 3311 } 3312 3313 return -EOPNOTSUPP; 3314 } 3315 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3316 3317 /* kvm_io_bus_read - called under kvm->slots_lock */ 3318 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3319 int len, void *val) 3320 { 3321 struct kvm_io_bus *bus; 3322 struct kvm_io_range range; 3323 int r; 3324 3325 range = (struct kvm_io_range) { 3326 .addr = addr, 3327 .len = len, 3328 }; 3329 3330 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3331 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3332 return r < 0 ? r : 0; 3333 } 3334 3335 3336 /* Caller must hold slots_lock. */ 3337 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3338 int len, struct kvm_io_device *dev) 3339 { 3340 struct kvm_io_bus *new_bus, *bus; 3341 3342 bus = kvm->buses[bus_idx]; 3343 /* exclude ioeventfd which is limited by maximum fd */ 3344 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3345 return -ENOSPC; 3346 3347 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3348 sizeof(struct kvm_io_range)), GFP_KERNEL); 3349 if (!new_bus) 3350 return -ENOMEM; 3351 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3352 sizeof(struct kvm_io_range))); 3353 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3354 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3355 synchronize_srcu_expedited(&kvm->srcu); 3356 kfree(bus); 3357 3358 return 0; 3359 } 3360 3361 /* Caller must hold slots_lock. 
*/ 3362 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3363 struct kvm_io_device *dev) 3364 { 3365 int i, r; 3366 struct kvm_io_bus *new_bus, *bus; 3367 3368 bus = kvm->buses[bus_idx]; 3369 r = -ENOENT; 3370 for (i = 0; i < bus->dev_count; i++) 3371 if (bus->range[i].dev == dev) { 3372 r = 0; 3373 break; 3374 } 3375 3376 if (r) 3377 return r; 3378 3379 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3380 sizeof(struct kvm_io_range)), GFP_KERNEL); 3381 if (!new_bus) 3382 return -ENOMEM; 3383 3384 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3385 new_bus->dev_count--; 3386 memcpy(new_bus->range + i, bus->range + i + 1, 3387 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3388 3389 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3390 synchronize_srcu_expedited(&kvm->srcu); 3391 kfree(bus); 3392 return r; 3393 } 3394 3395 static struct notifier_block kvm_cpu_notifier = { 3396 .notifier_call = kvm_cpu_hotplug, 3397 }; 3398 3399 static int vm_stat_get(void *_offset, u64 *val) 3400 { 3401 unsigned offset = (long)_offset; 3402 struct kvm *kvm; 3403 3404 *val = 0; 3405 spin_lock(&kvm_lock); 3406 list_for_each_entry(kvm, &vm_list, vm_list) 3407 *val += *(u32 *)((void *)kvm + offset); 3408 spin_unlock(&kvm_lock); 3409 return 0; 3410 } 3411 3412 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3413 3414 static int vcpu_stat_get(void *_offset, u64 *val) 3415 { 3416 unsigned offset = (long)_offset; 3417 struct kvm *kvm; 3418 struct kvm_vcpu *vcpu; 3419 int i; 3420 3421 *val = 0; 3422 spin_lock(&kvm_lock); 3423 list_for_each_entry(kvm, &vm_list, vm_list) 3424 kvm_for_each_vcpu(i, vcpu, kvm) 3425 *val += *(u32 *)((void *)vcpu + offset); 3426 3427 spin_unlock(&kvm_lock); 3428 return 0; 3429 } 3430 3431 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3432 3433 static const struct file_operations *stat_fops[] = { 3434 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3435 [KVM_STAT_VM] = &vm_stat_fops, 3436 }; 3437 3438 static int kvm_init_debug(void) 3439 { 3440 int r = -EEXIST; 3441 struct kvm_stats_debugfs_item *p; 3442 3443 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3444 if (kvm_debugfs_dir == NULL) 3445 goto out; 3446 3447 for (p = debugfs_entries; p->name; ++p) { 3448 if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3449 (void *)(long)p->offset, 3450 stat_fops[p->kind])) 3451 goto out_dir; 3452 } 3453 3454 return 0; 3455 3456 out_dir: 3457 debugfs_remove_recursive(kvm_debugfs_dir); 3458 out: 3459 return r; 3460 } 3461 3462 static int kvm_suspend(void) 3463 { 3464 if (kvm_usage_count) 3465 hardware_disable_nolock(NULL); 3466 return 0; 3467 } 3468 3469 static void kvm_resume(void) 3470 { 3471 if (kvm_usage_count) { 3472 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3473 hardware_enable_nolock(NULL); 3474 } 3475 } 3476 3477 static struct syscore_ops kvm_syscore_ops = { 3478 .suspend = kvm_suspend, 3479 .resume = kvm_resume, 3480 }; 3481 3482 static inline 3483 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3484 { 3485 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3486 } 3487 3488 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3489 { 3490 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3491 3492 if (vcpu->preempted) 3493 vcpu->preempted = false; 3494 3495 kvm_arch_sched_in(vcpu, cpu); 3496 3497 kvm_arch_vcpu_load(vcpu, cpu); 3498 } 3499 3500 static void kvm_sched_out(struct preempt_notifier *pn, 3501 struct task_struct *next) 3502 { 3503 
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3504 3505 if (current->state == TASK_RUNNING) 3506 vcpu->preempted = true; 3507 kvm_arch_vcpu_put(vcpu); 3508 } 3509 3510 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3511 struct module *module) 3512 { 3513 int r; 3514 int cpu; 3515 3516 r = kvm_arch_init(opaque); 3517 if (r) 3518 goto out_fail; 3519 3520 /* 3521 * kvm_arch_init makes sure there's at most one caller 3522 * for architectures that support multiple implementations, 3523 * like intel and amd on x86. 3524 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3525 * conflicts in case kvm is already setup for another implementation. 3526 */ 3527 r = kvm_irqfd_init(); 3528 if (r) 3529 goto out_irqfd; 3530 3531 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3532 r = -ENOMEM; 3533 goto out_free_0; 3534 } 3535 3536 r = kvm_arch_hardware_setup(); 3537 if (r < 0) 3538 goto out_free_0a; 3539 3540 for_each_online_cpu(cpu) { 3541 smp_call_function_single(cpu, 3542 kvm_arch_check_processor_compat, 3543 &r, 1); 3544 if (r < 0) 3545 goto out_free_1; 3546 } 3547 3548 r = register_cpu_notifier(&kvm_cpu_notifier); 3549 if (r) 3550 goto out_free_2; 3551 register_reboot_notifier(&kvm_reboot_notifier); 3552 3553 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 3554 if (!vcpu_align) 3555 vcpu_align = __alignof__(struct kvm_vcpu); 3556 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3557 0, NULL); 3558 if (!kvm_vcpu_cache) { 3559 r = -ENOMEM; 3560 goto out_free_3; 3561 } 3562 3563 r = kvm_async_pf_init(); 3564 if (r) 3565 goto out_free; 3566 3567 kvm_chardev_ops.owner = module; 3568 kvm_vm_fops.owner = module; 3569 kvm_vcpu_fops.owner = module; 3570 3571 r = misc_register(&kvm_dev); 3572 if (r) { 3573 pr_err("kvm: misc device register failed\n"); 3574 goto out_unreg; 3575 } 3576 3577 register_syscore_ops(&kvm_syscore_ops); 3578 3579 kvm_preempt_ops.sched_in = kvm_sched_in; 3580 kvm_preempt_ops.sched_out = kvm_sched_out; 3581 3582 r = kvm_init_debug(); 3583 if (r) { 3584 pr_err("kvm: create debugfs files failed\n"); 3585 goto out_undebugfs; 3586 } 3587 3588 r = kvm_vfio_ops_init(); 3589 WARN_ON(r); 3590 3591 return 0; 3592 3593 out_undebugfs: 3594 unregister_syscore_ops(&kvm_syscore_ops); 3595 misc_deregister(&kvm_dev); 3596 out_unreg: 3597 kvm_async_pf_deinit(); 3598 out_free: 3599 kmem_cache_destroy(kvm_vcpu_cache); 3600 out_free_3: 3601 unregister_reboot_notifier(&kvm_reboot_notifier); 3602 unregister_cpu_notifier(&kvm_cpu_notifier); 3603 out_free_2: 3604 out_free_1: 3605 kvm_arch_hardware_unsetup(); 3606 out_free_0a: 3607 free_cpumask_var(cpus_hardware_enabled); 3608 out_free_0: 3609 kvm_irqfd_exit(); 3610 out_irqfd: 3611 kvm_arch_exit(); 3612 out_fail: 3613 return r; 3614 } 3615 EXPORT_SYMBOL_GPL(kvm_init); 3616 3617 void kvm_exit(void) 3618 { 3619 debugfs_remove_recursive(kvm_debugfs_dir); 3620 misc_deregister(&kvm_dev); 3621 kmem_cache_destroy(kvm_vcpu_cache); 3622 kvm_async_pf_deinit(); 3623 unregister_syscore_ops(&kvm_syscore_ops); 3624 unregister_reboot_notifier(&kvm_reboot_notifier); 3625 unregister_cpu_notifier(&kvm_cpu_notifier); 3626 on_each_cpu(hardware_disable_nolock, NULL, 1); 3627 kvm_arch_hardware_unsetup(); 3628 kvm_arch_exit(); 3629 kvm_irqfd_exit(); 3630 free_cpumask_var(cpus_hardware_enabled); 3631 kvm_vfio_ops_exit(); 3632 } 3633 EXPORT_SYMBOL_GPL(kvm_exit); 3634
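/*
 * A minimal, illustrative sketch (assumptions: an x86 host exposing
 * /dev/kvm; guest memory and register setup omitted; no error handling)
 * of the userspace flow served by the ioctls above:
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0UL);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0UL);
 *	long size   = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0UL);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0UL);
 *	switch (run->exit_reason) { ... }
 *
 * Guest RAM is installed beforehand with KVM_SET_USER_MEMORY_REGION and
 * the vCPU state with KVM_SET_REGS/KVM_SET_SREGS.
 */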