1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include <kvm/iodev.h> 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched.h> 36 #include <linux/cpumask.h> 37 #include <linux/smp.h> 38 #include <linux/anon_inodes.h> 39 #include <linux/profile.h> 40 #include <linux/kvm_para.h> 41 #include <linux/pagemap.h> 42 #include <linux/mman.h> 43 #include <linux/swap.h> 44 #include <linux/bitops.h> 45 #include <linux/spinlock.h> 46 #include <linux/compat.h> 47 #include <linux/srcu.h> 48 #include <linux/hugetlb.h> 49 #include <linux/slab.h> 50 #include <linux/sort.h> 51 #include <linux/bsearch.h> 52 53 #include <asm/processor.h> 54 #include <asm/io.h> 55 #include <asm/ioctl.h> 56 #include <asm/uaccess.h> 57 #include <asm/pgtable.h> 58 59 #include "coalesced_mmio.h" 60 #include "async_pf.h" 61 #include "vfio.h" 62 63 #define CREATE_TRACE_POINTS 64 #include <trace/events/kvm.h> 65 66 /* Worst case buffer size needed for holding an integer. */ 67 #define ITOA_MAX_LEN 12 68 69 MODULE_AUTHOR("Qumranet"); 70 MODULE_LICENSE("GPL"); 71 72 /* Architectures should define their poll value according to the halt latency */ 73 static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 74 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); 75 76 /* Default doubles per-vcpu halt_poll_ns. */ 77 static unsigned int halt_poll_ns_grow = 2; 78 module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR); 79 80 /* Default resets per-vcpu halt_poll_ns . 
*/ 81 static unsigned int halt_poll_ns_shrink; 82 module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR); 83 84 /* 85 * Ordering of locks: 86 * 87 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 88 */ 89 90 DEFINE_SPINLOCK(kvm_lock); 91 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 92 LIST_HEAD(vm_list); 93 94 static cpumask_var_t cpus_hardware_enabled; 95 static int kvm_usage_count; 96 static atomic_t hardware_enable_failed; 97 98 struct kmem_cache *kvm_vcpu_cache; 99 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 100 101 static __read_mostly struct preempt_ops kvm_preempt_ops; 102 103 struct dentry *kvm_debugfs_dir; 104 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 105 106 static int kvm_debugfs_num_entries; 107 static const struct file_operations *stat_fops_per_vm[]; 108 109 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 110 unsigned long arg); 111 #ifdef CONFIG_KVM_COMPAT 112 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 113 unsigned long arg); 114 #endif 115 static int hardware_enable_all(void); 116 static void hardware_disable_all(void); 117 118 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 119 120 static void kvm_release_pfn_dirty(kvm_pfn_t pfn); 121 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); 122 123 __visible bool kvm_rebooting; 124 EXPORT_SYMBOL_GPL(kvm_rebooting); 125 126 static bool largepages_enabled = true; 127 128 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 129 { 130 if (pfn_valid(pfn)) 131 return PageReserved(pfn_to_page(pfn)); 132 133 return true; 134 } 135 136 /* 137 * Switches to specified vcpu, until a matching vcpu_put() 138 */ 139 int vcpu_load(struct kvm_vcpu *vcpu) 140 { 141 int cpu; 142 143 if (mutex_lock_killable(&vcpu->mutex)) 144 return -EINTR; 145 cpu = get_cpu(); 146 preempt_notifier_register(&vcpu->preempt_notifier); 147 kvm_arch_vcpu_load(vcpu, cpu); 148 put_cpu(); 149 return 0; 150 } 151 EXPORT_SYMBOL_GPL(vcpu_load); 152 153 void vcpu_put(struct kvm_vcpu *vcpu) 154 { 155 preempt_disable(); 156 kvm_arch_vcpu_put(vcpu); 157 preempt_notifier_unregister(&vcpu->preempt_notifier); 158 preempt_enable(); 159 mutex_unlock(&vcpu->mutex); 160 } 161 EXPORT_SYMBOL_GPL(vcpu_put); 162 163 static void ack_flush(void *_completed) 164 { 165 } 166 167 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 168 { 169 int i, cpu, me; 170 cpumask_var_t cpus; 171 bool called = true; 172 struct kvm_vcpu *vcpu; 173 174 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 175 176 me = get_cpu(); 177 kvm_for_each_vcpu(i, vcpu, kvm) { 178 kvm_make_request(req, vcpu); 179 cpu = vcpu->cpu; 180 181 /* Set ->requests bit before we read ->mode. */ 182 smp_mb__after_atomic(); 183 184 if (cpus != NULL && cpu != -1 && cpu != me && 185 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 186 cpumask_set_cpu(cpu, cpus); 187 } 188 if (unlikely(cpus == NULL)) 189 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 190 else if (!cpumask_empty(cpus)) 191 smp_call_function_many(cpus, ack_flush, NULL, 1); 192 else 193 called = false; 194 put_cpu(); 195 free_cpumask_var(cpus); 196 return called; 197 } 198 199 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 200 void kvm_flush_remote_tlbs(struct kvm *kvm) 201 { 202 /* 203 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in 204 * kvm_make_all_cpus_request. 205 */ 206 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); 207 208 /* 209 * We want to publish modifications to the page tables before reading 210 * mode. Pairs with a memory barrier in arch-specific code. 
211 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 212 * and smp_mb in walk_shadow_page_lockless_begin/end. 213 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 214 * 215 * There is already an smp_mb__after_atomic() before 216 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 217 * barrier here. 218 */ 219 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 220 ++kvm->stat.remote_tlb_flush; 221 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 222 } 223 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 224 #endif 225 226 void kvm_reload_remote_mmus(struct kvm *kvm) 227 { 228 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 229 } 230 231 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 232 { 233 struct page *page; 234 int r; 235 236 mutex_init(&vcpu->mutex); 237 vcpu->cpu = -1; 238 vcpu->kvm = kvm; 239 vcpu->vcpu_id = id; 240 vcpu->pid = NULL; 241 init_swait_queue_head(&vcpu->wq); 242 kvm_async_pf_vcpu_init(vcpu); 243 244 vcpu->pre_pcpu = -1; 245 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 246 247 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 248 if (!page) { 249 r = -ENOMEM; 250 goto fail; 251 } 252 vcpu->run = page_address(page); 253 254 kvm_vcpu_set_in_spin_loop(vcpu, false); 255 kvm_vcpu_set_dy_eligible(vcpu, false); 256 vcpu->preempted = false; 257 258 r = kvm_arch_vcpu_init(vcpu); 259 if (r < 0) 260 goto fail_free_run; 261 return 0; 262 263 fail_free_run: 264 free_page((unsigned long)vcpu->run); 265 fail: 266 return r; 267 } 268 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 269 270 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 271 { 272 put_pid(vcpu->pid); 273 kvm_arch_vcpu_uninit(vcpu); 274 free_page((unsigned long)vcpu->run); 275 } 276 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 277 278 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 279 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 280 { 281 return container_of(mn, struct kvm, mmu_notifier); 282 } 283 284 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 285 struct mm_struct *mm, 286 unsigned long address) 287 { 288 struct kvm *kvm = mmu_notifier_to_kvm(mn); 289 int need_tlb_flush, idx; 290 291 /* 292 * When ->invalidate_page runs, the linux pte has been zapped 293 * already but the page is still allocated until 294 * ->invalidate_page returns. So if we increase the sequence 295 * here the kvm page fault will notice if the spte can't be 296 * established because the page is going to be freed. If 297 * instead the kvm page fault establishes the spte before 298 * ->invalidate_page runs, kvm_unmap_hva will release it 299 * before returning. 300 * 301 * The sequence increase only need to be seen at spin_unlock 302 * time, and not at spin_lock time. 303 * 304 * Increasing the sequence after the spin_unlock would be 305 * unsafe because the kvm page fault could then establish the 306 * pte after kvm_unmap_hva returned, without noticing the page 307 * is going to be freed. 
308 */ 309 idx = srcu_read_lock(&kvm->srcu); 310 spin_lock(&kvm->mmu_lock); 311 312 kvm->mmu_notifier_seq++; 313 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 314 /* we've to flush the tlb before the pages can be freed */ 315 if (need_tlb_flush) 316 kvm_flush_remote_tlbs(kvm); 317 318 spin_unlock(&kvm->mmu_lock); 319 320 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 321 322 srcu_read_unlock(&kvm->srcu, idx); 323 } 324 325 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 326 struct mm_struct *mm, 327 unsigned long address, 328 pte_t pte) 329 { 330 struct kvm *kvm = mmu_notifier_to_kvm(mn); 331 int idx; 332 333 idx = srcu_read_lock(&kvm->srcu); 334 spin_lock(&kvm->mmu_lock); 335 kvm->mmu_notifier_seq++; 336 kvm_set_spte_hva(kvm, address, pte); 337 spin_unlock(&kvm->mmu_lock); 338 srcu_read_unlock(&kvm->srcu, idx); 339 } 340 341 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 342 struct mm_struct *mm, 343 unsigned long start, 344 unsigned long end) 345 { 346 struct kvm *kvm = mmu_notifier_to_kvm(mn); 347 int need_tlb_flush = 0, idx; 348 349 idx = srcu_read_lock(&kvm->srcu); 350 spin_lock(&kvm->mmu_lock); 351 /* 352 * The count increase must become visible at unlock time as no 353 * spte can be established without taking the mmu_lock and 354 * count is also read inside the mmu_lock critical section. 355 */ 356 kvm->mmu_notifier_count++; 357 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 358 need_tlb_flush |= kvm->tlbs_dirty; 359 /* we've to flush the tlb before the pages can be freed */ 360 if (need_tlb_flush) 361 kvm_flush_remote_tlbs(kvm); 362 363 spin_unlock(&kvm->mmu_lock); 364 srcu_read_unlock(&kvm->srcu, idx); 365 } 366 367 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 368 struct mm_struct *mm, 369 unsigned long start, 370 unsigned long end) 371 { 372 struct kvm *kvm = mmu_notifier_to_kvm(mn); 373 374 spin_lock(&kvm->mmu_lock); 375 /* 376 * This sequence increase will notify the kvm page fault that 377 * the page that is going to be mapped in the spte could have 378 * been freed. 379 */ 380 kvm->mmu_notifier_seq++; 381 smp_wmb(); 382 /* 383 * The above sequence increase must be visible before the 384 * below count decrease, which is ensured by the smp_wmb above 385 * in conjunction with the smp_rmb in mmu_notifier_retry(). 386 */ 387 kvm->mmu_notifier_count--; 388 spin_unlock(&kvm->mmu_lock); 389 390 BUG_ON(kvm->mmu_notifier_count < 0); 391 } 392 393 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 394 struct mm_struct *mm, 395 unsigned long start, 396 unsigned long end) 397 { 398 struct kvm *kvm = mmu_notifier_to_kvm(mn); 399 int young, idx; 400 401 idx = srcu_read_lock(&kvm->srcu); 402 spin_lock(&kvm->mmu_lock); 403 404 young = kvm_age_hva(kvm, start, end); 405 if (young) 406 kvm_flush_remote_tlbs(kvm); 407 408 spin_unlock(&kvm->mmu_lock); 409 srcu_read_unlock(&kvm->srcu, idx); 410 411 return young; 412 } 413 414 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 415 struct mm_struct *mm, 416 unsigned long start, 417 unsigned long end) 418 { 419 struct kvm *kvm = mmu_notifier_to_kvm(mn); 420 int young, idx; 421 422 idx = srcu_read_lock(&kvm->srcu); 423 spin_lock(&kvm->mmu_lock); 424 /* 425 * Even though we do not flush TLB, this will still adversely 426 * affect performance on pre-Haswell Intel EPT, where there is 427 * no EPT Access Bit to clear so that we have to tear down EPT 428 * tables instead. 
If we find this unacceptable, we can always 429 * add a parameter to kvm_age_hva so that it effectively doesn't 430 * do anything on clear_young. 431 * 432 * Also note that currently we never issue secondary TLB flushes 433 * from clear_young, leaving this job up to the regular system 434 * cadence. If we find this inaccurate, we might come up with a 435 * more sophisticated heuristic later. 436 */ 437 young = kvm_age_hva(kvm, start, end); 438 spin_unlock(&kvm->mmu_lock); 439 srcu_read_unlock(&kvm->srcu, idx); 440 441 return young; 442 } 443 444 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 445 struct mm_struct *mm, 446 unsigned long address) 447 { 448 struct kvm *kvm = mmu_notifier_to_kvm(mn); 449 int young, idx; 450 451 idx = srcu_read_lock(&kvm->srcu); 452 spin_lock(&kvm->mmu_lock); 453 young = kvm_test_age_hva(kvm, address); 454 spin_unlock(&kvm->mmu_lock); 455 srcu_read_unlock(&kvm->srcu, idx); 456 457 return young; 458 } 459 460 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 461 struct mm_struct *mm) 462 { 463 struct kvm *kvm = mmu_notifier_to_kvm(mn); 464 int idx; 465 466 idx = srcu_read_lock(&kvm->srcu); 467 kvm_arch_flush_shadow_all(kvm); 468 srcu_read_unlock(&kvm->srcu, idx); 469 } 470 471 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 472 .invalidate_page = kvm_mmu_notifier_invalidate_page, 473 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 474 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 475 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 476 .clear_young = kvm_mmu_notifier_clear_young, 477 .test_young = kvm_mmu_notifier_test_young, 478 .change_pte = kvm_mmu_notifier_change_pte, 479 .release = kvm_mmu_notifier_release, 480 }; 481 482 static int kvm_init_mmu_notifier(struct kvm *kvm) 483 { 484 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 485 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 486 } 487 488 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 489 490 static int kvm_init_mmu_notifier(struct kvm *kvm) 491 { 492 return 0; 493 } 494 495 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 496 497 static struct kvm_memslots *kvm_alloc_memslots(void) 498 { 499 int i; 500 struct kvm_memslots *slots; 501 502 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 503 if (!slots) 504 return NULL; 505 506 /* 507 * Init kvm generation close to the maximum to easily test the 508 * code of handling generation number wrap-around. 509 */ 510 slots->generation = -150; 511 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 512 slots->id_to_index[i] = slots->memslots[i].id = i; 513 514 return slots; 515 } 516 517 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 518 { 519 if (!memslot->dirty_bitmap) 520 return; 521 522 kvfree(memslot->dirty_bitmap); 523 memslot->dirty_bitmap = NULL; 524 } 525 526 /* 527 * Free any memory in @free but not in @dont. 
528 */ 529 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 530 struct kvm_memory_slot *dont) 531 { 532 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 533 kvm_destroy_dirty_bitmap(free); 534 535 kvm_arch_free_memslot(kvm, free, dont); 536 537 free->npages = 0; 538 } 539 540 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 541 { 542 struct kvm_memory_slot *memslot; 543 544 if (!slots) 545 return; 546 547 kvm_for_each_memslot(memslot, slots) 548 kvm_free_memslot(kvm, memslot, NULL); 549 550 kvfree(slots); 551 } 552 553 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 554 { 555 int i; 556 557 if (!kvm->debugfs_dentry) 558 return; 559 560 debugfs_remove_recursive(kvm->debugfs_dentry); 561 562 for (i = 0; i < kvm_debugfs_num_entries; i++) 563 kfree(kvm->debugfs_stat_data[i]); 564 kfree(kvm->debugfs_stat_data); 565 } 566 567 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 568 { 569 char dir_name[ITOA_MAX_LEN * 2]; 570 struct kvm_stat_data *stat_data; 571 struct kvm_stats_debugfs_item *p; 572 573 if (!debugfs_initialized()) 574 return 0; 575 576 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 577 kvm->debugfs_dentry = debugfs_create_dir(dir_name, 578 kvm_debugfs_dir); 579 if (!kvm->debugfs_dentry) 580 return -ENOMEM; 581 582 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 583 sizeof(*kvm->debugfs_stat_data), 584 GFP_KERNEL); 585 if (!kvm->debugfs_stat_data) 586 return -ENOMEM; 587 588 for (p = debugfs_entries; p->name; p++) { 589 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL); 590 if (!stat_data) 591 return -ENOMEM; 592 593 stat_data->kvm = kvm; 594 stat_data->offset = p->offset; 595 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; 596 if (!debugfs_create_file(p->name, 0444, 597 kvm->debugfs_dentry, 598 stat_data, 599 stat_fops_per_vm[p->kind])) 600 return -ENOMEM; 601 } 602 return 0; 603 } 604 605 static struct kvm *kvm_create_vm(unsigned long type) 606 { 607 int r, i; 608 struct kvm *kvm = kvm_arch_alloc_vm(); 609 610 if (!kvm) 611 return ERR_PTR(-ENOMEM); 612 613 spin_lock_init(&kvm->mmu_lock); 614 atomic_inc(¤t->mm->mm_count); 615 kvm->mm = current->mm; 616 kvm_eventfd_init(kvm); 617 mutex_init(&kvm->lock); 618 mutex_init(&kvm->irq_lock); 619 mutex_init(&kvm->slots_lock); 620 atomic_set(&kvm->users_count, 1); 621 INIT_LIST_HEAD(&kvm->devices); 622 623 r = kvm_arch_init_vm(kvm, type); 624 if (r) 625 goto out_err_no_disable; 626 627 r = hardware_enable_all(); 628 if (r) 629 goto out_err_no_disable; 630 631 #ifdef CONFIG_HAVE_KVM_IRQFD 632 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 633 #endif 634 635 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 636 637 r = -ENOMEM; 638 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 639 kvm->memslots[i] = kvm_alloc_memslots(); 640 if (!kvm->memslots[i]) 641 goto out_err_no_srcu; 642 } 643 644 if (init_srcu_struct(&kvm->srcu)) 645 goto out_err_no_srcu; 646 if (init_srcu_struct(&kvm->irq_srcu)) 647 goto out_err_no_irq_srcu; 648 for (i = 0; i < KVM_NR_BUSES; i++) { 649 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 650 GFP_KERNEL); 651 if (!kvm->buses[i]) 652 goto out_err; 653 } 654 655 r = kvm_init_mmu_notifier(kvm); 656 if (r) 657 goto out_err; 658 659 spin_lock(&kvm_lock); 660 list_add(&kvm->vm_list, &vm_list); 661 spin_unlock(&kvm_lock); 662 663 preempt_notifier_inc(); 664 665 return kvm; 666 667 out_err: 668 cleanup_srcu_struct(&kvm->irq_srcu); 669 out_err_no_irq_srcu: 670 cleanup_srcu_struct(&kvm->srcu); 671 out_err_no_srcu: 672 
hardware_disable_all(); 673 out_err_no_disable: 674 for (i = 0; i < KVM_NR_BUSES; i++) 675 kfree(kvm->buses[i]); 676 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 677 kvm_free_memslots(kvm, kvm->memslots[i]); 678 kvm_arch_free_vm(kvm); 679 mmdrop(current->mm); 680 return ERR_PTR(r); 681 } 682 683 /* 684 * Avoid using vmalloc for a small buffer. 685 * Should not be used when the size is statically known. 686 */ 687 void *kvm_kvzalloc(unsigned long size) 688 { 689 if (size > PAGE_SIZE) 690 return vzalloc(size); 691 else 692 return kzalloc(size, GFP_KERNEL); 693 } 694 695 static void kvm_destroy_devices(struct kvm *kvm) 696 { 697 struct kvm_device *dev, *tmp; 698 699 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 700 list_del(&dev->vm_node); 701 dev->ops->destroy(dev); 702 } 703 } 704 705 static void kvm_destroy_vm(struct kvm *kvm) 706 { 707 int i; 708 struct mm_struct *mm = kvm->mm; 709 710 kvm_destroy_vm_debugfs(kvm); 711 kvm_arch_sync_events(kvm); 712 spin_lock(&kvm_lock); 713 list_del(&kvm->vm_list); 714 spin_unlock(&kvm_lock); 715 kvm_free_irq_routing(kvm); 716 for (i = 0; i < KVM_NR_BUSES; i++) 717 kvm_io_bus_destroy(kvm->buses[i]); 718 kvm_coalesced_mmio_free(kvm); 719 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 720 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 721 #else 722 kvm_arch_flush_shadow_all(kvm); 723 #endif 724 kvm_arch_destroy_vm(kvm); 725 kvm_destroy_devices(kvm); 726 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 727 kvm_free_memslots(kvm, kvm->memslots[i]); 728 cleanup_srcu_struct(&kvm->irq_srcu); 729 cleanup_srcu_struct(&kvm->srcu); 730 kvm_arch_free_vm(kvm); 731 preempt_notifier_dec(); 732 hardware_disable_all(); 733 mmdrop(mm); 734 } 735 736 void kvm_get_kvm(struct kvm *kvm) 737 { 738 atomic_inc(&kvm->users_count); 739 } 740 EXPORT_SYMBOL_GPL(kvm_get_kvm); 741 742 void kvm_put_kvm(struct kvm *kvm) 743 { 744 if (atomic_dec_and_test(&kvm->users_count)) 745 kvm_destroy_vm(kvm); 746 } 747 EXPORT_SYMBOL_GPL(kvm_put_kvm); 748 749 750 static int kvm_vm_release(struct inode *inode, struct file *filp) 751 { 752 struct kvm *kvm = filp->private_data; 753 754 kvm_irqfd_release(kvm); 755 756 kvm_put_kvm(kvm); 757 return 0; 758 } 759 760 /* 761 * Allocation size is twice as large as the actual dirty bitmap size. 762 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 763 */ 764 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 765 { 766 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 767 768 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 769 if (!memslot->dirty_bitmap) 770 return -ENOMEM; 771 772 return 0; 773 } 774 775 /* 776 * Insert memslot and re-sort memslots based on their GFN, 777 * so binary search could be used to lookup GFN. 778 * Sorting algorithm takes advantage of having initially 779 * sorted array and known changed memslot position. 
780 */ 781 static void update_memslots(struct kvm_memslots *slots, 782 struct kvm_memory_slot *new) 783 { 784 int id = new->id; 785 int i = slots->id_to_index[id]; 786 struct kvm_memory_slot *mslots = slots->memslots; 787 788 WARN_ON(mslots[i].id != id); 789 if (!new->npages) { 790 WARN_ON(!mslots[i].npages); 791 if (mslots[i].npages) 792 slots->used_slots--; 793 } else { 794 if (!mslots[i].npages) 795 slots->used_slots++; 796 } 797 798 while (i < KVM_MEM_SLOTS_NUM - 1 && 799 new->base_gfn <= mslots[i + 1].base_gfn) { 800 if (!mslots[i + 1].npages) 801 break; 802 mslots[i] = mslots[i + 1]; 803 slots->id_to_index[mslots[i].id] = i; 804 i++; 805 } 806 807 /* 808 * The ">=" is needed when creating a slot with base_gfn == 0, 809 * so that it moves before all those with base_gfn == npages == 0. 810 * 811 * On the other hand, if new->npages is zero, the above loop has 812 * already left i pointing to the beginning of the empty part of 813 * mslots, and the ">=" would move the hole backwards in this 814 * case---which is wrong. So skip the loop when deleting a slot. 815 */ 816 if (new->npages) { 817 while (i > 0 && 818 new->base_gfn >= mslots[i - 1].base_gfn) { 819 mslots[i] = mslots[i - 1]; 820 slots->id_to_index[mslots[i].id] = i; 821 i--; 822 } 823 } else 824 WARN_ON_ONCE(i != slots->used_slots); 825 826 mslots[i] = *new; 827 slots->id_to_index[mslots[i].id] = i; 828 } 829 830 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 831 { 832 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 833 834 #ifdef __KVM_HAVE_READONLY_MEM 835 valid_flags |= KVM_MEM_READONLY; 836 #endif 837 838 if (mem->flags & ~valid_flags) 839 return -EINVAL; 840 841 return 0; 842 } 843 844 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 845 int as_id, struct kvm_memslots *slots) 846 { 847 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 848 849 /* 850 * Set the low bit in the generation, which disables SPTE caching 851 * until the end of synchronize_srcu_expedited. 852 */ 853 WARN_ON(old_memslots->generation & 1); 854 slots->generation = old_memslots->generation + 1; 855 856 rcu_assign_pointer(kvm->memslots[as_id], slots); 857 synchronize_srcu_expedited(&kvm->srcu); 858 859 /* 860 * Increment the new memslot generation a second time. This prevents 861 * vm exits that race with memslot updates from caching a memslot 862 * generation that will (potentially) be valid forever. 863 */ 864 slots->generation++; 865 866 kvm_arch_memslots_updated(kvm, slots); 867 868 return old_memslots; 869 } 870 871 /* 872 * Allocate some memory and give it an address in the guest physical address 873 * space. 874 * 875 * Discontiguous memory is allowed, mostly for framebuffers. 876 * 877 * Must be called holding kvm->slots_lock for write. 878 */ 879 int __kvm_set_memory_region(struct kvm *kvm, 880 const struct kvm_userspace_memory_region *mem) 881 { 882 int r; 883 gfn_t base_gfn; 884 unsigned long npages; 885 struct kvm_memory_slot *slot; 886 struct kvm_memory_slot old, new; 887 struct kvm_memslots *slots = NULL, *old_memslots; 888 int as_id, id; 889 enum kvm_mr_change change; 890 891 r = check_memory_region_flags(mem); 892 if (r) 893 goto out; 894 895 r = -EINVAL; 896 as_id = mem->slot >> 16; 897 id = (u16)mem->slot; 898 899 /* General sanity checks */ 900 if (mem->memory_size & (PAGE_SIZE - 1)) 901 goto out; 902 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 903 goto out; 904 /* We can read the guest memory with __xxx_user() later on. 
*/ 905 if ((id < KVM_USER_MEM_SLOTS) && 906 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 907 !access_ok(VERIFY_WRITE, 908 (void __user *)(unsigned long)mem->userspace_addr, 909 mem->memory_size))) 910 goto out; 911 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 912 goto out; 913 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 914 goto out; 915 916 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); 917 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 918 npages = mem->memory_size >> PAGE_SHIFT; 919 920 if (npages > KVM_MEM_MAX_NR_PAGES) 921 goto out; 922 923 new = old = *slot; 924 925 new.id = id; 926 new.base_gfn = base_gfn; 927 new.npages = npages; 928 new.flags = mem->flags; 929 930 if (npages) { 931 if (!old.npages) 932 change = KVM_MR_CREATE; 933 else { /* Modify an existing slot. */ 934 if ((mem->userspace_addr != old.userspace_addr) || 935 (npages != old.npages) || 936 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 937 goto out; 938 939 if (base_gfn != old.base_gfn) 940 change = KVM_MR_MOVE; 941 else if (new.flags != old.flags) 942 change = KVM_MR_FLAGS_ONLY; 943 else { /* Nothing to change. */ 944 r = 0; 945 goto out; 946 } 947 } 948 } else { 949 if (!old.npages) 950 goto out; 951 952 change = KVM_MR_DELETE; 953 new.base_gfn = 0; 954 new.flags = 0; 955 } 956 957 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 958 /* Check for overlaps */ 959 r = -EEXIST; 960 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 961 if ((slot->id >= KVM_USER_MEM_SLOTS) || 962 (slot->id == id)) 963 continue; 964 if (!((base_gfn + npages <= slot->base_gfn) || 965 (base_gfn >= slot->base_gfn + slot->npages))) 966 goto out; 967 } 968 } 969 970 /* Free page dirty bitmap if unneeded */ 971 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 972 new.dirty_bitmap = NULL; 973 974 r = -ENOMEM; 975 if (change == KVM_MR_CREATE) { 976 new.userspace_addr = mem->userspace_addr; 977 978 if (kvm_arch_create_memslot(kvm, &new, npages)) 979 goto out_free; 980 } 981 982 /* Allocate page dirty bitmap if needed */ 983 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 984 if (kvm_create_dirty_bitmap(&new) < 0) 985 goto out_free; 986 } 987 988 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 989 if (!slots) 990 goto out_free; 991 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 992 993 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 994 slot = id_to_memslot(slots, id); 995 slot->flags |= KVM_MEMSLOT_INVALID; 996 997 old_memslots = install_new_memslots(kvm, as_id, slots); 998 999 /* slot was deleted or moved, clear iommu mapping */ 1000 kvm_iommu_unmap_pages(kvm, &old); 1001 /* From this point no new shadow pages pointing to a deleted, 1002 * or moved, memslot will be created. 1003 * 1004 * validation of sp->gfn happens in: 1005 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1006 * - kvm_is_visible_gfn (mmu_check_roots) 1007 */ 1008 kvm_arch_flush_shadow_memslot(kvm, slot); 1009 1010 /* 1011 * We can re-use the old_memslots from above, the only difference 1012 * from the currently installed memslots is the invalid flag. This 1013 * will get overwritten by update_memslots anyway. 
1014 */ 1015 slots = old_memslots; 1016 } 1017 1018 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 1019 if (r) 1020 goto out_slots; 1021 1022 /* actual memory is freed via old in kvm_free_memslot below */ 1023 if (change == KVM_MR_DELETE) { 1024 new.dirty_bitmap = NULL; 1025 memset(&new.arch, 0, sizeof(new.arch)); 1026 } 1027 1028 update_memslots(slots, &new); 1029 old_memslots = install_new_memslots(kvm, as_id, slots); 1030 1031 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); 1032 1033 kvm_free_memslot(kvm, &old, &new); 1034 kvfree(old_memslots); 1035 1036 /* 1037 * IOMMU mapping: New slots need to be mapped. Old slots need to be 1038 * un-mapped and re-mapped if their base changes. Since base change 1039 * unmapping is handled above with slot deletion, mapping alone is 1040 * needed here. Anything else the iommu might care about for existing 1041 * slots (size changes, userspace addr changes and read-only flag 1042 * changes) is disallowed above, so any other attribute changes getting 1043 * here can be skipped. 1044 */ 1045 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1046 r = kvm_iommu_map_pages(kvm, &new); 1047 return r; 1048 } 1049 1050 return 0; 1051 1052 out_slots: 1053 kvfree(slots); 1054 out_free: 1055 kvm_free_memslot(kvm, &new, &old); 1056 out: 1057 return r; 1058 } 1059 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1060 1061 int kvm_set_memory_region(struct kvm *kvm, 1062 const struct kvm_userspace_memory_region *mem) 1063 { 1064 int r; 1065 1066 mutex_lock(&kvm->slots_lock); 1067 r = __kvm_set_memory_region(kvm, mem); 1068 mutex_unlock(&kvm->slots_lock); 1069 return r; 1070 } 1071 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1072 1073 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1074 struct kvm_userspace_memory_region *mem) 1075 { 1076 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1077 return -EINVAL; 1078 1079 return kvm_set_memory_region(kvm, mem); 1080 } 1081 1082 int kvm_get_dirty_log(struct kvm *kvm, 1083 struct kvm_dirty_log *log, int *is_dirty) 1084 { 1085 struct kvm_memslots *slots; 1086 struct kvm_memory_slot *memslot; 1087 int r, i, as_id, id; 1088 unsigned long n; 1089 unsigned long any = 0; 1090 1091 r = -EINVAL; 1092 as_id = log->slot >> 16; 1093 id = (u16)log->slot; 1094 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1095 goto out; 1096 1097 slots = __kvm_memslots(kvm, as_id); 1098 memslot = id_to_memslot(slots, id); 1099 r = -ENOENT; 1100 if (!memslot->dirty_bitmap) 1101 goto out; 1102 1103 n = kvm_dirty_bitmap_bytes(memslot); 1104 1105 for (i = 0; !any && i < n/sizeof(long); ++i) 1106 any = memslot->dirty_bitmap[i]; 1107 1108 r = -EFAULT; 1109 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 1110 goto out; 1111 1112 if (any) 1113 *is_dirty = 1; 1114 1115 r = 0; 1116 out: 1117 return r; 1118 } 1119 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1120 1121 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1122 /** 1123 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages 1124 * are dirty write protect them for next write. 1125 * @kvm: pointer to kvm instance 1126 * @log: slot id and address to which we copy the log 1127 * @is_dirty: flag set if any page is dirty 1128 * 1129 * We need to keep it in mind that VCPU threads can write to the bitmap 1130 * concurrently. So, to avoid losing track of dirty pages we keep the 1131 * following order: 1132 * 1133 * 1. Take a snapshot of the bit and clear it if needed. 1134 * 2. Write protect the corresponding page. 1135 * 3. 
Copy the snapshot to the userspace. 1136 * 4. Upon return caller flushes TLB's if needed. 1137 * 1138 * Between 2 and 4, the guest may write to the page using the remaining TLB 1139 * entry. This is not a problem because the page is reported dirty using 1140 * the snapshot taken before and step 4 ensures that writes done after 1141 * exiting to userspace will be logged for the next call. 1142 * 1143 */ 1144 int kvm_get_dirty_log_protect(struct kvm *kvm, 1145 struct kvm_dirty_log *log, bool *is_dirty) 1146 { 1147 struct kvm_memslots *slots; 1148 struct kvm_memory_slot *memslot; 1149 int r, i, as_id, id; 1150 unsigned long n; 1151 unsigned long *dirty_bitmap; 1152 unsigned long *dirty_bitmap_buffer; 1153 1154 r = -EINVAL; 1155 as_id = log->slot >> 16; 1156 id = (u16)log->slot; 1157 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1158 goto out; 1159 1160 slots = __kvm_memslots(kvm, as_id); 1161 memslot = id_to_memslot(slots, id); 1162 1163 dirty_bitmap = memslot->dirty_bitmap; 1164 r = -ENOENT; 1165 if (!dirty_bitmap) 1166 goto out; 1167 1168 n = kvm_dirty_bitmap_bytes(memslot); 1169 1170 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); 1171 memset(dirty_bitmap_buffer, 0, n); 1172 1173 spin_lock(&kvm->mmu_lock); 1174 *is_dirty = false; 1175 for (i = 0; i < n / sizeof(long); i++) { 1176 unsigned long mask; 1177 gfn_t offset; 1178 1179 if (!dirty_bitmap[i]) 1180 continue; 1181 1182 *is_dirty = true; 1183 1184 mask = xchg(&dirty_bitmap[i], 0); 1185 dirty_bitmap_buffer[i] = mask; 1186 1187 if (mask) { 1188 offset = i * BITS_PER_LONG; 1189 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1190 offset, mask); 1191 } 1192 } 1193 1194 spin_unlock(&kvm->mmu_lock); 1195 1196 r = -EFAULT; 1197 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 1198 goto out; 1199 1200 r = 0; 1201 out: 1202 return r; 1203 } 1204 EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect); 1205 #endif 1206 1207 bool kvm_largepages_enabled(void) 1208 { 1209 return largepages_enabled; 1210 } 1211 1212 void kvm_disable_largepages(void) 1213 { 1214 largepages_enabled = false; 1215 } 1216 EXPORT_SYMBOL_GPL(kvm_disable_largepages); 1217 1218 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1219 { 1220 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 1221 } 1222 EXPORT_SYMBOL_GPL(gfn_to_memslot); 1223 1224 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 1225 { 1226 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); 1227 } 1228 1229 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1230 { 1231 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 1232 1233 if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS || 1234 memslot->flags & KVM_MEMSLOT_INVALID) 1235 return false; 1236 1237 return true; 1238 } 1239 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1240 1241 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) 1242 { 1243 struct vm_area_struct *vma; 1244 unsigned long addr, size; 1245 1246 size = PAGE_SIZE; 1247 1248 addr = gfn_to_hva(kvm, gfn); 1249 if (kvm_is_error_hva(addr)) 1250 return PAGE_SIZE; 1251 1252 down_read(¤t->mm->mmap_sem); 1253 vma = find_vma(current->mm, addr); 1254 if (!vma) 1255 goto out; 1256 1257 size = vma_kernel_pagesize(vma); 1258 1259 out: 1260 up_read(¤t->mm->mmap_sem); 1261 1262 return size; 1263 } 1264 1265 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 1266 { 1267 return slot->flags & KVM_MEM_READONLY; 1268 } 1269 1270 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1271 
gfn_t *nr_pages, bool write) 1272 { 1273 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 1274 return KVM_HVA_ERR_BAD; 1275 1276 if (memslot_is_readonly(slot) && write) 1277 return KVM_HVA_ERR_RO_BAD; 1278 1279 if (nr_pages) 1280 *nr_pages = slot->npages - (gfn - slot->base_gfn); 1281 1282 return __gfn_to_hva_memslot(slot, gfn); 1283 } 1284 1285 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1286 gfn_t *nr_pages) 1287 { 1288 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1289 } 1290 1291 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1292 gfn_t gfn) 1293 { 1294 return gfn_to_hva_many(slot, gfn, NULL); 1295 } 1296 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1297 1298 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1299 { 1300 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1301 } 1302 EXPORT_SYMBOL_GPL(gfn_to_hva); 1303 1304 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1305 { 1306 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1307 } 1308 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1309 1310 /* 1311 * If writable is set to false, the hva returned by this function is only 1312 * allowed to be read. 1313 */ 1314 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1315 gfn_t gfn, bool *writable) 1316 { 1317 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1318 1319 if (!kvm_is_error_hva(hva) && writable) 1320 *writable = !memslot_is_readonly(slot); 1321 1322 return hva; 1323 } 1324 1325 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1326 { 1327 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1328 1329 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1330 } 1331 1332 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 1333 { 1334 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1335 1336 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1337 } 1338 1339 static int get_user_page_nowait(unsigned long start, int write, 1340 struct page **page) 1341 { 1342 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1343 1344 if (write) 1345 flags |= FOLL_WRITE; 1346 1347 return __get_user_pages(current, current->mm, start, 1, flags, page, 1348 NULL, NULL); 1349 } 1350 1351 static inline int check_user_page_hwpoison(unsigned long addr) 1352 { 1353 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1354 1355 rc = __get_user_pages(current, current->mm, addr, 1, 1356 flags, NULL, NULL, NULL); 1357 return rc == -EHWPOISON; 1358 } 1359 1360 /* 1361 * The atomic path to get the writable pfn which will be stored in @pfn, 1362 * true indicates success, otherwise false is returned. 1363 */ 1364 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1365 bool write_fault, bool *writable, kvm_pfn_t *pfn) 1366 { 1367 struct page *page[1]; 1368 int npages; 1369 1370 if (!(async || atomic)) 1371 return false; 1372 1373 /* 1374 * Fast pin a writable pfn only if it is a write fault request 1375 * or the caller allows to map a writable pfn for a read fault 1376 * request. 
1377 */ 1378 if (!(write_fault || writable)) 1379 return false; 1380 1381 npages = __get_user_pages_fast(addr, 1, 1, page); 1382 if (npages == 1) { 1383 *pfn = page_to_pfn(page[0]); 1384 1385 if (writable) 1386 *writable = true; 1387 return true; 1388 } 1389 1390 return false; 1391 } 1392 1393 /* 1394 * The slow path to get the pfn of the specified host virtual address, 1395 * 1 indicates success, -errno is returned if error is detected. 1396 */ 1397 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 1398 bool *writable, kvm_pfn_t *pfn) 1399 { 1400 struct page *page[1]; 1401 int npages = 0; 1402 1403 might_sleep(); 1404 1405 if (writable) 1406 *writable = write_fault; 1407 1408 if (async) { 1409 down_read(¤t->mm->mmap_sem); 1410 npages = get_user_page_nowait(addr, write_fault, page); 1411 up_read(¤t->mm->mmap_sem); 1412 } else 1413 npages = __get_user_pages_unlocked(current, current->mm, addr, 1, 1414 write_fault, 0, page, 1415 FOLL_TOUCH|FOLL_HWPOISON); 1416 if (npages != 1) 1417 return npages; 1418 1419 /* map read fault as writable if possible */ 1420 if (unlikely(!write_fault) && writable) { 1421 struct page *wpage[1]; 1422 1423 npages = __get_user_pages_fast(addr, 1, 1, wpage); 1424 if (npages == 1) { 1425 *writable = true; 1426 put_page(page[0]); 1427 page[0] = wpage[0]; 1428 } 1429 1430 npages = 1; 1431 } 1432 *pfn = page_to_pfn(page[0]); 1433 return npages; 1434 } 1435 1436 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 1437 { 1438 if (unlikely(!(vma->vm_flags & VM_READ))) 1439 return false; 1440 1441 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 1442 return false; 1443 1444 return true; 1445 } 1446 1447 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 1448 unsigned long addr, bool *async, 1449 bool write_fault, kvm_pfn_t *p_pfn) 1450 { 1451 unsigned long pfn; 1452 int r; 1453 1454 r = follow_pfn(vma, addr, &pfn); 1455 if (r) { 1456 /* 1457 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 1458 * not call the fault handler, so do it here. 1459 */ 1460 bool unlocked = false; 1461 r = fixup_user_fault(current, current->mm, addr, 1462 (write_fault ? FAULT_FLAG_WRITE : 0), 1463 &unlocked); 1464 if (unlocked) 1465 return -EAGAIN; 1466 if (r) 1467 return r; 1468 1469 r = follow_pfn(vma, addr, &pfn); 1470 if (r) 1471 return r; 1472 1473 } 1474 1475 1476 /* 1477 * Get a reference here because callers of *hva_to_pfn* and 1478 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 1479 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 1480 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will 1481 * simply do nothing for reserved pfns. 1482 * 1483 * Whoever called remap_pfn_range is also going to call e.g. 1484 * unmap_mapping_range before the underlying pages are freed, 1485 * causing a call to our MMU notifier. 1486 */ 1487 kvm_get_pfn(pfn); 1488 1489 *p_pfn = pfn; 1490 return 0; 1491 } 1492 1493 /* 1494 * Pin guest page in memory and return its pfn. 
1495 * @addr: host virtual address which maps memory to the guest 1496 * @atomic: whether this function can sleep 1497 * @async: whether this function need to wait IO complete if the 1498 * host page is not in the memory 1499 * @write_fault: whether we should get a writable host page 1500 * @writable: whether it allows to map a writable host page for !@write_fault 1501 * 1502 * The function will map a writable host page for these two cases: 1503 * 1): @write_fault = true 1504 * 2): @write_fault = false && @writable, @writable will tell the caller 1505 * whether the mapping is writable. 1506 */ 1507 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 1508 bool write_fault, bool *writable) 1509 { 1510 struct vm_area_struct *vma; 1511 kvm_pfn_t pfn = 0; 1512 int npages, r; 1513 1514 /* we can do it either atomically or asynchronously, not both */ 1515 BUG_ON(atomic && async); 1516 1517 if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) 1518 return pfn; 1519 1520 if (atomic) 1521 return KVM_PFN_ERR_FAULT; 1522 1523 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 1524 if (npages == 1) 1525 return pfn; 1526 1527 down_read(¤t->mm->mmap_sem); 1528 if (npages == -EHWPOISON || 1529 (!async && check_user_page_hwpoison(addr))) { 1530 pfn = KVM_PFN_ERR_HWPOISON; 1531 goto exit; 1532 } 1533 1534 retry: 1535 vma = find_vma_intersection(current->mm, addr, addr + 1); 1536 1537 if (vma == NULL) 1538 pfn = KVM_PFN_ERR_FAULT; 1539 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 1540 r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn); 1541 if (r == -EAGAIN) 1542 goto retry; 1543 if (r < 0) 1544 pfn = KVM_PFN_ERR_FAULT; 1545 } else { 1546 if (async && vma_is_valid(vma, write_fault)) 1547 *async = true; 1548 pfn = KVM_PFN_ERR_FAULT; 1549 } 1550 exit: 1551 up_read(¤t->mm->mmap_sem); 1552 return pfn; 1553 } 1554 1555 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, 1556 bool atomic, bool *async, bool write_fault, 1557 bool *writable) 1558 { 1559 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 1560 1561 if (addr == KVM_HVA_ERR_RO_BAD) { 1562 if (writable) 1563 *writable = false; 1564 return KVM_PFN_ERR_RO_FAULT; 1565 } 1566 1567 if (kvm_is_error_hva(addr)) { 1568 if (writable) 1569 *writable = false; 1570 return KVM_PFN_NOSLOT; 1571 } 1572 1573 /* Do not map writable pfn in the readonly memslot. 
*/ 1574 if (writable && memslot_is_readonly(slot)) { 1575 *writable = false; 1576 writable = NULL; 1577 } 1578 1579 return hva_to_pfn(addr, atomic, async, write_fault, 1580 writable); 1581 } 1582 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 1583 1584 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1585 bool *writable) 1586 { 1587 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 1588 write_fault, writable); 1589 } 1590 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 1591 1592 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1593 { 1594 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); 1595 } 1596 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 1597 1598 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 1599 { 1600 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); 1601 } 1602 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 1603 1604 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) 1605 { 1606 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); 1607 } 1608 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); 1609 1610 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 1611 { 1612 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1613 } 1614 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 1615 1616 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 1617 { 1618 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 1619 } 1620 EXPORT_SYMBOL_GPL(gfn_to_pfn); 1621 1622 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 1623 { 1624 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1625 } 1626 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 1627 1628 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1629 struct page **pages, int nr_pages) 1630 { 1631 unsigned long addr; 1632 gfn_t entry; 1633 1634 addr = gfn_to_hva_many(slot, gfn, &entry); 1635 if (kvm_is_error_hva(addr)) 1636 return -1; 1637 1638 if (entry < nr_pages) 1639 return 0; 1640 1641 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1642 } 1643 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1644 1645 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 1646 { 1647 if (is_error_noslot_pfn(pfn)) 1648 return KVM_ERR_PTR_BAD_PAGE; 1649 1650 if (kvm_is_reserved_pfn(pfn)) { 1651 WARN_ON(1); 1652 return KVM_ERR_PTR_BAD_PAGE; 1653 } 1654 1655 return pfn_to_page(pfn); 1656 } 1657 1658 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1659 { 1660 kvm_pfn_t pfn; 1661 1662 pfn = gfn_to_pfn(kvm, gfn); 1663 1664 return kvm_pfn_to_page(pfn); 1665 } 1666 EXPORT_SYMBOL_GPL(gfn_to_page); 1667 1668 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1669 { 1670 kvm_pfn_t pfn; 1671 1672 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 1673 1674 return kvm_pfn_to_page(pfn); 1675 } 1676 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 1677 1678 void kvm_release_page_clean(struct page *page) 1679 { 1680 WARN_ON(is_error_page(page)); 1681 1682 kvm_release_pfn_clean(page_to_pfn(page)); 1683 } 1684 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1685 1686 void kvm_release_pfn_clean(kvm_pfn_t pfn) 1687 { 1688 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1689 put_page(pfn_to_page(pfn)); 1690 } 1691 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1692 1693 void kvm_release_page_dirty(struct page *page) 1694 { 1695 WARN_ON(is_error_page(page)); 1696 1697 kvm_release_pfn_dirty(page_to_pfn(page)); 1698 } 1699 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1700 1701 static void 
kvm_release_pfn_dirty(kvm_pfn_t pfn) 1702 { 1703 kvm_set_pfn_dirty(pfn); 1704 kvm_release_pfn_clean(pfn); 1705 } 1706 1707 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 1708 { 1709 if (!kvm_is_reserved_pfn(pfn)) { 1710 struct page *page = pfn_to_page(pfn); 1711 1712 if (!PageReserved(page)) 1713 SetPageDirty(page); 1714 } 1715 } 1716 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1717 1718 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 1719 { 1720 if (!kvm_is_reserved_pfn(pfn)) 1721 mark_page_accessed(pfn_to_page(pfn)); 1722 } 1723 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1724 1725 void kvm_get_pfn(kvm_pfn_t pfn) 1726 { 1727 if (!kvm_is_reserved_pfn(pfn)) 1728 get_page(pfn_to_page(pfn)); 1729 } 1730 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1731 1732 static int next_segment(unsigned long len, int offset) 1733 { 1734 if (len > PAGE_SIZE - offset) 1735 return PAGE_SIZE - offset; 1736 else 1737 return len; 1738 } 1739 1740 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 1741 void *data, int offset, int len) 1742 { 1743 int r; 1744 unsigned long addr; 1745 1746 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1747 if (kvm_is_error_hva(addr)) 1748 return -EFAULT; 1749 r = __copy_from_user(data, (void __user *)addr + offset, len); 1750 if (r) 1751 return -EFAULT; 1752 return 0; 1753 } 1754 1755 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1756 int len) 1757 { 1758 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1759 1760 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1761 } 1762 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1763 1764 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 1765 int offset, int len) 1766 { 1767 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1768 1769 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1770 } 1771 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 1772 1773 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1774 { 1775 gfn_t gfn = gpa >> PAGE_SHIFT; 1776 int seg; 1777 int offset = offset_in_page(gpa); 1778 int ret; 1779 1780 while ((seg = next_segment(len, offset)) != 0) { 1781 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1782 if (ret < 0) 1783 return ret; 1784 offset = 0; 1785 len -= seg; 1786 data += seg; 1787 ++gfn; 1788 } 1789 return 0; 1790 } 1791 EXPORT_SYMBOL_GPL(kvm_read_guest); 1792 1793 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 1794 { 1795 gfn_t gfn = gpa >> PAGE_SHIFT; 1796 int seg; 1797 int offset = offset_in_page(gpa); 1798 int ret; 1799 1800 while ((seg = next_segment(len, offset)) != 0) { 1801 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 1802 if (ret < 0) 1803 return ret; 1804 offset = 0; 1805 len -= seg; 1806 data += seg; 1807 ++gfn; 1808 } 1809 return 0; 1810 } 1811 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 1812 1813 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1814 void *data, int offset, unsigned long len) 1815 { 1816 int r; 1817 unsigned long addr; 1818 1819 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1820 if (kvm_is_error_hva(addr)) 1821 return -EFAULT; 1822 pagefault_disable(); 1823 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1824 pagefault_enable(); 1825 if (r) 1826 return -EFAULT; 1827 return 0; 1828 } 1829 1830 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1831 unsigned long len) 1832 { 1833 gfn_t gfn = gpa >> PAGE_SHIFT; 1834 struct kvm_memory_slot *slot = 
gfn_to_memslot(kvm, gfn); 1835 int offset = offset_in_page(gpa); 1836 1837 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1838 } 1839 EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); 1840 1841 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 1842 void *data, unsigned long len) 1843 { 1844 gfn_t gfn = gpa >> PAGE_SHIFT; 1845 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1846 int offset = offset_in_page(gpa); 1847 1848 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1849 } 1850 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 1851 1852 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, 1853 const void *data, int offset, int len) 1854 { 1855 int r; 1856 unsigned long addr; 1857 1858 addr = gfn_to_hva_memslot(memslot, gfn); 1859 if (kvm_is_error_hva(addr)) 1860 return -EFAULT; 1861 r = __copy_to_user((void __user *)addr + offset, data, len); 1862 if (r) 1863 return -EFAULT; 1864 mark_page_dirty_in_slot(memslot, gfn); 1865 return 0; 1866 } 1867 1868 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 1869 const void *data, int offset, int len) 1870 { 1871 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1872 1873 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1874 } 1875 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1876 1877 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 1878 const void *data, int offset, int len) 1879 { 1880 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1881 1882 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1883 } 1884 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 1885 1886 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1887 unsigned long len) 1888 { 1889 gfn_t gfn = gpa >> PAGE_SHIFT; 1890 int seg; 1891 int offset = offset_in_page(gpa); 1892 int ret; 1893 1894 while ((seg = next_segment(len, offset)) != 0) { 1895 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1896 if (ret < 0) 1897 return ret; 1898 offset = 0; 1899 len -= seg; 1900 data += seg; 1901 ++gfn; 1902 } 1903 return 0; 1904 } 1905 EXPORT_SYMBOL_GPL(kvm_write_guest); 1906 1907 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 1908 unsigned long len) 1909 { 1910 gfn_t gfn = gpa >> PAGE_SHIFT; 1911 int seg; 1912 int offset = offset_in_page(gpa); 1913 int ret; 1914 1915 while ((seg = next_segment(len, offset)) != 0) { 1916 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 1917 if (ret < 0) 1918 return ret; 1919 offset = 0; 1920 len -= seg; 1921 data += seg; 1922 ++gfn; 1923 } 1924 return 0; 1925 } 1926 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 1927 1928 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1929 gpa_t gpa, unsigned long len) 1930 { 1931 struct kvm_memslots *slots = kvm_memslots(kvm); 1932 int offset = offset_in_page(gpa); 1933 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1934 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1935 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1936 gfn_t nr_pages_avail; 1937 1938 ghc->gpa = gpa; 1939 ghc->generation = slots->generation; 1940 ghc->len = len; 1941 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1942 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); 1943 if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { 1944 ghc->hva += offset; 1945 } else { 1946 /* 1947 * If the requested region crosses two memslots, we still 1948 * verify that the entire region is valid here. 
1949 */ 1950 while (start_gfn <= end_gfn) { 1951 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1952 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1953 &nr_pages_avail); 1954 if (kvm_is_error_hva(ghc->hva)) 1955 return -EFAULT; 1956 start_gfn += nr_pages_avail; 1957 } 1958 /* Use the slow path for cross page reads and writes. */ 1959 ghc->memslot = NULL; 1960 } 1961 return 0; 1962 } 1963 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1964 1965 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1966 void *data, unsigned long len) 1967 { 1968 struct kvm_memslots *slots = kvm_memslots(kvm); 1969 int r; 1970 1971 BUG_ON(len > ghc->len); 1972 1973 if (slots->generation != ghc->generation) 1974 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1975 1976 if (unlikely(!ghc->memslot)) 1977 return kvm_write_guest(kvm, ghc->gpa, data, len); 1978 1979 if (kvm_is_error_hva(ghc->hva)) 1980 return -EFAULT; 1981 1982 r = __copy_to_user((void __user *)ghc->hva, data, len); 1983 if (r) 1984 return -EFAULT; 1985 mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1986 1987 return 0; 1988 } 1989 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1990 1991 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1992 void *data, unsigned long len) 1993 { 1994 struct kvm_memslots *slots = kvm_memslots(kvm); 1995 int r; 1996 1997 BUG_ON(len > ghc->len); 1998 1999 if (slots->generation != ghc->generation) 2000 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 2001 2002 if (unlikely(!ghc->memslot)) 2003 return kvm_read_guest(kvm, ghc->gpa, data, len); 2004 2005 if (kvm_is_error_hva(ghc->hva)) 2006 return -EFAULT; 2007 2008 r = __copy_from_user(data, (void __user *)ghc->hva, len); 2009 if (r) 2010 return -EFAULT; 2011 2012 return 0; 2013 } 2014 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 2015 2016 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 2017 { 2018 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 2019 2020 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 2021 } 2022 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 2023 2024 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 2025 { 2026 gfn_t gfn = gpa >> PAGE_SHIFT; 2027 int seg; 2028 int offset = offset_in_page(gpa); 2029 int ret; 2030 2031 while ((seg = next_segment(len, offset)) != 0) { 2032 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 2033 if (ret < 0) 2034 return ret; 2035 offset = 0; 2036 len -= seg; 2037 ++gfn; 2038 } 2039 return 0; 2040 } 2041 EXPORT_SYMBOL_GPL(kvm_clear_guest); 2042 2043 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 2044 gfn_t gfn) 2045 { 2046 if (memslot && memslot->dirty_bitmap) { 2047 unsigned long rel_gfn = gfn - memslot->base_gfn; 2048 2049 set_bit_le(rel_gfn, memslot->dirty_bitmap); 2050 } 2051 } 2052 2053 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 2054 { 2055 struct kvm_memory_slot *memslot; 2056 2057 memslot = gfn_to_memslot(kvm, gfn); 2058 mark_page_dirty_in_slot(memslot, gfn); 2059 } 2060 EXPORT_SYMBOL_GPL(mark_page_dirty); 2061 2062 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 2063 { 2064 struct kvm_memory_slot *memslot; 2065 2066 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2067 mark_page_dirty_in_slot(memslot, gfn); 2068 } 2069 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 2070 2071 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 2072 { 2073 unsigned int old, val, grow; 2074 2075 old = val = vcpu->halt_poll_ns; 2076 grow = 
READ_ONCE(halt_poll_ns_grow); 2077 /* 10us base */ 2078 if (val == 0 && grow) 2079 val = 10000; 2080 else 2081 val *= grow; 2082 2083 if (val > halt_poll_ns) 2084 val = halt_poll_ns; 2085 2086 vcpu->halt_poll_ns = val; 2087 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 2088 } 2089 2090 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 2091 { 2092 unsigned int old, val, shrink; 2093 2094 old = val = vcpu->halt_poll_ns; 2095 shrink = READ_ONCE(halt_poll_ns_shrink); 2096 if (shrink == 0) 2097 val = 0; 2098 else 2099 val /= shrink; 2100 2101 vcpu->halt_poll_ns = val; 2102 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 2103 } 2104 2105 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 2106 { 2107 if (kvm_arch_vcpu_runnable(vcpu)) { 2108 kvm_make_request(KVM_REQ_UNHALT, vcpu); 2109 return -EINTR; 2110 } 2111 if (kvm_cpu_has_pending_timer(vcpu)) 2112 return -EINTR; 2113 if (signal_pending(current)) 2114 return -EINTR; 2115 2116 return 0; 2117 } 2118 2119 /* 2120 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 2121 */ 2122 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 2123 { 2124 ktime_t start, cur; 2125 DECLARE_SWAITQUEUE(wait); 2126 bool waited = false; 2127 u64 block_ns; 2128 2129 start = cur = ktime_get(); 2130 if (vcpu->halt_poll_ns) { 2131 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2132 2133 ++vcpu->stat.halt_attempted_poll; 2134 do { 2135 /* 2136 * This sets KVM_REQ_UNHALT if an interrupt 2137 * arrives. 2138 */ 2139 if (kvm_vcpu_check_block(vcpu) < 0) { 2140 ++vcpu->stat.halt_successful_poll; 2141 if (!vcpu_valid_wakeup(vcpu)) 2142 ++vcpu->stat.halt_poll_invalid; 2143 goto out; 2144 } 2145 cur = ktime_get(); 2146 } while (single_task_running() && ktime_before(cur, stop)); 2147 } 2148 2149 kvm_arch_vcpu_blocking(vcpu); 2150 2151 for (;;) { 2152 prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2153 2154 if (kvm_vcpu_check_block(vcpu) < 0) 2155 break; 2156 2157 waited = true; 2158 schedule(); 2159 } 2160 2161 finish_swait(&vcpu->wq, &wait); 2162 cur = ktime_get(); 2163 2164 kvm_arch_vcpu_unblocking(vcpu); 2165 out: 2166 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2167 2168 if (!vcpu_valid_wakeup(vcpu)) 2169 shrink_halt_poll_ns(vcpu); 2170 else if (halt_poll_ns) { 2171 if (block_ns <= vcpu->halt_poll_ns) 2172 ; 2173 /* we had a long block, shrink polling */ 2174 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2175 shrink_halt_poll_ns(vcpu); 2176 /* we had a short halt and our poll time is too small */ 2177 else if (vcpu->halt_poll_ns < halt_poll_ns && 2178 block_ns < halt_poll_ns) 2179 grow_halt_poll_ns(vcpu); 2180 } else 2181 vcpu->halt_poll_ns = 0; 2182 2183 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 2184 kvm_arch_vcpu_block_finish(vcpu); 2185 } 2186 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2187 2188 #ifndef CONFIG_S390 2189 void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 2190 { 2191 struct swait_queue_head *wqp; 2192 2193 wqp = kvm_arch_vcpu_wq(vcpu); 2194 if (swait_active(wqp)) { 2195 swake_up(wqp); 2196 ++vcpu->stat.halt_wakeup; 2197 } 2198 2199 } 2200 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 2201 2202 /* 2203 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 
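 *
 * A halted VCPU is simply woken via its swait queue; a VCPU that is
 * currently executing guest code is forced to exit with an IPI
 * (smp_send_reschedule()) when the architecture reports that a kick
 * is required.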
2204 */ 2205 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2206 { 2207 int me; 2208 int cpu = vcpu->cpu; 2209 2210 kvm_vcpu_wake_up(vcpu); 2211 me = get_cpu(); 2212 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2213 if (kvm_arch_vcpu_should_kick(vcpu)) 2214 smp_send_reschedule(cpu); 2215 put_cpu(); 2216 } 2217 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2218 #endif /* !CONFIG_S390 */ 2219 2220 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2221 { 2222 struct pid *pid; 2223 struct task_struct *task = NULL; 2224 int ret = 0; 2225 2226 rcu_read_lock(); 2227 pid = rcu_dereference(target->pid); 2228 if (pid) 2229 task = get_pid_task(pid, PIDTYPE_PID); 2230 rcu_read_unlock(); 2231 if (!task) 2232 return ret; 2233 ret = yield_to(task, 1); 2234 put_task_struct(task); 2235 2236 return ret; 2237 } 2238 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2239 2240 /* 2241 * Helper that checks whether a VCPU is eligible for directed yield. 2242 * The most eligible candidate to yield to is decided by the following heuristics: 2243 * 2244 * (a) A VCPU which has not done a PLE exit or had CPU relax intercepted recently 2245 * (a preempted lock holder), indicated by @in_spin_loop. 2246 * This is set at the beginning and cleared at the end of the interception/PLE handler. 2247 * 2248 * (b) A VCPU which has done a PLE exit/CPU relax intercept but did not get a 2249 * chance last time (it has most likely become eligible now, since we probably 2250 * yielded to the lock holder in the last iteration). This is tracked by toggling 2251 * @dy_eligible each time a VCPU is checked for eligibility. 2252 * 2253 * Yielding to a recently PLE-exited/CPU relax intercepted VCPU before yielding 2254 * to a preempted lock holder could result in the wrong VCPU being selected and CPU 2255 * time being burned. Giving priority to a potential lock holder improves lock 2256 * progress. 2257 * 2258 * Since the algorithm is based on heuristics, accessing another VCPU's data without 2259 * locking does no harm. At worst we try to yield to the same VCPU, fail, 2260 * and continue with the next VCPU, and so on. 2261 */ 2262 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2263 { 2264 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2265 bool eligible; 2266 2267 eligible = !vcpu->spin_loop.in_spin_loop || 2268 vcpu->spin_loop.dy_eligible; 2269 2270 if (vcpu->spin_loop.in_spin_loop) 2271 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2272 2273 return eligible; 2274 #else 2275 return true; 2276 #endif 2277 } 2278 2279 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2280 { 2281 struct kvm *kvm = me->kvm; 2282 struct kvm_vcpu *vcpu; 2283 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2284 int yielded = 0; 2285 int try = 3; 2286 int pass; 2287 int i; 2288 2289 kvm_vcpu_set_in_spin_loop(me, true); 2290 /* 2291 * We boost the priority of a VCPU that is runnable but not 2292 * currently running, because it got preempted by something 2293 * else and called schedule in __vcpu_run. Hopefully that 2294 * VCPU is holding the lock that we need and will release it. 2295 * We approximate round-robin by starting at the last boosted VCPU.
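 *
 * For example (illustrative only): with eight online VCPUs and
 * last_boosted_vcpu == 3, pass 0 of the loop below considers VCPUs
 * 4..7 and pass 1 considers VCPUs 0..3, so each of the other VCPUs
 * is considered at most once per call.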
2296 */ 2297 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2298 kvm_for_each_vcpu(i, vcpu, kvm) { 2299 if (!pass && i <= last_boosted_vcpu) { 2300 i = last_boosted_vcpu; 2301 continue; 2302 } else if (pass && i > last_boosted_vcpu) 2303 break; 2304 if (!ACCESS_ONCE(vcpu->preempted)) 2305 continue; 2306 if (vcpu == me) 2307 continue; 2308 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2309 continue; 2310 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2311 continue; 2312 2313 yielded = kvm_vcpu_yield_to(vcpu); 2314 if (yielded > 0) { 2315 kvm->last_boosted_vcpu = i; 2316 break; 2317 } else if (yielded < 0) { 2318 try--; 2319 if (!try) 2320 break; 2321 } 2322 } 2323 } 2324 kvm_vcpu_set_in_spin_loop(me, false); 2325 2326 /* Ensure vcpu is not eligible during next spinloop */ 2327 kvm_vcpu_set_dy_eligible(me, false); 2328 } 2329 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2330 2331 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2332 { 2333 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 2334 struct page *page; 2335 2336 if (vmf->pgoff == 0) 2337 page = virt_to_page(vcpu->run); 2338 #ifdef CONFIG_X86 2339 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2340 page = virt_to_page(vcpu->arch.pio_data); 2341 #endif 2342 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2343 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2344 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2345 #endif 2346 else 2347 return kvm_arch_vcpu_fault(vcpu, vmf); 2348 get_page(page); 2349 vmf->page = page; 2350 return 0; 2351 } 2352 2353 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2354 .fault = kvm_vcpu_fault, 2355 }; 2356 2357 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2358 { 2359 vma->vm_ops = &kvm_vcpu_vm_ops; 2360 return 0; 2361 } 2362 2363 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2364 { 2365 struct kvm_vcpu *vcpu = filp->private_data; 2366 2367 kvm_put_kvm(vcpu->kvm); 2368 return 0; 2369 } 2370 2371 static struct file_operations kvm_vcpu_fops = { 2372 .release = kvm_vcpu_release, 2373 .unlocked_ioctl = kvm_vcpu_ioctl, 2374 #ifdef CONFIG_KVM_COMPAT 2375 .compat_ioctl = kvm_vcpu_compat_ioctl, 2376 #endif 2377 .mmap = kvm_vcpu_mmap, 2378 .llseek = noop_llseek, 2379 }; 2380 2381 /* 2382 * Allocates an inode for the vcpu. 2383 */ 2384 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2385 { 2386 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2387 } 2388 2389 /* 2390 * Creates some virtual cpus. Good luck creating more than one. 
2391 */ 2392 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2393 { 2394 int r; 2395 struct kvm_vcpu *vcpu; 2396 2397 if (id >= KVM_MAX_VCPU_ID) 2398 return -EINVAL; 2399 2400 mutex_lock(&kvm->lock); 2401 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 2402 mutex_unlock(&kvm->lock); 2403 return -EINVAL; 2404 } 2405 2406 kvm->created_vcpus++; 2407 mutex_unlock(&kvm->lock); 2408 2409 vcpu = kvm_arch_vcpu_create(kvm, id); 2410 if (IS_ERR(vcpu)) { 2411 r = PTR_ERR(vcpu); 2412 goto vcpu_decrement; 2413 } 2414 2415 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2416 2417 r = kvm_arch_vcpu_setup(vcpu); 2418 if (r) 2419 goto vcpu_destroy; 2420 2421 mutex_lock(&kvm->lock); 2422 if (kvm_get_vcpu_by_id(kvm, id)) { 2423 r = -EEXIST; 2424 goto unlock_vcpu_destroy; 2425 } 2426 2427 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2428 2429 /* Now it's all set up, let userspace reach it */ 2430 kvm_get_kvm(kvm); 2431 r = create_vcpu_fd(vcpu); 2432 if (r < 0) { 2433 kvm_put_kvm(kvm); 2434 goto unlock_vcpu_destroy; 2435 } 2436 2437 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2438 2439 /* 2440 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2441 * before kvm->online_vcpu's incremented value. 2442 */ 2443 smp_wmb(); 2444 atomic_inc(&kvm->online_vcpus); 2445 2446 mutex_unlock(&kvm->lock); 2447 kvm_arch_vcpu_postcreate(vcpu); 2448 return r; 2449 2450 unlock_vcpu_destroy: 2451 mutex_unlock(&kvm->lock); 2452 vcpu_destroy: 2453 kvm_arch_vcpu_destroy(vcpu); 2454 vcpu_decrement: 2455 mutex_lock(&kvm->lock); 2456 kvm->created_vcpus--; 2457 mutex_unlock(&kvm->lock); 2458 return r; 2459 } 2460 2461 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2462 { 2463 if (sigset) { 2464 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2465 vcpu->sigset_active = 1; 2466 vcpu->sigset = *sigset; 2467 } else 2468 vcpu->sigset_active = 0; 2469 return 0; 2470 } 2471 2472 static long kvm_vcpu_ioctl(struct file *filp, 2473 unsigned int ioctl, unsigned long arg) 2474 { 2475 struct kvm_vcpu *vcpu = filp->private_data; 2476 void __user *argp = (void __user *)arg; 2477 int r; 2478 struct kvm_fpu *fpu = NULL; 2479 struct kvm_sregs *kvm_sregs = NULL; 2480 2481 if (vcpu->kvm->mm != current->mm) 2482 return -EIO; 2483 2484 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2485 return -EINVAL; 2486 2487 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2488 /* 2489 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2490 * so vcpu_load() would break it. 2491 */ 2492 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2493 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2494 #endif 2495 2496 2497 r = vcpu_load(vcpu); 2498 if (r) 2499 return r; 2500 switch (ioctl) { 2501 case KVM_RUN: 2502 r = -EINVAL; 2503 if (arg) 2504 goto out; 2505 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2506 /* The thread running this VCPU changed. 
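 * Publish the new struct pid with rcu_assign_pointer() and wait for
 * any RCU readers (such as kvm_vcpu_yield_to()) to leave their
 * read-side critical sections before put_pid() drops the old
 * reference.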
*/ 2507 struct pid *oldpid = vcpu->pid; 2508 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2509 2510 rcu_assign_pointer(vcpu->pid, newpid); 2511 if (oldpid) 2512 synchronize_rcu(); 2513 put_pid(oldpid); 2514 } 2515 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2516 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2517 break; 2518 case KVM_GET_REGS: { 2519 struct kvm_regs *kvm_regs; 2520 2521 r = -ENOMEM; 2522 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2523 if (!kvm_regs) 2524 goto out; 2525 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2526 if (r) 2527 goto out_free1; 2528 r = -EFAULT; 2529 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2530 goto out_free1; 2531 r = 0; 2532 out_free1: 2533 kfree(kvm_regs); 2534 break; 2535 } 2536 case KVM_SET_REGS: { 2537 struct kvm_regs *kvm_regs; 2538 2539 r = -ENOMEM; 2540 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2541 if (IS_ERR(kvm_regs)) { 2542 r = PTR_ERR(kvm_regs); 2543 goto out; 2544 } 2545 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2546 kfree(kvm_regs); 2547 break; 2548 } 2549 case KVM_GET_SREGS: { 2550 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2551 r = -ENOMEM; 2552 if (!kvm_sregs) 2553 goto out; 2554 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2555 if (r) 2556 goto out; 2557 r = -EFAULT; 2558 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2559 goto out; 2560 r = 0; 2561 break; 2562 } 2563 case KVM_SET_SREGS: { 2564 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2565 if (IS_ERR(kvm_sregs)) { 2566 r = PTR_ERR(kvm_sregs); 2567 kvm_sregs = NULL; 2568 goto out; 2569 } 2570 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2571 break; 2572 } 2573 case KVM_GET_MP_STATE: { 2574 struct kvm_mp_state mp_state; 2575 2576 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2577 if (r) 2578 goto out; 2579 r = -EFAULT; 2580 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2581 goto out; 2582 r = 0; 2583 break; 2584 } 2585 case KVM_SET_MP_STATE: { 2586 struct kvm_mp_state mp_state; 2587 2588 r = -EFAULT; 2589 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2590 goto out; 2591 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2592 break; 2593 } 2594 case KVM_TRANSLATE: { 2595 struct kvm_translation tr; 2596 2597 r = -EFAULT; 2598 if (copy_from_user(&tr, argp, sizeof(tr))) 2599 goto out; 2600 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2601 if (r) 2602 goto out; 2603 r = -EFAULT; 2604 if (copy_to_user(argp, &tr, sizeof(tr))) 2605 goto out; 2606 r = 0; 2607 break; 2608 } 2609 case KVM_SET_GUEST_DEBUG: { 2610 struct kvm_guest_debug dbg; 2611 2612 r = -EFAULT; 2613 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2614 goto out; 2615 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2616 break; 2617 } 2618 case KVM_SET_SIGNAL_MASK: { 2619 struct kvm_signal_mask __user *sigmask_arg = argp; 2620 struct kvm_signal_mask kvm_sigmask; 2621 sigset_t sigset, *p; 2622 2623 p = NULL; 2624 if (argp) { 2625 r = -EFAULT; 2626 if (copy_from_user(&kvm_sigmask, argp, 2627 sizeof(kvm_sigmask))) 2628 goto out; 2629 r = -EINVAL; 2630 if (kvm_sigmask.len != sizeof(sigset)) 2631 goto out; 2632 r = -EFAULT; 2633 if (copy_from_user(&sigset, sigmask_arg->sigset, 2634 sizeof(sigset))) 2635 goto out; 2636 p = &sigset; 2637 } 2638 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2639 break; 2640 } 2641 case KVM_GET_FPU: { 2642 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2643 r = -ENOMEM; 2644 if (!fpu) 2645 goto out; 2646 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2647 if 
(r) 2648 goto out; 2649 r = -EFAULT; 2650 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2651 goto out; 2652 r = 0; 2653 break; 2654 } 2655 case KVM_SET_FPU: { 2656 fpu = memdup_user(argp, sizeof(*fpu)); 2657 if (IS_ERR(fpu)) { 2658 r = PTR_ERR(fpu); 2659 fpu = NULL; 2660 goto out; 2661 } 2662 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2663 break; 2664 } 2665 default: 2666 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2667 } 2668 out: 2669 vcpu_put(vcpu); 2670 kfree(fpu); 2671 kfree(kvm_sregs); 2672 return r; 2673 } 2674 2675 #ifdef CONFIG_KVM_COMPAT 2676 static long kvm_vcpu_compat_ioctl(struct file *filp, 2677 unsigned int ioctl, unsigned long arg) 2678 { 2679 struct kvm_vcpu *vcpu = filp->private_data; 2680 void __user *argp = compat_ptr(arg); 2681 int r; 2682 2683 if (vcpu->kvm->mm != current->mm) 2684 return -EIO; 2685 2686 switch (ioctl) { 2687 case KVM_SET_SIGNAL_MASK: { 2688 struct kvm_signal_mask __user *sigmask_arg = argp; 2689 struct kvm_signal_mask kvm_sigmask; 2690 compat_sigset_t csigset; 2691 sigset_t sigset; 2692 2693 if (argp) { 2694 r = -EFAULT; 2695 if (copy_from_user(&kvm_sigmask, argp, 2696 sizeof(kvm_sigmask))) 2697 goto out; 2698 r = -EINVAL; 2699 if (kvm_sigmask.len != sizeof(csigset)) 2700 goto out; 2701 r = -EFAULT; 2702 if (copy_from_user(&csigset, sigmask_arg->sigset, 2703 sizeof(csigset))) 2704 goto out; 2705 sigset_from_compat(&sigset, &csigset); 2706 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2707 } else 2708 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2709 break; 2710 } 2711 default: 2712 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2713 } 2714 2715 out: 2716 return r; 2717 } 2718 #endif 2719 2720 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2721 int (*accessor)(struct kvm_device *dev, 2722 struct kvm_device_attr *attr), 2723 unsigned long arg) 2724 { 2725 struct kvm_device_attr attr; 2726 2727 if (!accessor) 2728 return -EPERM; 2729 2730 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2731 return -EFAULT; 2732 2733 return accessor(dev, &attr); 2734 } 2735 2736 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2737 unsigned long arg) 2738 { 2739 struct kvm_device *dev = filp->private_data; 2740 2741 switch (ioctl) { 2742 case KVM_SET_DEVICE_ATTR: 2743 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2744 case KVM_GET_DEVICE_ATTR: 2745 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2746 case KVM_HAS_DEVICE_ATTR: 2747 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2748 default: 2749 if (dev->ops->ioctl) 2750 return dev->ops->ioctl(dev, ioctl, arg); 2751 2752 return -ENOTTY; 2753 } 2754 } 2755 2756 static int kvm_device_release(struct inode *inode, struct file *filp) 2757 { 2758 struct kvm_device *dev = filp->private_data; 2759 struct kvm *kvm = dev->kvm; 2760 2761 kvm_put_kvm(kvm); 2762 return 0; 2763 } 2764 2765 static const struct file_operations kvm_device_fops = { 2766 .unlocked_ioctl = kvm_device_ioctl, 2767 #ifdef CONFIG_KVM_COMPAT 2768 .compat_ioctl = kvm_device_ioctl, 2769 #endif 2770 .release = kvm_device_release, 2771 }; 2772 2773 struct kvm_device *kvm_device_from_filp(struct file *filp) 2774 { 2775 if (filp->f_op != &kvm_device_fops) 2776 return NULL; 2777 2778 return filp->private_data; 2779 } 2780 2781 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2782 #ifdef CONFIG_KVM_MPIC 2783 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2784 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2785 #endif 2786 2787 #ifdef CONFIG_KVM_XICS 2788 [KVM_DEV_TYPE_XICS] 
= &kvm_xics_ops, 2789 #endif 2790 }; 2791 2792 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2793 { 2794 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2795 return -ENOSPC; 2796 2797 if (kvm_device_ops_table[type] != NULL) 2798 return -EEXIST; 2799 2800 kvm_device_ops_table[type] = ops; 2801 return 0; 2802 } 2803 2804 void kvm_unregister_device_ops(u32 type) 2805 { 2806 if (kvm_device_ops_table[type] != NULL) 2807 kvm_device_ops_table[type] = NULL; 2808 } 2809 2810 static int kvm_ioctl_create_device(struct kvm *kvm, 2811 struct kvm_create_device *cd) 2812 { 2813 struct kvm_device_ops *ops = NULL; 2814 struct kvm_device *dev; 2815 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2816 int ret; 2817 2818 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2819 return -ENODEV; 2820 2821 ops = kvm_device_ops_table[cd->type]; 2822 if (ops == NULL) 2823 return -ENODEV; 2824 2825 if (test) 2826 return 0; 2827 2828 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2829 if (!dev) 2830 return -ENOMEM; 2831 2832 dev->ops = ops; 2833 dev->kvm = kvm; 2834 2835 ret = ops->create(dev, cd->type); 2836 if (ret < 0) { 2837 kfree(dev); 2838 return ret; 2839 } 2840 2841 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2842 if (ret < 0) { 2843 ops->destroy(dev); 2844 return ret; 2845 } 2846 2847 list_add(&dev->vm_node, &kvm->devices); 2848 kvm_get_kvm(kvm); 2849 cd->fd = ret; 2850 return 0; 2851 } 2852 2853 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2854 { 2855 switch (arg) { 2856 case KVM_CAP_USER_MEMORY: 2857 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2858 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2859 case KVM_CAP_INTERNAL_ERROR_DATA: 2860 #ifdef CONFIG_HAVE_KVM_MSI 2861 case KVM_CAP_SIGNAL_MSI: 2862 #endif 2863 #ifdef CONFIG_HAVE_KVM_IRQFD 2864 case KVM_CAP_IRQFD: 2865 case KVM_CAP_IRQFD_RESAMPLE: 2866 #endif 2867 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2868 case KVM_CAP_CHECK_EXTENSION_VM: 2869 return 1; 2870 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2871 case KVM_CAP_IRQ_ROUTING: 2872 return KVM_MAX_IRQ_ROUTES; 2873 #endif 2874 #if KVM_ADDRESS_SPACE_NUM > 1 2875 case KVM_CAP_MULTI_ADDRESS_SPACE: 2876 return KVM_ADDRESS_SPACE_NUM; 2877 #endif 2878 case KVM_CAP_MAX_VCPU_ID: 2879 return KVM_MAX_VCPU_ID; 2880 default: 2881 break; 2882 } 2883 return kvm_vm_ioctl_check_extension(kvm, arg); 2884 } 2885 2886 static long kvm_vm_ioctl(struct file *filp, 2887 unsigned int ioctl, unsigned long arg) 2888 { 2889 struct kvm *kvm = filp->private_data; 2890 void __user *argp = (void __user *)arg; 2891 int r; 2892 2893 if (kvm->mm != current->mm) 2894 return -EIO; 2895 switch (ioctl) { 2896 case KVM_CREATE_VCPU: 2897 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2898 break; 2899 case KVM_SET_USER_MEMORY_REGION: { 2900 struct kvm_userspace_memory_region kvm_userspace_mem; 2901 2902 r = -EFAULT; 2903 if (copy_from_user(&kvm_userspace_mem, argp, 2904 sizeof(kvm_userspace_mem))) 2905 goto out; 2906 2907 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2908 break; 2909 } 2910 case KVM_GET_DIRTY_LOG: { 2911 struct kvm_dirty_log log; 2912 2913 r = -EFAULT; 2914 if (copy_from_user(&log, argp, sizeof(log))) 2915 goto out; 2916 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2917 break; 2918 } 2919 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2920 case KVM_REGISTER_COALESCED_MMIO: { 2921 struct kvm_coalesced_mmio_zone zone; 2922 2923 r = -EFAULT; 2924 if (copy_from_user(&zone, argp, sizeof(zone))) 2925 goto out; 2926 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2927 break; 
2928 } 2929 case KVM_UNREGISTER_COALESCED_MMIO: { 2930 struct kvm_coalesced_mmio_zone zone; 2931 2932 r = -EFAULT; 2933 if (copy_from_user(&zone, argp, sizeof(zone))) 2934 goto out; 2935 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2936 break; 2937 } 2938 #endif 2939 case KVM_IRQFD: { 2940 struct kvm_irqfd data; 2941 2942 r = -EFAULT; 2943 if (copy_from_user(&data, argp, sizeof(data))) 2944 goto out; 2945 r = kvm_irqfd(kvm, &data); 2946 break; 2947 } 2948 case KVM_IOEVENTFD: { 2949 struct kvm_ioeventfd data; 2950 2951 r = -EFAULT; 2952 if (copy_from_user(&data, argp, sizeof(data))) 2953 goto out; 2954 r = kvm_ioeventfd(kvm, &data); 2955 break; 2956 } 2957 #ifdef CONFIG_HAVE_KVM_MSI 2958 case KVM_SIGNAL_MSI: { 2959 struct kvm_msi msi; 2960 2961 r = -EFAULT; 2962 if (copy_from_user(&msi, argp, sizeof(msi))) 2963 goto out; 2964 r = kvm_send_userspace_msi(kvm, &msi); 2965 break; 2966 } 2967 #endif 2968 #ifdef __KVM_HAVE_IRQ_LINE 2969 case KVM_IRQ_LINE_STATUS: 2970 case KVM_IRQ_LINE: { 2971 struct kvm_irq_level irq_event; 2972 2973 r = -EFAULT; 2974 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 2975 goto out; 2976 2977 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2978 ioctl == KVM_IRQ_LINE_STATUS); 2979 if (r) 2980 goto out; 2981 2982 r = -EFAULT; 2983 if (ioctl == KVM_IRQ_LINE_STATUS) { 2984 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 2985 goto out; 2986 } 2987 2988 r = 0; 2989 break; 2990 } 2991 #endif 2992 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2993 case KVM_SET_GSI_ROUTING: { 2994 struct kvm_irq_routing routing; 2995 struct kvm_irq_routing __user *urouting; 2996 struct kvm_irq_routing_entry *entries = NULL; 2997 2998 r = -EFAULT; 2999 if (copy_from_user(&routing, argp, sizeof(routing))) 3000 goto out; 3001 r = -EINVAL; 3002 if (routing.nr > KVM_MAX_IRQ_ROUTES) 3003 goto out; 3004 if (routing.flags) 3005 goto out; 3006 if (routing.nr) { 3007 r = -ENOMEM; 3008 entries = vmalloc(routing.nr * sizeof(*entries)); 3009 if (!entries) 3010 goto out; 3011 r = -EFAULT; 3012 urouting = argp; 3013 if (copy_from_user(entries, urouting->entries, 3014 routing.nr * sizeof(*entries))) 3015 goto out_free_irq_routing; 3016 } 3017 r = kvm_set_irq_routing(kvm, entries, routing.nr, 3018 routing.flags); 3019 out_free_irq_routing: 3020 vfree(entries); 3021 break; 3022 } 3023 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 3024 case KVM_CREATE_DEVICE: { 3025 struct kvm_create_device cd; 3026 3027 r = -EFAULT; 3028 if (copy_from_user(&cd, argp, sizeof(cd))) 3029 goto out; 3030 3031 r = kvm_ioctl_create_device(kvm, &cd); 3032 if (r) 3033 goto out; 3034 3035 r = -EFAULT; 3036 if (copy_to_user(argp, &cd, sizeof(cd))) 3037 goto out; 3038 3039 r = 0; 3040 break; 3041 } 3042 case KVM_CHECK_EXTENSION: 3043 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 3044 break; 3045 default: 3046 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 3047 } 3048 out: 3049 return r; 3050 } 3051 3052 #ifdef CONFIG_KVM_COMPAT 3053 struct compat_kvm_dirty_log { 3054 __u32 slot; 3055 __u32 padding1; 3056 union { 3057 compat_uptr_t dirty_bitmap; /* one bit per page */ 3058 __u64 padding2; 3059 }; 3060 }; 3061 3062 static long kvm_vm_compat_ioctl(struct file *filp, 3063 unsigned int ioctl, unsigned long arg) 3064 { 3065 struct kvm *kvm = filp->private_data; 3066 int r; 3067 3068 if (kvm->mm != current->mm) 3069 return -EIO; 3070 switch (ioctl) { 3071 case KVM_GET_DIRTY_LOG: { 3072 struct compat_kvm_dirty_log compat_log; 3073 struct kvm_dirty_log log; 3074 3075 r = -EFAULT; 3076 if (copy_from_user(&compat_log, (void __user *)arg, 3077 
sizeof(compat_log))) 3078 goto out; 3079 log.slot = compat_log.slot; 3080 log.padding1 = compat_log.padding1; 3081 log.padding2 = compat_log.padding2; 3082 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 3083 3084 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 3085 break; 3086 } 3087 default: 3088 r = kvm_vm_ioctl(filp, ioctl, arg); 3089 } 3090 3091 out: 3092 return r; 3093 } 3094 #endif 3095 3096 static struct file_operations kvm_vm_fops = { 3097 .release = kvm_vm_release, 3098 .unlocked_ioctl = kvm_vm_ioctl, 3099 #ifdef CONFIG_KVM_COMPAT 3100 .compat_ioctl = kvm_vm_compat_ioctl, 3101 #endif 3102 .llseek = noop_llseek, 3103 }; 3104 3105 static int kvm_dev_ioctl_create_vm(unsigned long type) 3106 { 3107 int r; 3108 struct kvm *kvm; 3109 struct file *file; 3110 3111 kvm = kvm_create_vm(type); 3112 if (IS_ERR(kvm)) 3113 return PTR_ERR(kvm); 3114 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3115 r = kvm_coalesced_mmio_init(kvm); 3116 if (r < 0) { 3117 kvm_put_kvm(kvm); 3118 return r; 3119 } 3120 #endif 3121 r = get_unused_fd_flags(O_CLOEXEC); 3122 if (r < 0) { 3123 kvm_put_kvm(kvm); 3124 return r; 3125 } 3126 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 3127 if (IS_ERR(file)) { 3128 put_unused_fd(r); 3129 kvm_put_kvm(kvm); 3130 return PTR_ERR(file); 3131 } 3132 3133 if (kvm_create_vm_debugfs(kvm, r) < 0) { 3134 put_unused_fd(r); 3135 fput(file); 3136 return -ENOMEM; 3137 } 3138 3139 fd_install(r, file); 3140 return r; 3141 } 3142 3143 static long kvm_dev_ioctl(struct file *filp, 3144 unsigned int ioctl, unsigned long arg) 3145 { 3146 long r = -EINVAL; 3147 3148 switch (ioctl) { 3149 case KVM_GET_API_VERSION: 3150 if (arg) 3151 goto out; 3152 r = KVM_API_VERSION; 3153 break; 3154 case KVM_CREATE_VM: 3155 r = kvm_dev_ioctl_create_vm(arg); 3156 break; 3157 case KVM_CHECK_EXTENSION: 3158 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 3159 break; 3160 case KVM_GET_VCPU_MMAP_SIZE: 3161 if (arg) 3162 goto out; 3163 r = PAGE_SIZE; /* struct kvm_run */ 3164 #ifdef CONFIG_X86 3165 r += PAGE_SIZE; /* pio data page */ 3166 #endif 3167 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3168 r += PAGE_SIZE; /* coalesced mmio ring page */ 3169 #endif 3170 break; 3171 case KVM_TRACE_ENABLE: 3172 case KVM_TRACE_PAUSE: 3173 case KVM_TRACE_DISABLE: 3174 r = -EOPNOTSUPP; 3175 break; 3176 default: 3177 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3178 } 3179 out: 3180 return r; 3181 } 3182 3183 static struct file_operations kvm_chardev_ops = { 3184 .unlocked_ioctl = kvm_dev_ioctl, 3185 .compat_ioctl = kvm_dev_ioctl, 3186 .llseek = noop_llseek, 3187 }; 3188 3189 static struct miscdevice kvm_dev = { 3190 KVM_MINOR, 3191 "kvm", 3192 &kvm_chardev_ops, 3193 }; 3194 3195 static void hardware_enable_nolock(void *junk) 3196 { 3197 int cpu = raw_smp_processor_id(); 3198 int r; 3199 3200 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3201 return; 3202 3203 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3204 3205 r = kvm_arch_hardware_enable(); 3206 3207 if (r) { 3208 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3209 atomic_inc(&hardware_enable_failed); 3210 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3211 } 3212 } 3213 3214 static int kvm_starting_cpu(unsigned int cpu) 3215 { 3216 raw_spin_lock(&kvm_count_lock); 3217 if (kvm_usage_count) 3218 hardware_enable_nolock(NULL); 3219 raw_spin_unlock(&kvm_count_lock); 3220 return 0; 3221 } 3222 3223 static void hardware_disable_nolock(void *junk) 3224 { 3225 int cpu = raw_smp_processor_id(); 3226 3227 if (!cpumask_test_cpu(cpu, 
cpus_hardware_enabled)) 3228 return; 3229 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3230 kvm_arch_hardware_disable(); 3231 } 3232 3233 static int kvm_dying_cpu(unsigned int cpu) 3234 { 3235 raw_spin_lock(&kvm_count_lock); 3236 if (kvm_usage_count) 3237 hardware_disable_nolock(NULL); 3238 raw_spin_unlock(&kvm_count_lock); 3239 return 0; 3240 } 3241 3242 static void hardware_disable_all_nolock(void) 3243 { 3244 BUG_ON(!kvm_usage_count); 3245 3246 kvm_usage_count--; 3247 if (!kvm_usage_count) 3248 on_each_cpu(hardware_disable_nolock, NULL, 1); 3249 } 3250 3251 static void hardware_disable_all(void) 3252 { 3253 raw_spin_lock(&kvm_count_lock); 3254 hardware_disable_all_nolock(); 3255 raw_spin_unlock(&kvm_count_lock); 3256 } 3257 3258 static int hardware_enable_all(void) 3259 { 3260 int r = 0; 3261 3262 raw_spin_lock(&kvm_count_lock); 3263 3264 kvm_usage_count++; 3265 if (kvm_usage_count == 1) { 3266 atomic_set(&hardware_enable_failed, 0); 3267 on_each_cpu(hardware_enable_nolock, NULL, 1); 3268 3269 if (atomic_read(&hardware_enable_failed)) { 3270 hardware_disable_all_nolock(); 3271 r = -EBUSY; 3272 } 3273 } 3274 3275 raw_spin_unlock(&kvm_count_lock); 3276 3277 return r; 3278 } 3279 3280 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 3281 void *v) 3282 { 3283 /* 3284 * Some (well, at least mine) BIOSes hang on reboot if 3285 * the CPU is still in VMX root mode. 3286 * 3287 * Intel TXT also requires VMX to be disabled on all CPUs when the system shuts down. 3288 */ 3289 pr_info("kvm: exiting hardware virtualization\n"); 3290 kvm_rebooting = true; 3291 on_each_cpu(hardware_disable_nolock, NULL, 1); 3292 return NOTIFY_OK; 3293 } 3294 3295 static struct notifier_block kvm_reboot_notifier = { 3296 .notifier_call = kvm_reboot, 3297 .priority = 0, 3298 }; 3299 3300 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 3301 { 3302 int i; 3303 3304 for (i = 0; i < bus->dev_count; i++) { 3305 struct kvm_io_device *pos = bus->range[i].dev; 3306 3307 kvm_iodevice_destructor(pos); 3308 } 3309 kfree(bus); 3310 } 3311 3312 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3313 const struct kvm_io_range *r2) 3314 { 3315 gpa_t addr1 = r1->addr; 3316 gpa_t addr2 = r2->addr; 3317 3318 if (addr1 < addr2) 3319 return -1; 3320 3321 /* If r2->len == 0, match the exact address. If r2->len != 0, 3322 * accept any access that falls entirely within the registered range. Any order is acceptable for 3323 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 3324 * we process all of them.
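 *
 * For example (illustrative addresses): a device registered as
 * {.addr = 0x100, .len = 8} compares equal to an access
 * {.addr = 0x104, .len = 4} that lies entirely inside it, whereas a
 * zero-length registration {.addr = 0x200, .len = 0} compares equal
 * only to accesses starting exactly at 0x200, whatever their length.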
3325 */ 3326 if (r2->len) { 3327 addr1 += r1->len; 3328 addr2 += r2->len; 3329 } 3330 3331 if (addr1 > addr2) 3332 return 1; 3333 3334 return 0; 3335 } 3336 3337 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3338 { 3339 return kvm_io_bus_cmp(p1, p2); 3340 } 3341 3342 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3343 gpa_t addr, int len) 3344 { 3345 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3346 .addr = addr, 3347 .len = len, 3348 .dev = dev, 3349 }; 3350 3351 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3352 kvm_io_bus_sort_cmp, NULL); 3353 3354 return 0; 3355 } 3356 3357 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3358 gpa_t addr, int len) 3359 { 3360 struct kvm_io_range *range, key; 3361 int off; 3362 3363 key = (struct kvm_io_range) { 3364 .addr = addr, 3365 .len = len, 3366 }; 3367 3368 range = bsearch(&key, bus->range, bus->dev_count, 3369 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3370 if (range == NULL) 3371 return -ENOENT; 3372 3373 off = range - bus->range; 3374 3375 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3376 off--; 3377 3378 return off; 3379 } 3380 3381 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3382 struct kvm_io_range *range, const void *val) 3383 { 3384 int idx; 3385 3386 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3387 if (idx < 0) 3388 return -EOPNOTSUPP; 3389 3390 while (idx < bus->dev_count && 3391 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3392 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3393 range->len, val)) 3394 return idx; 3395 idx++; 3396 } 3397 3398 return -EOPNOTSUPP; 3399 } 3400 3401 /* kvm_io_bus_write - called under kvm->slots_lock */ 3402 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3403 int len, const void *val) 3404 { 3405 struct kvm_io_bus *bus; 3406 struct kvm_io_range range; 3407 int r; 3408 3409 range = (struct kvm_io_range) { 3410 .addr = addr, 3411 .len = len, 3412 }; 3413 3414 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3415 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3416 return r < 0 ? r : 0; 3417 } 3418 3419 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3420 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3421 gpa_t addr, int len, const void *val, long cookie) 3422 { 3423 struct kvm_io_bus *bus; 3424 struct kvm_io_range range; 3425 3426 range = (struct kvm_io_range) { 3427 .addr = addr, 3428 .len = len, 3429 }; 3430 3431 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3432 3433 /* First try the device referenced by cookie. */ 3434 if ((cookie >= 0) && (cookie < bus->dev_count) && 3435 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3436 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3437 val)) 3438 return cookie; 3439 3440 /* 3441 * cookie contained garbage; fall back to search and return the 3442 * correct cookie value. 
3443 */ 3444 return __kvm_io_bus_write(vcpu, bus, &range, val); 3445 } 3446 3447 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3448 struct kvm_io_range *range, void *val) 3449 { 3450 int idx; 3451 3452 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3453 if (idx < 0) 3454 return -EOPNOTSUPP; 3455 3456 while (idx < bus->dev_count && 3457 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3458 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3459 range->len, val)) 3460 return idx; 3461 idx++; 3462 } 3463 3464 return -EOPNOTSUPP; 3465 } 3466 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3467 3468 /* kvm_io_bus_read - called under kvm->slots_lock */ 3469 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3470 int len, void *val) 3471 { 3472 struct kvm_io_bus *bus; 3473 struct kvm_io_range range; 3474 int r; 3475 3476 range = (struct kvm_io_range) { 3477 .addr = addr, 3478 .len = len, 3479 }; 3480 3481 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3482 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3483 return r < 0 ? r : 0; 3484 } 3485 3486 3487 /* Caller must hold slots_lock. */ 3488 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3489 int len, struct kvm_io_device *dev) 3490 { 3491 struct kvm_io_bus *new_bus, *bus; 3492 3493 bus = kvm->buses[bus_idx]; 3494 /* exclude ioeventfd which is limited by maximum fd */ 3495 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3496 return -ENOSPC; 3497 3498 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3499 sizeof(struct kvm_io_range)), GFP_KERNEL); 3500 if (!new_bus) 3501 return -ENOMEM; 3502 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3503 sizeof(struct kvm_io_range))); 3504 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3505 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3506 synchronize_srcu_expedited(&kvm->srcu); 3507 kfree(bus); 3508 3509 return 0; 3510 } 3511 3512 /* Caller must hold slots_lock. 
*/ 3513 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3514 struct kvm_io_device *dev) 3515 { 3516 int i, r; 3517 struct kvm_io_bus *new_bus, *bus; 3518 3519 bus = kvm->buses[bus_idx]; 3520 r = -ENOENT; 3521 for (i = 0; i < bus->dev_count; i++) 3522 if (bus->range[i].dev == dev) { 3523 r = 0; 3524 break; 3525 } 3526 3527 if (r) 3528 return r; 3529 3530 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3531 sizeof(struct kvm_io_range)), GFP_KERNEL); 3532 if (!new_bus) 3533 return -ENOMEM; 3534 3535 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3536 new_bus->dev_count--; 3537 memcpy(new_bus->range + i, bus->range + i + 1, 3538 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3539 3540 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3541 synchronize_srcu_expedited(&kvm->srcu); 3542 kfree(bus); 3543 return r; 3544 } 3545 3546 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3547 gpa_t addr) 3548 { 3549 struct kvm_io_bus *bus; 3550 int dev_idx, srcu_idx; 3551 struct kvm_io_device *iodev = NULL; 3552 3553 srcu_idx = srcu_read_lock(&kvm->srcu); 3554 3555 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3556 3557 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3558 if (dev_idx < 0) 3559 goto out_unlock; 3560 3561 iodev = bus->range[dev_idx].dev; 3562 3563 out_unlock: 3564 srcu_read_unlock(&kvm->srcu, srcu_idx); 3565 3566 return iodev; 3567 } 3568 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 3569 3570 static int kvm_debugfs_open(struct inode *inode, struct file *file, 3571 int (*get)(void *, u64 *), int (*set)(void *, u64), 3572 const char *fmt) 3573 { 3574 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3575 inode->i_private; 3576 3577 /* The debugfs files are a reference to the kvm struct which 3578 * is still valid when kvm_destroy_vm is called. 3579 * To avoid the race between open and the removal of the debugfs 3580 * directory we test against the users count. 
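 *
 * atomic_add_unless(&kvm->users_count, 1, 0) below takes a reference
 * only while the count is still non-zero; once the last reference is
 * gone the open fails with -ENOENT rather than resurrecting a dying
 * VM.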
3581 */ 3582 if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0)) 3583 return -ENOENT; 3584 3585 if (simple_attr_open(inode, file, get, set, fmt)) { 3586 kvm_put_kvm(stat_data->kvm); 3587 return -ENOMEM; 3588 } 3589 3590 return 0; 3591 } 3592 3593 static int kvm_debugfs_release(struct inode *inode, struct file *file) 3594 { 3595 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3596 inode->i_private; 3597 3598 simple_attr_release(inode, file); 3599 kvm_put_kvm(stat_data->kvm); 3600 3601 return 0; 3602 } 3603 3604 static int vm_stat_get_per_vm(void *data, u64 *val) 3605 { 3606 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3607 3608 *val = *(u32 *)((void *)stat_data->kvm + stat_data->offset); 3609 3610 return 0; 3611 } 3612 3613 static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file) 3614 { 3615 __simple_attr_check_format("%llu\n", 0ull); 3616 return kvm_debugfs_open(inode, file, vm_stat_get_per_vm, 3617 NULL, "%llu\n"); 3618 } 3619 3620 static const struct file_operations vm_stat_get_per_vm_fops = { 3621 .owner = THIS_MODULE, 3622 .open = vm_stat_get_per_vm_open, 3623 .release = kvm_debugfs_release, 3624 .read = simple_attr_read, 3625 .write = simple_attr_write, 3626 .llseek = generic_file_llseek, 3627 }; 3628 3629 static int vcpu_stat_get_per_vm(void *data, u64 *val) 3630 { 3631 int i; 3632 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3633 struct kvm_vcpu *vcpu; 3634 3635 *val = 0; 3636 3637 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) 3638 *val += *(u32 *)((void *)vcpu + stat_data->offset); 3639 3640 return 0; 3641 } 3642 3643 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file) 3644 { 3645 __simple_attr_check_format("%llu\n", 0ull); 3646 return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm, 3647 NULL, "%llu\n"); 3648 } 3649 3650 static const struct file_operations vcpu_stat_get_per_vm_fops = { 3651 .owner = THIS_MODULE, 3652 .open = vcpu_stat_get_per_vm_open, 3653 .release = kvm_debugfs_release, 3654 .read = simple_attr_read, 3655 .write = simple_attr_write, 3656 .llseek = generic_file_llseek, 3657 }; 3658 3659 static const struct file_operations *stat_fops_per_vm[] = { 3660 [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops, 3661 [KVM_STAT_VM] = &vm_stat_get_per_vm_fops, 3662 }; 3663 3664 static int vm_stat_get(void *_offset, u64 *val) 3665 { 3666 unsigned offset = (long)_offset; 3667 struct kvm *kvm; 3668 struct kvm_stat_data stat_tmp = {.offset = offset}; 3669 u64 tmp_val; 3670 3671 *val = 0; 3672 spin_lock(&kvm_lock); 3673 list_for_each_entry(kvm, &vm_list, vm_list) { 3674 stat_tmp.kvm = kvm; 3675 vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3676 *val += tmp_val; 3677 } 3678 spin_unlock(&kvm_lock); 3679 return 0; 3680 } 3681 3682 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3683 3684 static int vcpu_stat_get(void *_offset, u64 *val) 3685 { 3686 unsigned offset = (long)_offset; 3687 struct kvm *kvm; 3688 struct kvm_stat_data stat_tmp = {.offset = offset}; 3689 u64 tmp_val; 3690 3691 *val = 0; 3692 spin_lock(&kvm_lock); 3693 list_for_each_entry(kvm, &vm_list, vm_list) { 3694 stat_tmp.kvm = kvm; 3695 vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3696 *val += tmp_val; 3697 } 3698 spin_unlock(&kvm_lock); 3699 return 0; 3700 } 3701 3702 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3703 3704 static const struct file_operations *stat_fops[] = { 3705 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3706 [KVM_STAT_VM] = &vm_stat_fops, 3707 }; 3708 3709 
static int kvm_init_debug(void) 3710 { 3711 int r = -EEXIST; 3712 struct kvm_stats_debugfs_item *p; 3713 3714 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3715 if (kvm_debugfs_dir == NULL) 3716 goto out; 3717 3718 kvm_debugfs_num_entries = 0; 3719 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 3720 if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3721 (void *)(long)p->offset, 3722 stat_fops[p->kind])) 3723 goto out_dir; 3724 } 3725 3726 return 0; 3727 3728 out_dir: 3729 debugfs_remove_recursive(kvm_debugfs_dir); 3730 out: 3731 return r; 3732 } 3733 3734 static int kvm_suspend(void) 3735 { 3736 if (kvm_usage_count) 3737 hardware_disable_nolock(NULL); 3738 return 0; 3739 } 3740 3741 static void kvm_resume(void) 3742 { 3743 if (kvm_usage_count) { 3744 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3745 hardware_enable_nolock(NULL); 3746 } 3747 } 3748 3749 static struct syscore_ops kvm_syscore_ops = { 3750 .suspend = kvm_suspend, 3751 .resume = kvm_resume, 3752 }; 3753 3754 static inline 3755 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3756 { 3757 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3758 } 3759 3760 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3761 { 3762 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3763 3764 if (vcpu->preempted) 3765 vcpu->preempted = false; 3766 3767 kvm_arch_sched_in(vcpu, cpu); 3768 3769 kvm_arch_vcpu_load(vcpu, cpu); 3770 } 3771 3772 static void kvm_sched_out(struct preempt_notifier *pn, 3773 struct task_struct *next) 3774 { 3775 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3776 3777 if (current->state == TASK_RUNNING) 3778 vcpu->preempted = true; 3779 kvm_arch_vcpu_put(vcpu); 3780 } 3781 3782 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3783 struct module *module) 3784 { 3785 int r; 3786 int cpu; 3787 3788 r = kvm_arch_init(opaque); 3789 if (r) 3790 goto out_fail; 3791 3792 /* 3793 * kvm_arch_init makes sure there's at most one caller 3794 * for architectures that support multiple implementations, 3795 * like intel and amd on x86. 3796 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3797 * conflicts in case kvm is already setup for another implementation. 3798 */ 3799 r = kvm_irqfd_init(); 3800 if (r) 3801 goto out_irqfd; 3802 3803 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3804 r = -ENOMEM; 3805 goto out_free_0; 3806 } 3807 3808 r = kvm_arch_hardware_setup(); 3809 if (r < 0) 3810 goto out_free_0a; 3811 3812 for_each_online_cpu(cpu) { 3813 smp_call_function_single(cpu, 3814 kvm_arch_check_processor_compat, 3815 &r, 1); 3816 if (r < 0) 3817 goto out_free_1; 3818 } 3819 3820 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING", 3821 kvm_starting_cpu, kvm_dying_cpu); 3822 if (r) 3823 goto out_free_2; 3824 register_reboot_notifier(&kvm_reboot_notifier); 3825 3826 /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ 3827 if (!vcpu_align) 3828 vcpu_align = __alignof__(struct kvm_vcpu); 3829 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3830 0, NULL); 3831 if (!kvm_vcpu_cache) { 3832 r = -ENOMEM; 3833 goto out_free_3; 3834 } 3835 3836 r = kvm_async_pf_init(); 3837 if (r) 3838 goto out_free; 3839 3840 kvm_chardev_ops.owner = module; 3841 kvm_vm_fops.owner = module; 3842 kvm_vcpu_fops.owner = module; 3843 3844 r = misc_register(&kvm_dev); 3845 if (r) { 3846 pr_err("kvm: misc device register failed\n"); 3847 goto out_unreg; 3848 } 3849 3850 register_syscore_ops(&kvm_syscore_ops); 3851 3852 kvm_preempt_ops.sched_in = kvm_sched_in; 3853 kvm_preempt_ops.sched_out = kvm_sched_out; 3854 3855 r = kvm_init_debug(); 3856 if (r) { 3857 pr_err("kvm: create debugfs files failed\n"); 3858 goto out_undebugfs; 3859 } 3860 3861 r = kvm_vfio_ops_init(); 3862 WARN_ON(r); 3863 3864 return 0; 3865 3866 out_undebugfs: 3867 unregister_syscore_ops(&kvm_syscore_ops); 3868 misc_deregister(&kvm_dev); 3869 out_unreg: 3870 kvm_async_pf_deinit(); 3871 out_free: 3872 kmem_cache_destroy(kvm_vcpu_cache); 3873 out_free_3: 3874 unregister_reboot_notifier(&kvm_reboot_notifier); 3875 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 3876 out_free_2: 3877 out_free_1: 3878 kvm_arch_hardware_unsetup(); 3879 out_free_0a: 3880 free_cpumask_var(cpus_hardware_enabled); 3881 out_free_0: 3882 kvm_irqfd_exit(); 3883 out_irqfd: 3884 kvm_arch_exit(); 3885 out_fail: 3886 return r; 3887 } 3888 EXPORT_SYMBOL_GPL(kvm_init); 3889 3890 void kvm_exit(void) 3891 { 3892 debugfs_remove_recursive(kvm_debugfs_dir); 3893 misc_deregister(&kvm_dev); 3894 kmem_cache_destroy(kvm_vcpu_cache); 3895 kvm_async_pf_deinit(); 3896 unregister_syscore_ops(&kvm_syscore_ops); 3897 unregister_reboot_notifier(&kvm_reboot_notifier); 3898 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 3899 on_each_cpu(hardware_disable_nolock, NULL, 1); 3900 kvm_arch_hardware_unsetup(); 3901 kvm_arch_exit(); 3902 kvm_irqfd_exit(); 3903 free_cpumask_var(cpus_hardware_enabled); 3904 kvm_vfio_ops_exit(); 3905 } 3906 EXPORT_SYMBOL_GPL(kvm_exit); 3907
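/*
 * Usage sketch (illustrative only, not part of this file): an
 * architecture module typically wires kvm_init()/kvm_exit() into its
 * module hooks, passing its vcpu structure size and alignment so the
 * "kvm_vcpu" kmem cache above is sized correctly.  The names below
 * are hypothetical:
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		return kvm_init(&my_arch_opaque, sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(my_arch_kvm_init);
 *	module_exit(my_arch_kvm_exit);
 */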