1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include <kvm/iodev.h> 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched/signal.h> 36 #include <linux/sched/mm.h> 37 #include <linux/sched/stat.h> 38 #include <linux/cpumask.h> 39 #include <linux/smp.h> 40 #include <linux/anon_inodes.h> 41 #include <linux/profile.h> 42 #include <linux/kvm_para.h> 43 #include <linux/pagemap.h> 44 #include <linux/mman.h> 45 #include <linux/swap.h> 46 #include <linux/bitops.h> 47 #include <linux/spinlock.h> 48 #include <linux/compat.h> 49 #include <linux/srcu.h> 50 #include <linux/hugetlb.h> 51 #include <linux/slab.h> 52 #include <linux/sort.h> 53 #include <linux/bsearch.h> 54 55 #include <asm/processor.h> 56 #include <asm/io.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 #include <asm/pgtable.h> 60 61 #include "coalesced_mmio.h" 62 #include "async_pf.h" 63 #include "vfio.h" 64 65 #define CREATE_TRACE_POINTS 66 #include <trace/events/kvm.h> 67 68 /* Worst case buffer size needed for holding an integer. */ 69 #define ITOA_MAX_LEN 12 70 71 MODULE_AUTHOR("Qumranet"); 72 MODULE_LICENSE("GPL"); 73 74 /* Architectures should define their poll value according to the halt latency */ 75 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 76 module_param(halt_poll_ns, uint, 0644); 77 EXPORT_SYMBOL_GPL(halt_poll_ns); 78 79 /* Default doubles per-vcpu halt_poll_ns. */ 80 unsigned int halt_poll_ns_grow = 2; 81 module_param(halt_poll_ns_grow, uint, 0644); 82 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 83 84 /* Default resets per-vcpu halt_poll_ns . 
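 * A shrink value of 0 (the default) drops the poll window straight back to
 * zero; a non-zero value divides the current window instead, e.g. with
 * halt_poll_ns_shrink = 2 a 20000 ns window becomes 10000 ns
 * (see shrink_halt_poll_ns() below).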
*/ 85 unsigned int halt_poll_ns_shrink; 86 module_param(halt_poll_ns_shrink, uint, 0644); 87 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 88 89 /* 90 * Ordering of locks: 91 * 92 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 93 */ 94 95 DEFINE_SPINLOCK(kvm_lock); 96 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 97 LIST_HEAD(vm_list); 98 99 static cpumask_var_t cpus_hardware_enabled; 100 static int kvm_usage_count; 101 static atomic_t hardware_enable_failed; 102 103 struct kmem_cache *kvm_vcpu_cache; 104 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 105 106 static __read_mostly struct preempt_ops kvm_preempt_ops; 107 108 struct dentry *kvm_debugfs_dir; 109 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 110 111 static int kvm_debugfs_num_entries; 112 static const struct file_operations *stat_fops_per_vm[]; 113 114 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 115 unsigned long arg); 116 #ifdef CONFIG_KVM_COMPAT 117 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 118 unsigned long arg); 119 #endif 120 static int hardware_enable_all(void); 121 static void hardware_disable_all(void); 122 123 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 124 125 static void kvm_release_pfn_dirty(kvm_pfn_t pfn); 126 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); 127 128 __visible bool kvm_rebooting; 129 EXPORT_SYMBOL_GPL(kvm_rebooting); 130 131 static bool largepages_enabled = true; 132 133 #define KVM_EVENT_CREATE_VM 0 134 #define KVM_EVENT_DESTROY_VM 1 135 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 136 static unsigned long long kvm_createvm_count; 137 static unsigned long long kvm_active_vms; 138 139 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 140 { 141 if (pfn_valid(pfn)) 142 return PageReserved(pfn_to_page(pfn)); 143 144 return true; 145 } 146 147 /* 148 * Switches to specified vcpu, until a matching vcpu_put() 149 */ 150 int vcpu_load(struct kvm_vcpu *vcpu) 151 { 152 int cpu; 153 154 if (mutex_lock_killable(&vcpu->mutex)) 155 return -EINTR; 156 cpu = get_cpu(); 157 preempt_notifier_register(&vcpu->preempt_notifier); 158 kvm_arch_vcpu_load(vcpu, cpu); 159 put_cpu(); 160 return 0; 161 } 162 EXPORT_SYMBOL_GPL(vcpu_load); 163 164 void vcpu_put(struct kvm_vcpu *vcpu) 165 { 166 preempt_disable(); 167 kvm_arch_vcpu_put(vcpu); 168 preempt_notifier_unregister(&vcpu->preempt_notifier); 169 preempt_enable(); 170 mutex_unlock(&vcpu->mutex); 171 } 172 EXPORT_SYMBOL_GPL(vcpu_put); 173 174 /* TODO: merge with kvm_arch_vcpu_should_kick */ 175 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 176 { 177 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 178 179 /* 180 * We need to wait for the VCPU to reenable interrupts and get out of 181 * READING_SHADOW_PAGE_TABLES mode. 182 */ 183 if (req & KVM_REQUEST_WAIT) 184 return mode != OUTSIDE_GUEST_MODE; 185 186 /* 187 * Need to kick a running VCPU, but otherwise there is nothing to do. 
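 * A VCPU that is already OUTSIDE_GUEST_MODE will notice the request on its
 * next guest entry, so no IPI is needed for it.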
188 */ 189 return mode == IN_GUEST_MODE; 190 } 191 192 static void ack_flush(void *_completed) 193 { 194 } 195 196 static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait) 197 { 198 if (unlikely(!cpus)) 199 cpus = cpu_online_mask; 200 201 if (cpumask_empty(cpus)) 202 return false; 203 204 smp_call_function_many(cpus, ack_flush, NULL, wait); 205 return true; 206 } 207 208 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 209 { 210 int i, cpu, me; 211 cpumask_var_t cpus; 212 bool called; 213 struct kvm_vcpu *vcpu; 214 215 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 216 217 me = get_cpu(); 218 kvm_for_each_vcpu(i, vcpu, kvm) { 219 kvm_make_request(req, vcpu); 220 cpu = vcpu->cpu; 221 222 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 223 continue; 224 225 if (cpus != NULL && cpu != -1 && cpu != me && 226 kvm_request_needs_ipi(vcpu, req)) 227 __cpumask_set_cpu(cpu, cpus); 228 } 229 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 230 put_cpu(); 231 free_cpumask_var(cpus); 232 return called; 233 } 234 235 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 236 void kvm_flush_remote_tlbs(struct kvm *kvm) 237 { 238 /* 239 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in 240 * kvm_make_all_cpus_request. 241 */ 242 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); 243 244 /* 245 * We want to publish modifications to the page tables before reading 246 * mode. Pairs with a memory barrier in arch-specific code. 247 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 248 * and smp_mb in walk_shadow_page_lockless_begin/end. 249 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 250 * 251 * There is already an smp_mb__after_atomic() before 252 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 253 * barrier here. 254 */ 255 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 256 ++kvm->stat.remote_tlb_flush; 257 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 258 } 259 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 260 #endif 261 262 void kvm_reload_remote_mmus(struct kvm *kvm) 263 { 264 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 265 } 266 267 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 268 { 269 struct page *page; 270 int r; 271 272 mutex_init(&vcpu->mutex); 273 vcpu->cpu = -1; 274 vcpu->kvm = kvm; 275 vcpu->vcpu_id = id; 276 vcpu->pid = NULL; 277 init_swait_queue_head(&vcpu->wq); 278 kvm_async_pf_vcpu_init(vcpu); 279 280 vcpu->pre_pcpu = -1; 281 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 282 283 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 284 if (!page) { 285 r = -ENOMEM; 286 goto fail; 287 } 288 vcpu->run = page_address(page); 289 290 kvm_vcpu_set_in_spin_loop(vcpu, false); 291 kvm_vcpu_set_dy_eligible(vcpu, false); 292 vcpu->preempted = false; 293 294 r = kvm_arch_vcpu_init(vcpu); 295 if (r < 0) 296 goto fail_free_run; 297 return 0; 298 299 fail_free_run: 300 free_page((unsigned long)vcpu->run); 301 fail: 302 return r; 303 } 304 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 305 306 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 307 { 308 /* 309 * no need for rcu_read_lock as VCPU_RUN is the only place that 310 * will change the vcpu->pid pointer and on uninit all file 311 * descriptors are already gone. 
312 */ 313 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 314 kvm_arch_vcpu_uninit(vcpu); 315 free_page((unsigned long)vcpu->run); 316 } 317 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 318 319 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 320 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 321 { 322 return container_of(mn, struct kvm, mmu_notifier); 323 } 324 325 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 326 struct mm_struct *mm, 327 unsigned long address) 328 { 329 struct kvm *kvm = mmu_notifier_to_kvm(mn); 330 int need_tlb_flush, idx; 331 332 /* 333 * When ->invalidate_page runs, the linux pte has been zapped 334 * already but the page is still allocated until 335 * ->invalidate_page returns. So if we increase the sequence 336 * here the kvm page fault will notice if the spte can't be 337 * established because the page is going to be freed. If 338 * instead the kvm page fault establishes the spte before 339 * ->invalidate_page runs, kvm_unmap_hva will release it 340 * before returning. 341 * 342 * The sequence increase only need to be seen at spin_unlock 343 * time, and not at spin_lock time. 344 * 345 * Increasing the sequence after the spin_unlock would be 346 * unsafe because the kvm page fault could then establish the 347 * pte after kvm_unmap_hva returned, without noticing the page 348 * is going to be freed. 349 */ 350 idx = srcu_read_lock(&kvm->srcu); 351 spin_lock(&kvm->mmu_lock); 352 353 kvm->mmu_notifier_seq++; 354 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 355 /* we've to flush the tlb before the pages can be freed */ 356 if (need_tlb_flush) 357 kvm_flush_remote_tlbs(kvm); 358 359 spin_unlock(&kvm->mmu_lock); 360 361 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 362 363 srcu_read_unlock(&kvm->srcu, idx); 364 } 365 366 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 367 struct mm_struct *mm, 368 unsigned long address, 369 pte_t pte) 370 { 371 struct kvm *kvm = mmu_notifier_to_kvm(mn); 372 int idx; 373 374 idx = srcu_read_lock(&kvm->srcu); 375 spin_lock(&kvm->mmu_lock); 376 kvm->mmu_notifier_seq++; 377 kvm_set_spte_hva(kvm, address, pte); 378 spin_unlock(&kvm->mmu_lock); 379 srcu_read_unlock(&kvm->srcu, idx); 380 } 381 382 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 383 struct mm_struct *mm, 384 unsigned long start, 385 unsigned long end) 386 { 387 struct kvm *kvm = mmu_notifier_to_kvm(mn); 388 int need_tlb_flush = 0, idx; 389 390 idx = srcu_read_lock(&kvm->srcu); 391 spin_lock(&kvm->mmu_lock); 392 /* 393 * The count increase must become visible at unlock time as no 394 * spte can be established without taking the mmu_lock and 395 * count is also read inside the mmu_lock critical section. 396 */ 397 kvm->mmu_notifier_count++; 398 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 399 need_tlb_flush |= kvm->tlbs_dirty; 400 /* we've to flush the tlb before the pages can be freed */ 401 if (need_tlb_flush) 402 kvm_flush_remote_tlbs(kvm); 403 404 spin_unlock(&kvm->mmu_lock); 405 srcu_read_unlock(&kvm->srcu, idx); 406 } 407 408 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 409 struct mm_struct *mm, 410 unsigned long start, 411 unsigned long end) 412 { 413 struct kvm *kvm = mmu_notifier_to_kvm(mn); 414 415 spin_lock(&kvm->mmu_lock); 416 /* 417 * This sequence increase will notify the kvm page fault that 418 * the page that is going to be mapped in the spte could have 419 * been freed. 
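	 *
	 * For illustration, an arch page-fault path typically pairs with this
	 * counter roughly as follows (sketch only, details vary by arch):
	 *
	 *	mmu_seq = kvm->mmu_notifier_seq;
	 *	smp_rmb();
	 *	pfn = gfn_to_pfn(kvm, gfn);
	 *	spin_lock(&kvm->mmu_lock);
	 *	if (mmu_notifier_retry(kvm, mmu_seq))
	 *		goto retry_fault;
	 *	(install the spte)
	 *	spin_unlock(&kvm->mmu_lock);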
420 */ 421 kvm->mmu_notifier_seq++; 422 smp_wmb(); 423 /* 424 * The above sequence increase must be visible before the 425 * below count decrease, which is ensured by the smp_wmb above 426 * in conjunction with the smp_rmb in mmu_notifier_retry(). 427 */ 428 kvm->mmu_notifier_count--; 429 spin_unlock(&kvm->mmu_lock); 430 431 BUG_ON(kvm->mmu_notifier_count < 0); 432 } 433 434 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 435 struct mm_struct *mm, 436 unsigned long start, 437 unsigned long end) 438 { 439 struct kvm *kvm = mmu_notifier_to_kvm(mn); 440 int young, idx; 441 442 idx = srcu_read_lock(&kvm->srcu); 443 spin_lock(&kvm->mmu_lock); 444 445 young = kvm_age_hva(kvm, start, end); 446 if (young) 447 kvm_flush_remote_tlbs(kvm); 448 449 spin_unlock(&kvm->mmu_lock); 450 srcu_read_unlock(&kvm->srcu, idx); 451 452 return young; 453 } 454 455 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 456 struct mm_struct *mm, 457 unsigned long start, 458 unsigned long end) 459 { 460 struct kvm *kvm = mmu_notifier_to_kvm(mn); 461 int young, idx; 462 463 idx = srcu_read_lock(&kvm->srcu); 464 spin_lock(&kvm->mmu_lock); 465 /* 466 * Even though we do not flush TLB, this will still adversely 467 * affect performance on pre-Haswell Intel EPT, where there is 468 * no EPT Access Bit to clear so that we have to tear down EPT 469 * tables instead. If we find this unacceptable, we can always 470 * add a parameter to kvm_age_hva so that it effectively doesn't 471 * do anything on clear_young. 472 * 473 * Also note that currently we never issue secondary TLB flushes 474 * from clear_young, leaving this job up to the regular system 475 * cadence. If we find this inaccurate, we might come up with a 476 * more sophisticated heuristic later. 
477 */ 478 young = kvm_age_hva(kvm, start, end); 479 spin_unlock(&kvm->mmu_lock); 480 srcu_read_unlock(&kvm->srcu, idx); 481 482 return young; 483 } 484 485 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 486 struct mm_struct *mm, 487 unsigned long address) 488 { 489 struct kvm *kvm = mmu_notifier_to_kvm(mn); 490 int young, idx; 491 492 idx = srcu_read_lock(&kvm->srcu); 493 spin_lock(&kvm->mmu_lock); 494 young = kvm_test_age_hva(kvm, address); 495 spin_unlock(&kvm->mmu_lock); 496 srcu_read_unlock(&kvm->srcu, idx); 497 498 return young; 499 } 500 501 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 502 struct mm_struct *mm) 503 { 504 struct kvm *kvm = mmu_notifier_to_kvm(mn); 505 int idx; 506 507 idx = srcu_read_lock(&kvm->srcu); 508 kvm_arch_flush_shadow_all(kvm); 509 srcu_read_unlock(&kvm->srcu, idx); 510 } 511 512 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 513 .invalidate_page = kvm_mmu_notifier_invalidate_page, 514 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 515 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 516 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 517 .clear_young = kvm_mmu_notifier_clear_young, 518 .test_young = kvm_mmu_notifier_test_young, 519 .change_pte = kvm_mmu_notifier_change_pte, 520 .release = kvm_mmu_notifier_release, 521 }; 522 523 static int kvm_init_mmu_notifier(struct kvm *kvm) 524 { 525 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 526 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 527 } 528 529 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 530 531 static int kvm_init_mmu_notifier(struct kvm *kvm) 532 { 533 return 0; 534 } 535 536 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 537 538 static struct kvm_memslots *kvm_alloc_memslots(void) 539 { 540 int i; 541 struct kvm_memslots *slots; 542 543 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 544 if (!slots) 545 return NULL; 546 547 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 548 slots->id_to_index[i] = slots->memslots[i].id = i; 549 550 return slots; 551 } 552 553 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 554 { 555 if (!memslot->dirty_bitmap) 556 return; 557 558 kvfree(memslot->dirty_bitmap); 559 memslot->dirty_bitmap = NULL; 560 } 561 562 /* 563 * Free any memory in @free but not in @dont. 
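 * Called with @dont == NULL to free everything (see kvm_free_memslots), or
 * with the old/new pair from __kvm_set_memory_region so that resources that
 * were carried over into the new slot are not freed.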
564 */ 565 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 566 struct kvm_memory_slot *dont) 567 { 568 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 569 kvm_destroy_dirty_bitmap(free); 570 571 kvm_arch_free_memslot(kvm, free, dont); 572 573 free->npages = 0; 574 } 575 576 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 577 { 578 struct kvm_memory_slot *memslot; 579 580 if (!slots) 581 return; 582 583 kvm_for_each_memslot(memslot, slots) 584 kvm_free_memslot(kvm, memslot, NULL); 585 586 kvfree(slots); 587 } 588 589 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 590 { 591 int i; 592 593 if (!kvm->debugfs_dentry) 594 return; 595 596 debugfs_remove_recursive(kvm->debugfs_dentry); 597 598 if (kvm->debugfs_stat_data) { 599 for (i = 0; i < kvm_debugfs_num_entries; i++) 600 kfree(kvm->debugfs_stat_data[i]); 601 kfree(kvm->debugfs_stat_data); 602 } 603 } 604 605 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 606 { 607 char dir_name[ITOA_MAX_LEN * 2]; 608 struct kvm_stat_data *stat_data; 609 struct kvm_stats_debugfs_item *p; 610 611 if (!debugfs_initialized()) 612 return 0; 613 614 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 615 kvm->debugfs_dentry = debugfs_create_dir(dir_name, 616 kvm_debugfs_dir); 617 if (!kvm->debugfs_dentry) 618 return -ENOMEM; 619 620 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 621 sizeof(*kvm->debugfs_stat_data), 622 GFP_KERNEL); 623 if (!kvm->debugfs_stat_data) 624 return -ENOMEM; 625 626 for (p = debugfs_entries; p->name; p++) { 627 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL); 628 if (!stat_data) 629 return -ENOMEM; 630 631 stat_data->kvm = kvm; 632 stat_data->offset = p->offset; 633 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; 634 if (!debugfs_create_file(p->name, 0644, 635 kvm->debugfs_dentry, 636 stat_data, 637 stat_fops_per_vm[p->kind])) 638 return -ENOMEM; 639 } 640 return 0; 641 } 642 643 static struct kvm *kvm_create_vm(unsigned long type) 644 { 645 int r, i; 646 struct kvm *kvm = kvm_arch_alloc_vm(); 647 648 if (!kvm) 649 return ERR_PTR(-ENOMEM); 650 651 spin_lock_init(&kvm->mmu_lock); 652 mmgrab(current->mm); 653 kvm->mm = current->mm; 654 kvm_eventfd_init(kvm); 655 mutex_init(&kvm->lock); 656 mutex_init(&kvm->irq_lock); 657 mutex_init(&kvm->slots_lock); 658 refcount_set(&kvm->users_count, 1); 659 INIT_LIST_HEAD(&kvm->devices); 660 661 r = kvm_arch_init_vm(kvm, type); 662 if (r) 663 goto out_err_no_disable; 664 665 r = hardware_enable_all(); 666 if (r) 667 goto out_err_no_disable; 668 669 #ifdef CONFIG_HAVE_KVM_IRQFD 670 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 671 #endif 672 673 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 674 675 r = -ENOMEM; 676 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 677 struct kvm_memslots *slots = kvm_alloc_memslots(); 678 if (!slots) 679 goto out_err_no_srcu; 680 /* 681 * Generations must be different for each address space. 682 * Init kvm generation close to the maximum to easily test the 683 * code of handling generation number wrap-around. 
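		 * (The expression i * 2 - 150 below wraps the generation
		 * counter, so each address space starts just short of
		 * overflow and the wrap-around path is exercised after only
		 * a few memslot updates.)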
684 */ 685 slots->generation = i * 2 - 150; 686 rcu_assign_pointer(kvm->memslots[i], slots); 687 } 688 689 if (init_srcu_struct(&kvm->srcu)) 690 goto out_err_no_srcu; 691 if (init_srcu_struct(&kvm->irq_srcu)) 692 goto out_err_no_irq_srcu; 693 for (i = 0; i < KVM_NR_BUSES; i++) { 694 rcu_assign_pointer(kvm->buses[i], 695 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL)); 696 if (!kvm->buses[i]) 697 goto out_err; 698 } 699 700 r = kvm_init_mmu_notifier(kvm); 701 if (r) 702 goto out_err; 703 704 spin_lock(&kvm_lock); 705 list_add(&kvm->vm_list, &vm_list); 706 spin_unlock(&kvm_lock); 707 708 preempt_notifier_inc(); 709 710 return kvm; 711 712 out_err: 713 cleanup_srcu_struct(&kvm->irq_srcu); 714 out_err_no_irq_srcu: 715 cleanup_srcu_struct(&kvm->srcu); 716 out_err_no_srcu: 717 hardware_disable_all(); 718 out_err_no_disable: 719 for (i = 0; i < KVM_NR_BUSES; i++) 720 kfree(kvm_get_bus(kvm, i)); 721 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 722 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 723 kvm_arch_free_vm(kvm); 724 mmdrop(current->mm); 725 return ERR_PTR(r); 726 } 727 728 static void kvm_destroy_devices(struct kvm *kvm) 729 { 730 struct kvm_device *dev, *tmp; 731 732 /* 733 * We do not need to take the kvm->lock here, because nobody else 734 * has a reference to the struct kvm at this point and therefore 735 * cannot access the devices list anyhow. 736 */ 737 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 738 list_del(&dev->vm_node); 739 dev->ops->destroy(dev); 740 } 741 } 742 743 static void kvm_destroy_vm(struct kvm *kvm) 744 { 745 int i; 746 struct mm_struct *mm = kvm->mm; 747 748 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 749 kvm_destroy_vm_debugfs(kvm); 750 kvm_arch_sync_events(kvm); 751 spin_lock(&kvm_lock); 752 list_del(&kvm->vm_list); 753 spin_unlock(&kvm_lock); 754 kvm_free_irq_routing(kvm); 755 for (i = 0; i < KVM_NR_BUSES; i++) { 756 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 757 758 if (bus) 759 kvm_io_bus_destroy(bus); 760 kvm->buses[i] = NULL; 761 } 762 kvm_coalesced_mmio_free(kvm); 763 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 764 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 765 #else 766 kvm_arch_flush_shadow_all(kvm); 767 #endif 768 kvm_arch_destroy_vm(kvm); 769 kvm_destroy_devices(kvm); 770 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 771 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 772 cleanup_srcu_struct(&kvm->irq_srcu); 773 cleanup_srcu_struct(&kvm->srcu); 774 kvm_arch_free_vm(kvm); 775 preempt_notifier_dec(); 776 hardware_disable_all(); 777 mmdrop(mm); 778 } 779 780 void kvm_get_kvm(struct kvm *kvm) 781 { 782 refcount_inc(&kvm->users_count); 783 } 784 EXPORT_SYMBOL_GPL(kvm_get_kvm); 785 786 void kvm_put_kvm(struct kvm *kvm) 787 { 788 if (refcount_dec_and_test(&kvm->users_count)) 789 kvm_destroy_vm(kvm); 790 } 791 EXPORT_SYMBOL_GPL(kvm_put_kvm); 792 793 794 static int kvm_vm_release(struct inode *inode, struct file *filp) 795 { 796 struct kvm *kvm = filp->private_data; 797 798 kvm_irqfd_release(kvm); 799 800 kvm_put_kvm(kvm); 801 return 0; 802 } 803 804 /* 805 * Allocation size is twice as large as the actual dirty bitmap size. 806 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 
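 * (kvm_get_dirty_log_protect() uses the second half as a scratch snapshot
 *  buffer: dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long).)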
807 */ 808 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 809 { 810 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 811 812 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL); 813 if (!memslot->dirty_bitmap) 814 return -ENOMEM; 815 816 return 0; 817 } 818 819 /* 820 * Insert memslot and re-sort memslots based on their GFN, 821 * so binary search could be used to lookup GFN. 822 * Sorting algorithm takes advantage of having initially 823 * sorted array and known changed memslot position. 824 */ 825 static void update_memslots(struct kvm_memslots *slots, 826 struct kvm_memory_slot *new) 827 { 828 int id = new->id; 829 int i = slots->id_to_index[id]; 830 struct kvm_memory_slot *mslots = slots->memslots; 831 832 WARN_ON(mslots[i].id != id); 833 if (!new->npages) { 834 WARN_ON(!mslots[i].npages); 835 if (mslots[i].npages) 836 slots->used_slots--; 837 } else { 838 if (!mslots[i].npages) 839 slots->used_slots++; 840 } 841 842 while (i < KVM_MEM_SLOTS_NUM - 1 && 843 new->base_gfn <= mslots[i + 1].base_gfn) { 844 if (!mslots[i + 1].npages) 845 break; 846 mslots[i] = mslots[i + 1]; 847 slots->id_to_index[mslots[i].id] = i; 848 i++; 849 } 850 851 /* 852 * The ">=" is needed when creating a slot with base_gfn == 0, 853 * so that it moves before all those with base_gfn == npages == 0. 854 * 855 * On the other hand, if new->npages is zero, the above loop has 856 * already left i pointing to the beginning of the empty part of 857 * mslots, and the ">=" would move the hole backwards in this 858 * case---which is wrong. So skip the loop when deleting a slot. 859 */ 860 if (new->npages) { 861 while (i > 0 && 862 new->base_gfn >= mslots[i - 1].base_gfn) { 863 mslots[i] = mslots[i - 1]; 864 slots->id_to_index[mslots[i].id] = i; 865 i--; 866 } 867 } else 868 WARN_ON_ONCE(i != slots->used_slots); 869 870 mslots[i] = *new; 871 slots->id_to_index[mslots[i].id] = i; 872 } 873 874 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 875 { 876 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 877 878 #ifdef __KVM_HAVE_READONLY_MEM 879 valid_flags |= KVM_MEM_READONLY; 880 #endif 881 882 if (mem->flags & ~valid_flags) 883 return -EINVAL; 884 885 return 0; 886 } 887 888 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 889 int as_id, struct kvm_memslots *slots) 890 { 891 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 892 893 /* 894 * Set the low bit in the generation, which disables SPTE caching 895 * until the end of synchronize_srcu_expedited. 896 */ 897 WARN_ON(old_memslots->generation & 1); 898 slots->generation = old_memslots->generation + 1; 899 900 rcu_assign_pointer(kvm->memslots[as_id], slots); 901 synchronize_srcu_expedited(&kvm->srcu); 902 903 /* 904 * Increment the new memslot generation a second time. This prevents 905 * vm exits that race with memslot updates from caching a memslot 906 * generation that will (potentially) be valid forever. 907 * 908 * Generations must be unique even across address spaces. We do not need 909 * a global counter for that, instead the generation space is evenly split 910 * across address spaces. For example, with two address spaces, address 911 * space 0 will use generations 0, 4, 8, ... while * address space 1 will 912 * use generations 2, 6, 10, 14, ... 
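	 *
	 * Each completed memslot update therefore advances a generation by
	 * 2 * KVM_ADDRESS_SPACE_NUM in total: +1 when the temporary (odd)
	 * copy is installed above, and KVM_ADDRESS_SPACE_NUM * 2 - 1 here.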
913 */ 914 slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1; 915 916 kvm_arch_memslots_updated(kvm, slots); 917 918 return old_memslots; 919 } 920 921 /* 922 * Allocate some memory and give it an address in the guest physical address 923 * space. 924 * 925 * Discontiguous memory is allowed, mostly for framebuffers. 926 * 927 * Must be called holding kvm->slots_lock for write. 928 */ 929 int __kvm_set_memory_region(struct kvm *kvm, 930 const struct kvm_userspace_memory_region *mem) 931 { 932 int r; 933 gfn_t base_gfn; 934 unsigned long npages; 935 struct kvm_memory_slot *slot; 936 struct kvm_memory_slot old, new; 937 struct kvm_memslots *slots = NULL, *old_memslots; 938 int as_id, id; 939 enum kvm_mr_change change; 940 941 r = check_memory_region_flags(mem); 942 if (r) 943 goto out; 944 945 r = -EINVAL; 946 as_id = mem->slot >> 16; 947 id = (u16)mem->slot; 948 949 /* General sanity checks */ 950 if (mem->memory_size & (PAGE_SIZE - 1)) 951 goto out; 952 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 953 goto out; 954 /* We can read the guest memory with __xxx_user() later on. */ 955 if ((id < KVM_USER_MEM_SLOTS) && 956 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 957 !access_ok(VERIFY_WRITE, 958 (void __user *)(unsigned long)mem->userspace_addr, 959 mem->memory_size))) 960 goto out; 961 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 962 goto out; 963 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 964 goto out; 965 966 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); 967 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 968 npages = mem->memory_size >> PAGE_SHIFT; 969 970 if (npages > KVM_MEM_MAX_NR_PAGES) 971 goto out; 972 973 new = old = *slot; 974 975 new.id = id; 976 new.base_gfn = base_gfn; 977 new.npages = npages; 978 new.flags = mem->flags; 979 980 if (npages) { 981 if (!old.npages) 982 change = KVM_MR_CREATE; 983 else { /* Modify an existing slot. */ 984 if ((mem->userspace_addr != old.userspace_addr) || 985 (npages != old.npages) || 986 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 987 goto out; 988 989 if (base_gfn != old.base_gfn) 990 change = KVM_MR_MOVE; 991 else if (new.flags != old.flags) 992 change = KVM_MR_FLAGS_ONLY; 993 else { /* Nothing to change. 
*/ 994 r = 0; 995 goto out; 996 } 997 } 998 } else { 999 if (!old.npages) 1000 goto out; 1001 1002 change = KVM_MR_DELETE; 1003 new.base_gfn = 0; 1004 new.flags = 0; 1005 } 1006 1007 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1008 /* Check for overlaps */ 1009 r = -EEXIST; 1010 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 1011 if ((slot->id >= KVM_USER_MEM_SLOTS) || 1012 (slot->id == id)) 1013 continue; 1014 if (!((base_gfn + npages <= slot->base_gfn) || 1015 (base_gfn >= slot->base_gfn + slot->npages))) 1016 goto out; 1017 } 1018 } 1019 1020 /* Free page dirty bitmap if unneeded */ 1021 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 1022 new.dirty_bitmap = NULL; 1023 1024 r = -ENOMEM; 1025 if (change == KVM_MR_CREATE) { 1026 new.userspace_addr = mem->userspace_addr; 1027 1028 if (kvm_arch_create_memslot(kvm, &new, npages)) 1029 goto out_free; 1030 } 1031 1032 /* Allocate page dirty bitmap if needed */ 1033 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 1034 if (kvm_create_dirty_bitmap(&new) < 0) 1035 goto out_free; 1036 } 1037 1038 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 1039 if (!slots) 1040 goto out_free; 1041 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 1042 1043 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 1044 slot = id_to_memslot(slots, id); 1045 slot->flags |= KVM_MEMSLOT_INVALID; 1046 1047 old_memslots = install_new_memslots(kvm, as_id, slots); 1048 1049 /* From this point no new shadow pages pointing to a deleted, 1050 * or moved, memslot will be created. 1051 * 1052 * validation of sp->gfn happens in: 1053 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1054 * - kvm_is_visible_gfn (mmu_check_roots) 1055 */ 1056 kvm_arch_flush_shadow_memslot(kvm, slot); 1057 1058 /* 1059 * We can re-use the old_memslots from above, the only difference 1060 * from the currently installed memslots is the invalid flag. This 1061 * will get overwritten by update_memslots anyway. 
1062 */ 1063 slots = old_memslots; 1064 } 1065 1066 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 1067 if (r) 1068 goto out_slots; 1069 1070 /* actual memory is freed via old in kvm_free_memslot below */ 1071 if (change == KVM_MR_DELETE) { 1072 new.dirty_bitmap = NULL; 1073 memset(&new.arch, 0, sizeof(new.arch)); 1074 } 1075 1076 update_memslots(slots, &new); 1077 old_memslots = install_new_memslots(kvm, as_id, slots); 1078 1079 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); 1080 1081 kvm_free_memslot(kvm, &old, &new); 1082 kvfree(old_memslots); 1083 return 0; 1084 1085 out_slots: 1086 kvfree(slots); 1087 out_free: 1088 kvm_free_memslot(kvm, &new, &old); 1089 out: 1090 return r; 1091 } 1092 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1093 1094 int kvm_set_memory_region(struct kvm *kvm, 1095 const struct kvm_userspace_memory_region *mem) 1096 { 1097 int r; 1098 1099 mutex_lock(&kvm->slots_lock); 1100 r = __kvm_set_memory_region(kvm, mem); 1101 mutex_unlock(&kvm->slots_lock); 1102 return r; 1103 } 1104 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1105 1106 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1107 struct kvm_userspace_memory_region *mem) 1108 { 1109 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1110 return -EINVAL; 1111 1112 return kvm_set_memory_region(kvm, mem); 1113 } 1114 1115 int kvm_get_dirty_log(struct kvm *kvm, 1116 struct kvm_dirty_log *log, int *is_dirty) 1117 { 1118 struct kvm_memslots *slots; 1119 struct kvm_memory_slot *memslot; 1120 int i, as_id, id; 1121 unsigned long n; 1122 unsigned long any = 0; 1123 1124 as_id = log->slot >> 16; 1125 id = (u16)log->slot; 1126 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1127 return -EINVAL; 1128 1129 slots = __kvm_memslots(kvm, as_id); 1130 memslot = id_to_memslot(slots, id); 1131 if (!memslot->dirty_bitmap) 1132 return -ENOENT; 1133 1134 n = kvm_dirty_bitmap_bytes(memslot); 1135 1136 for (i = 0; !any && i < n/sizeof(long); ++i) 1137 any = memslot->dirty_bitmap[i]; 1138 1139 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 1140 return -EFAULT; 1141 1142 if (any) 1143 *is_dirty = 1; 1144 return 0; 1145 } 1146 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1147 1148 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1149 /** 1150 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages 1151 * are dirty write protect them for next write. 1152 * @kvm: pointer to kvm instance 1153 * @log: slot id and address to which we copy the log 1154 * @is_dirty: flag set if any page is dirty 1155 * 1156 * We need to keep it in mind that VCPU threads can write to the bitmap 1157 * concurrently. So, to avoid losing track of dirty pages we keep the 1158 * following order: 1159 * 1160 * 1. Take a snapshot of the bit and clear it if needed. 1161 * 2. Write protect the corresponding page. 1162 * 3. Copy the snapshot to the userspace. 1163 * 4. Upon return caller flushes TLB's if needed. 1164 * 1165 * Between 2 and 4, the guest may write to the page using the remaining TLB 1166 * entry. This is not a problem because the page is reported dirty using 1167 * the snapshot taken before and step 4 ensures that writes done after 1168 * exiting to userspace will be logged for the next call. 
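 *
 * On x86, for instance, the ioctl handler kvm_vm_ioctl_get_dirty_log()
 * roughly does (sketch, arch-specific details and error handling omitted):
 *
 *	mutex_lock(&kvm->slots_lock);
 *	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 *	if (is_dirty)
 *		kvm_flush_remote_tlbs(kvm);
 *	mutex_unlock(&kvm->slots_lock);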
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	if (!dirty_bitmap)
		return -ENOENT;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	    memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1309 } 1310 1311 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1312 gfn_t gfn) 1313 { 1314 return gfn_to_hva_many(slot, gfn, NULL); 1315 } 1316 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1317 1318 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1319 { 1320 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1321 } 1322 EXPORT_SYMBOL_GPL(gfn_to_hva); 1323 1324 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1325 { 1326 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1327 } 1328 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1329 1330 /* 1331 * If writable is set to false, the hva returned by this function is only 1332 * allowed to be read. 1333 */ 1334 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1335 gfn_t gfn, bool *writable) 1336 { 1337 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1338 1339 if (!kvm_is_error_hva(hva) && writable) 1340 *writable = !memslot_is_readonly(slot); 1341 1342 return hva; 1343 } 1344 1345 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1346 { 1347 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1348 1349 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1350 } 1351 1352 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 1353 { 1354 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1355 1356 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1357 } 1358 1359 static int get_user_page_nowait(unsigned long start, int write, 1360 struct page **page) 1361 { 1362 int flags = FOLL_NOWAIT | FOLL_HWPOISON; 1363 1364 if (write) 1365 flags |= FOLL_WRITE; 1366 1367 return get_user_pages(start, 1, flags, page, NULL); 1368 } 1369 1370 static inline int check_user_page_hwpoison(unsigned long addr) 1371 { 1372 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 1373 1374 rc = get_user_pages(addr, 1, flags, NULL, NULL); 1375 return rc == -EHWPOISON; 1376 } 1377 1378 /* 1379 * The atomic path to get the writable pfn which will be stored in @pfn, 1380 * true indicates success, otherwise false is returned. 1381 */ 1382 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1383 bool write_fault, bool *writable, kvm_pfn_t *pfn) 1384 { 1385 struct page *page[1]; 1386 int npages; 1387 1388 if (!(async || atomic)) 1389 return false; 1390 1391 /* 1392 * Fast pin a writable pfn only if it is a write fault request 1393 * or the caller allows to map a writable pfn for a read fault 1394 * request. 1395 */ 1396 if (!(write_fault || writable)) 1397 return false; 1398 1399 npages = __get_user_pages_fast(addr, 1, 1, page); 1400 if (npages == 1) { 1401 *pfn = page_to_pfn(page[0]); 1402 1403 if (writable) 1404 *writable = true; 1405 return true; 1406 } 1407 1408 return false; 1409 } 1410 1411 /* 1412 * The slow path to get the pfn of the specified host virtual address, 1413 * 1 indicates success, -errno is returned if error is detected. 
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else {
		unsigned int flags = FOLL_HWPOISON;

		if (write_fault)
			flags |= FOLL_WRITE;

		npages = get_user_pages_unlocked(addr, 1, page, flags);
	}
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, kvm_pfn_t *p_pfn)
{
	unsigned long pfn;
	int r;

	r = follow_pfn(vma, addr, &pfn);
	if (r) {
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current, current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),
				     &unlocked);
		if (unlocked)
			return -EAGAIN;
		if (r)
			return r;

		r = follow_pfn(vma, addr, &pfn);
		if (r)
			return r;

	}


	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 */
	kvm_get_pfn(pfn);

	*p_pfn = pfn;
	return 0;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep
 * @async: whether this function needs to wait for IO to complete if the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether to allow mapping a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	kvm_pfn_t pfn = 0;
	int npages, r;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

retry:
	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
		if (r == -EAGAIN)
			goto retry;
		if (r < 0)
			pfn = KVM_PFN_ERR_FAULT;
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD) {
		if (writable)
			*writable = false;
		return KVM_PFN_ERR_RO_FAULT;
	}

	if (kvm_is_error_hva(addr)) {
		if (writable)
			*writable = false;
		return KVM_PFN_NOSLOT;
	}

	/* Do not map writable pfn in the readonly memslot.
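	 * The caller still gets a pfn it can use for reads: *writable is
	 * forced to false and the out-parameter is dropped below, so
	 * hva_to_pfn() will not try to map a read fault as writable.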
*/ 1596 if (writable && memslot_is_readonly(slot)) { 1597 *writable = false; 1598 writable = NULL; 1599 } 1600 1601 return hva_to_pfn(addr, atomic, async, write_fault, 1602 writable); 1603 } 1604 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 1605 1606 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1607 bool *writable) 1608 { 1609 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 1610 write_fault, writable); 1611 } 1612 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 1613 1614 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1615 { 1616 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); 1617 } 1618 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 1619 1620 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 1621 { 1622 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); 1623 } 1624 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 1625 1626 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) 1627 { 1628 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); 1629 } 1630 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); 1631 1632 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 1633 { 1634 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1635 } 1636 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 1637 1638 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 1639 { 1640 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 1641 } 1642 EXPORT_SYMBOL_GPL(gfn_to_pfn); 1643 1644 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 1645 { 1646 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 1647 } 1648 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 1649 1650 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1651 struct page **pages, int nr_pages) 1652 { 1653 unsigned long addr; 1654 gfn_t entry; 1655 1656 addr = gfn_to_hva_many(slot, gfn, &entry); 1657 if (kvm_is_error_hva(addr)) 1658 return -1; 1659 1660 if (entry < nr_pages) 1661 return 0; 1662 1663 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1664 } 1665 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1666 1667 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 1668 { 1669 if (is_error_noslot_pfn(pfn)) 1670 return KVM_ERR_PTR_BAD_PAGE; 1671 1672 if (kvm_is_reserved_pfn(pfn)) { 1673 WARN_ON(1); 1674 return KVM_ERR_PTR_BAD_PAGE; 1675 } 1676 1677 return pfn_to_page(pfn); 1678 } 1679 1680 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1681 { 1682 kvm_pfn_t pfn; 1683 1684 pfn = gfn_to_pfn(kvm, gfn); 1685 1686 return kvm_pfn_to_page(pfn); 1687 } 1688 EXPORT_SYMBOL_GPL(gfn_to_page); 1689 1690 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1691 { 1692 kvm_pfn_t pfn; 1693 1694 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 1695 1696 return kvm_pfn_to_page(pfn); 1697 } 1698 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 1699 1700 void kvm_release_page_clean(struct page *page) 1701 { 1702 WARN_ON(is_error_page(page)); 1703 1704 kvm_release_pfn_clean(page_to_pfn(page)); 1705 } 1706 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1707 1708 void kvm_release_pfn_clean(kvm_pfn_t pfn) 1709 { 1710 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1711 put_page(pfn_to_page(pfn)); 1712 } 1713 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1714 1715 void kvm_release_page_dirty(struct page *page) 1716 { 1717 WARN_ON(is_error_page(page)); 1718 1719 kvm_release_pfn_dirty(page_to_pfn(page)); 1720 } 1721 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1722 1723 static void 
kvm_release_pfn_dirty(kvm_pfn_t pfn) 1724 { 1725 kvm_set_pfn_dirty(pfn); 1726 kvm_release_pfn_clean(pfn); 1727 } 1728 1729 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 1730 { 1731 if (!kvm_is_reserved_pfn(pfn)) { 1732 struct page *page = pfn_to_page(pfn); 1733 1734 if (!PageReserved(page)) 1735 SetPageDirty(page); 1736 } 1737 } 1738 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1739 1740 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 1741 { 1742 if (!kvm_is_reserved_pfn(pfn)) 1743 mark_page_accessed(pfn_to_page(pfn)); 1744 } 1745 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1746 1747 void kvm_get_pfn(kvm_pfn_t pfn) 1748 { 1749 if (!kvm_is_reserved_pfn(pfn)) 1750 get_page(pfn_to_page(pfn)); 1751 } 1752 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1753 1754 static int next_segment(unsigned long len, int offset) 1755 { 1756 if (len > PAGE_SIZE - offset) 1757 return PAGE_SIZE - offset; 1758 else 1759 return len; 1760 } 1761 1762 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 1763 void *data, int offset, int len) 1764 { 1765 int r; 1766 unsigned long addr; 1767 1768 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1769 if (kvm_is_error_hva(addr)) 1770 return -EFAULT; 1771 r = __copy_from_user(data, (void __user *)addr + offset, len); 1772 if (r) 1773 return -EFAULT; 1774 return 0; 1775 } 1776 1777 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1778 int len) 1779 { 1780 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1781 1782 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1783 } 1784 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1785 1786 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 1787 int offset, int len) 1788 { 1789 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1790 1791 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1792 } 1793 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 1794 1795 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1796 { 1797 gfn_t gfn = gpa >> PAGE_SHIFT; 1798 int seg; 1799 int offset = offset_in_page(gpa); 1800 int ret; 1801 1802 while ((seg = next_segment(len, offset)) != 0) { 1803 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1804 if (ret < 0) 1805 return ret; 1806 offset = 0; 1807 len -= seg; 1808 data += seg; 1809 ++gfn; 1810 } 1811 return 0; 1812 } 1813 EXPORT_SYMBOL_GPL(kvm_read_guest); 1814 1815 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 1816 { 1817 gfn_t gfn = gpa >> PAGE_SHIFT; 1818 int seg; 1819 int offset = offset_in_page(gpa); 1820 int ret; 1821 1822 while ((seg = next_segment(len, offset)) != 0) { 1823 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 1824 if (ret < 0) 1825 return ret; 1826 offset = 0; 1827 len -= seg; 1828 data += seg; 1829 ++gfn; 1830 } 1831 return 0; 1832 } 1833 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 1834 1835 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1836 void *data, int offset, unsigned long len) 1837 { 1838 int r; 1839 unsigned long addr; 1840 1841 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1842 if (kvm_is_error_hva(addr)) 1843 return -EFAULT; 1844 pagefault_disable(); 1845 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1846 pagefault_enable(); 1847 if (r) 1848 return -EFAULT; 1849 return 0; 1850 } 1851 1852 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1853 unsigned long len) 1854 { 1855 gfn_t gfn = gpa >> PAGE_SHIFT; 1856 struct kvm_memory_slot *slot = 
gfn_to_memslot(kvm, gfn); 1857 int offset = offset_in_page(gpa); 1858 1859 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1860 } 1861 EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); 1862 1863 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 1864 void *data, unsigned long len) 1865 { 1866 gfn_t gfn = gpa >> PAGE_SHIFT; 1867 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1868 int offset = offset_in_page(gpa); 1869 1870 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1871 } 1872 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 1873 1874 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, 1875 const void *data, int offset, int len) 1876 { 1877 int r; 1878 unsigned long addr; 1879 1880 addr = gfn_to_hva_memslot(memslot, gfn); 1881 if (kvm_is_error_hva(addr)) 1882 return -EFAULT; 1883 r = __copy_to_user((void __user *)addr + offset, data, len); 1884 if (r) 1885 return -EFAULT; 1886 mark_page_dirty_in_slot(memslot, gfn); 1887 return 0; 1888 } 1889 1890 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 1891 const void *data, int offset, int len) 1892 { 1893 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1894 1895 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1896 } 1897 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1898 1899 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 1900 const void *data, int offset, int len) 1901 { 1902 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1903 1904 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1905 } 1906 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 1907 1908 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1909 unsigned long len) 1910 { 1911 gfn_t gfn = gpa >> PAGE_SHIFT; 1912 int seg; 1913 int offset = offset_in_page(gpa); 1914 int ret; 1915 1916 while ((seg = next_segment(len, offset)) != 0) { 1917 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1918 if (ret < 0) 1919 return ret; 1920 offset = 0; 1921 len -= seg; 1922 data += seg; 1923 ++gfn; 1924 } 1925 return 0; 1926 } 1927 EXPORT_SYMBOL_GPL(kvm_write_guest); 1928 1929 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 1930 unsigned long len) 1931 { 1932 gfn_t gfn = gpa >> PAGE_SHIFT; 1933 int seg; 1934 int offset = offset_in_page(gpa); 1935 int ret; 1936 1937 while ((seg = next_segment(len, offset)) != 0) { 1938 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 1939 if (ret < 0) 1940 return ret; 1941 offset = 0; 1942 len -= seg; 1943 data += seg; 1944 ++gfn; 1945 } 1946 return 0; 1947 } 1948 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 1949 1950 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 1951 struct gfn_to_hva_cache *ghc, 1952 gpa_t gpa, unsigned long len) 1953 { 1954 int offset = offset_in_page(gpa); 1955 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1956 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1957 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1958 gfn_t nr_pages_avail; 1959 1960 ghc->gpa = gpa; 1961 ghc->generation = slots->generation; 1962 ghc->len = len; 1963 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 1964 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); 1965 if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { 1966 ghc->hva += offset; 1967 } else { 1968 /* 1969 * If the requested region crosses two memslots, we still 1970 * verify that the entire region is valid here. 
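		 * If it does cross, ghc->memslot is cleared below and later
		 * cached accesses fall back to the slower kvm_read_guest()
		 * and kvm_write_guest() paths.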
1971 */ 1972 while (start_gfn <= end_gfn) { 1973 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 1974 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1975 &nr_pages_avail); 1976 if (kvm_is_error_hva(ghc->hva)) 1977 return -EFAULT; 1978 start_gfn += nr_pages_avail; 1979 } 1980 /* Use the slow path for cross page reads and writes. */ 1981 ghc->memslot = NULL; 1982 } 1983 return 0; 1984 } 1985 1986 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1987 gpa_t gpa, unsigned long len) 1988 { 1989 struct kvm_memslots *slots = kvm_memslots(kvm); 1990 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 1991 } 1992 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1993 1994 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1995 void *data, int offset, unsigned long len) 1996 { 1997 struct kvm_memslots *slots = kvm_memslots(kvm); 1998 int r; 1999 gpa_t gpa = ghc->gpa + offset; 2000 2001 BUG_ON(len + offset > ghc->len); 2002 2003 if (slots->generation != ghc->generation) 2004 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); 2005 2006 if (unlikely(!ghc->memslot)) 2007 return kvm_write_guest(kvm, gpa, data, len); 2008 2009 if (kvm_is_error_hva(ghc->hva)) 2010 return -EFAULT; 2011 2012 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 2013 if (r) 2014 return -EFAULT; 2015 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT); 2016 2017 return 0; 2018 } 2019 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 2020 2021 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2022 void *data, unsigned long len) 2023 { 2024 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 2025 } 2026 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 2027 2028 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2029 void *data, unsigned long len) 2030 { 2031 struct kvm_memslots *slots = kvm_memslots(kvm); 2032 int r; 2033 2034 BUG_ON(len > ghc->len); 2035 2036 if (slots->generation != ghc->generation) 2037 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); 2038 2039 if (unlikely(!ghc->memslot)) 2040 return kvm_read_guest(kvm, ghc->gpa, data, len); 2041 2042 if (kvm_is_error_hva(ghc->hva)) 2043 return -EFAULT; 2044 2045 r = __copy_from_user(data, (void __user *)ghc->hva, len); 2046 if (r) 2047 return -EFAULT; 2048 2049 return 0; 2050 } 2051 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 2052 2053 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 2054 { 2055 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 2056 2057 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 2058 } 2059 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 2060 2061 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 2062 { 2063 gfn_t gfn = gpa >> PAGE_SHIFT; 2064 int seg; 2065 int offset = offset_in_page(gpa); 2066 int ret; 2067 2068 while ((seg = next_segment(len, offset)) != 0) { 2069 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 2070 if (ret < 0) 2071 return ret; 2072 offset = 0; 2073 len -= seg; 2074 ++gfn; 2075 } 2076 return 0; 2077 } 2078 EXPORT_SYMBOL_GPL(kvm_clear_guest); 2079 2080 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 2081 gfn_t gfn) 2082 { 2083 if (memslot && memslot->dirty_bitmap) { 2084 unsigned long rel_gfn = gfn - memslot->base_gfn; 2085 2086 set_bit_le(rel_gfn, memslot->dirty_bitmap); 2087 } 2088 } 2089 2090 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 2091 { 2092 struct kvm_memory_slot 
*memslot; 2093 2094 memslot = gfn_to_memslot(kvm, gfn); 2095 mark_page_dirty_in_slot(memslot, gfn); 2096 } 2097 EXPORT_SYMBOL_GPL(mark_page_dirty); 2098 2099 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 2100 { 2101 struct kvm_memory_slot *memslot; 2102 2103 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2104 mark_page_dirty_in_slot(memslot, gfn); 2105 } 2106 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 2107 2108 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 2109 { 2110 unsigned int old, val, grow; 2111 2112 old = val = vcpu->halt_poll_ns; 2113 grow = READ_ONCE(halt_poll_ns_grow); 2114 /* 10us base */ 2115 if (val == 0 && grow) 2116 val = 10000; 2117 else 2118 val *= grow; 2119 2120 if (val > halt_poll_ns) 2121 val = halt_poll_ns; 2122 2123 vcpu->halt_poll_ns = val; 2124 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 2125 } 2126 2127 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 2128 { 2129 unsigned int old, val, shrink; 2130 2131 old = val = vcpu->halt_poll_ns; 2132 shrink = READ_ONCE(halt_poll_ns_shrink); 2133 if (shrink == 0) 2134 val = 0; 2135 else 2136 val /= shrink; 2137 2138 vcpu->halt_poll_ns = val; 2139 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 2140 } 2141 2142 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 2143 { 2144 if (kvm_arch_vcpu_runnable(vcpu)) { 2145 kvm_make_request(KVM_REQ_UNHALT, vcpu); 2146 return -EINTR; 2147 } 2148 if (kvm_cpu_has_pending_timer(vcpu)) 2149 return -EINTR; 2150 if (signal_pending(current)) 2151 return -EINTR; 2152 2153 return 0; 2154 } 2155 2156 /* 2157 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 2158 */ 2159 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 2160 { 2161 ktime_t start, cur; 2162 DECLARE_SWAITQUEUE(wait); 2163 bool waited = false; 2164 u64 block_ns; 2165 2166 start = cur = ktime_get(); 2167 if (vcpu->halt_poll_ns) { 2168 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2169 2170 ++vcpu->stat.halt_attempted_poll; 2171 do { 2172 /* 2173 * This sets KVM_REQ_UNHALT if an interrupt 2174 * arrives. 
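 * Polling only continues while no other task is runnable on
 * this CPU (single_task_running()) and the halt_poll_ns
 * window has not yet expired; once either condition fails we
 * fall through to the swait-based sleep below.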
2175 */ 2176 if (kvm_vcpu_check_block(vcpu) < 0) { 2177 ++vcpu->stat.halt_successful_poll; 2178 if (!vcpu_valid_wakeup(vcpu)) 2179 ++vcpu->stat.halt_poll_invalid; 2180 goto out; 2181 } 2182 cur = ktime_get(); 2183 } while (single_task_running() && ktime_before(cur, stop)); 2184 } 2185 2186 kvm_arch_vcpu_blocking(vcpu); 2187 2188 for (;;) { 2189 prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2190 2191 if (kvm_vcpu_check_block(vcpu) < 0) 2192 break; 2193 2194 waited = true; 2195 schedule(); 2196 } 2197 2198 finish_swait(&vcpu->wq, &wait); 2199 cur = ktime_get(); 2200 2201 kvm_arch_vcpu_unblocking(vcpu); 2202 out: 2203 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2204 2205 if (!vcpu_valid_wakeup(vcpu)) 2206 shrink_halt_poll_ns(vcpu); 2207 else if (halt_poll_ns) { 2208 if (block_ns <= vcpu->halt_poll_ns) 2209 ; 2210 /* we had a long block, shrink polling */ 2211 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2212 shrink_halt_poll_ns(vcpu); 2213 /* we had a short halt and our poll time is too small */ 2214 else if (vcpu->halt_poll_ns < halt_poll_ns && 2215 block_ns < halt_poll_ns) 2216 grow_halt_poll_ns(vcpu); 2217 } else 2218 vcpu->halt_poll_ns = 0; 2219 2220 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 2221 kvm_arch_vcpu_block_finish(vcpu); 2222 } 2223 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2224 2225 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 2226 { 2227 struct swait_queue_head *wqp; 2228 2229 wqp = kvm_arch_vcpu_wq(vcpu); 2230 if (swait_active(wqp)) { 2231 swake_up(wqp); 2232 ++vcpu->stat.halt_wakeup; 2233 return true; 2234 } 2235 2236 return false; 2237 } 2238 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 2239 2240 #ifndef CONFIG_S390 2241 /* 2242 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 2243 */ 2244 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2245 { 2246 int me; 2247 int cpu = vcpu->cpu; 2248 2249 if (kvm_vcpu_wake_up(vcpu)) 2250 return; 2251 2252 me = get_cpu(); 2253 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2254 if (kvm_arch_vcpu_should_kick(vcpu)) 2255 smp_send_reschedule(cpu); 2256 put_cpu(); 2257 } 2258 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2259 #endif /* !CONFIG_S390 */ 2260 2261 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2262 { 2263 struct pid *pid; 2264 struct task_struct *task = NULL; 2265 int ret = 0; 2266 2267 rcu_read_lock(); 2268 pid = rcu_dereference(target->pid); 2269 if (pid) 2270 task = get_pid_task(pid, PIDTYPE_PID); 2271 rcu_read_unlock(); 2272 if (!task) 2273 return ret; 2274 ret = yield_to(task, 1); 2275 put_task_struct(task); 2276 2277 return ret; 2278 } 2279 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2280 2281 /* 2282 * Helper that checks whether a VCPU is eligible for directed yield. 2283 * Most eligible candidate to yield is decided by following heuristics: 2284 * 2285 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 2286 * (preempted lock holder), indicated by @in_spin_loop. 2287 * Set at the beiginning and cleared at the end of interception/PLE handler. 2288 * 2289 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 2290 * chance last time (mostly it has become eligible now since we have probably 2291 * yielded to lockholder in last iteration. This is done by toggling 2292 * @dy_eligible each time a VCPU checked for eligibility.) 2293 * 2294 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 2295 * to preempted lock-holder could result in wrong VCPU selection and CPU 2296 * burning. 
Giving priority for a potential lock-holder increases lock 2297 * progress. 2298 * 2299 * Since algorithm is based on heuristics, accessing another VCPU data without 2300 * locking does not harm. It may result in trying to yield to same VCPU, fail 2301 * and continue with next VCPU and so on. 2302 */ 2303 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2304 { 2305 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2306 bool eligible; 2307 2308 eligible = !vcpu->spin_loop.in_spin_loop || 2309 vcpu->spin_loop.dy_eligible; 2310 2311 if (vcpu->spin_loop.in_spin_loop) 2312 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2313 2314 return eligible; 2315 #else 2316 return true; 2317 #endif 2318 } 2319 2320 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2321 { 2322 struct kvm *kvm = me->kvm; 2323 struct kvm_vcpu *vcpu; 2324 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2325 int yielded = 0; 2326 int try = 3; 2327 int pass; 2328 int i; 2329 2330 kvm_vcpu_set_in_spin_loop(me, true); 2331 /* 2332 * We boost the priority of a VCPU that is runnable but not 2333 * currently running, because it got preempted by something 2334 * else and called schedule in __vcpu_run. Hopefully that 2335 * VCPU is holding the lock that we need and will release it. 2336 * We approximate round-robin by starting at the last boosted VCPU. 2337 */ 2338 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2339 kvm_for_each_vcpu(i, vcpu, kvm) { 2340 if (!pass && i <= last_boosted_vcpu) { 2341 i = last_boosted_vcpu; 2342 continue; 2343 } else if (pass && i > last_boosted_vcpu) 2344 break; 2345 if (!ACCESS_ONCE(vcpu->preempted)) 2346 continue; 2347 if (vcpu == me) 2348 continue; 2349 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2350 continue; 2351 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2352 continue; 2353 2354 yielded = kvm_vcpu_yield_to(vcpu); 2355 if (yielded > 0) { 2356 kvm->last_boosted_vcpu = i; 2357 break; 2358 } else if (yielded < 0) { 2359 try--; 2360 if (!try) 2361 break; 2362 } 2363 } 2364 } 2365 kvm_vcpu_set_in_spin_loop(me, false); 2366 2367 /* Ensure vcpu is not eligible during next spinloop */ 2368 kvm_vcpu_set_dy_eligible(me, false); 2369 } 2370 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2371 2372 static int kvm_vcpu_fault(struct vm_fault *vmf) 2373 { 2374 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 2375 struct page *page; 2376 2377 if (vmf->pgoff == 0) 2378 page = virt_to_page(vcpu->run); 2379 #ifdef CONFIG_X86 2380 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2381 page = virt_to_page(vcpu->arch.pio_data); 2382 #endif 2383 #ifdef CONFIG_KVM_MMIO 2384 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2385 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2386 #endif 2387 else 2388 return kvm_arch_vcpu_fault(vcpu, vmf); 2389 get_page(page); 2390 vmf->page = page; 2391 return 0; 2392 } 2393 2394 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2395 .fault = kvm_vcpu_fault, 2396 }; 2397 2398 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2399 { 2400 vma->vm_ops = &kvm_vcpu_vm_ops; 2401 return 0; 2402 } 2403 2404 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2405 { 2406 struct kvm_vcpu *vcpu = filp->private_data; 2407 2408 debugfs_remove_recursive(vcpu->debugfs_dentry); 2409 kvm_put_kvm(vcpu->kvm); 2410 return 0; 2411 } 2412 2413 static struct file_operations kvm_vcpu_fops = { 2414 .release = kvm_vcpu_release, 2415 .unlocked_ioctl = kvm_vcpu_ioctl, 2416 #ifdef CONFIG_KVM_COMPAT 2417 
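	/* Entry point used when 32-bit userspace issues ioctls on a 64-bit kernel. */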
.compat_ioctl = kvm_vcpu_compat_ioctl, 2418 #endif 2419 .mmap = kvm_vcpu_mmap, 2420 .llseek = noop_llseek, 2421 }; 2422 2423 /* 2424 * Allocates an inode for the vcpu. 2425 */ 2426 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2427 { 2428 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2429 } 2430 2431 static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 2432 { 2433 char dir_name[ITOA_MAX_LEN * 2]; 2434 int ret; 2435 2436 if (!kvm_arch_has_vcpu_debugfs()) 2437 return 0; 2438 2439 if (!debugfs_initialized()) 2440 return 0; 2441 2442 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 2443 vcpu->debugfs_dentry = debugfs_create_dir(dir_name, 2444 vcpu->kvm->debugfs_dentry); 2445 if (!vcpu->debugfs_dentry) 2446 return -ENOMEM; 2447 2448 ret = kvm_arch_create_vcpu_debugfs(vcpu); 2449 if (ret < 0) { 2450 debugfs_remove_recursive(vcpu->debugfs_dentry); 2451 return ret; 2452 } 2453 2454 return 0; 2455 } 2456 2457 /* 2458 * Creates some virtual cpus. Good luck creating more than one. 2459 */ 2460 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2461 { 2462 int r; 2463 struct kvm_vcpu *vcpu; 2464 2465 if (id >= KVM_MAX_VCPU_ID) 2466 return -EINVAL; 2467 2468 mutex_lock(&kvm->lock); 2469 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 2470 mutex_unlock(&kvm->lock); 2471 return -EINVAL; 2472 } 2473 2474 kvm->created_vcpus++; 2475 mutex_unlock(&kvm->lock); 2476 2477 vcpu = kvm_arch_vcpu_create(kvm, id); 2478 if (IS_ERR(vcpu)) { 2479 r = PTR_ERR(vcpu); 2480 goto vcpu_decrement; 2481 } 2482 2483 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2484 2485 r = kvm_arch_vcpu_setup(vcpu); 2486 if (r) 2487 goto vcpu_destroy; 2488 2489 r = kvm_create_vcpu_debugfs(vcpu); 2490 if (r) 2491 goto vcpu_destroy; 2492 2493 mutex_lock(&kvm->lock); 2494 if (kvm_get_vcpu_by_id(kvm, id)) { 2495 r = -EEXIST; 2496 goto unlock_vcpu_destroy; 2497 } 2498 2499 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2500 2501 /* Now it's all set up, let userspace reach it */ 2502 kvm_get_kvm(kvm); 2503 r = create_vcpu_fd(vcpu); 2504 if (r < 0) { 2505 kvm_put_kvm(kvm); 2506 goto unlock_vcpu_destroy; 2507 } 2508 2509 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2510 2511 /* 2512 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2513 * before kvm->online_vcpu's incremented value. 
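 *
 * The reader side (roughly what kvm_get_vcpu() does, together with
 * its callers) is the mirror image of this:
 *
 *	num  = atomic_read(&kvm->online_vcpus);
 *	smp_rmb();
 *	vcpu = kvm->vcpus[idx];		with idx < num
 *
 * so the array slot has to be visible before the count that makes
 * it reachable.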
2514 */ 2515 smp_wmb(); 2516 atomic_inc(&kvm->online_vcpus); 2517 2518 mutex_unlock(&kvm->lock); 2519 kvm_arch_vcpu_postcreate(vcpu); 2520 return r; 2521 2522 unlock_vcpu_destroy: 2523 mutex_unlock(&kvm->lock); 2524 debugfs_remove_recursive(vcpu->debugfs_dentry); 2525 vcpu_destroy: 2526 kvm_arch_vcpu_destroy(vcpu); 2527 vcpu_decrement: 2528 mutex_lock(&kvm->lock); 2529 kvm->created_vcpus--; 2530 mutex_unlock(&kvm->lock); 2531 return r; 2532 } 2533 2534 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2535 { 2536 if (sigset) { 2537 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2538 vcpu->sigset_active = 1; 2539 vcpu->sigset = *sigset; 2540 } else 2541 vcpu->sigset_active = 0; 2542 return 0; 2543 } 2544 2545 static long kvm_vcpu_ioctl(struct file *filp, 2546 unsigned int ioctl, unsigned long arg) 2547 { 2548 struct kvm_vcpu *vcpu = filp->private_data; 2549 void __user *argp = (void __user *)arg; 2550 int r; 2551 struct kvm_fpu *fpu = NULL; 2552 struct kvm_sregs *kvm_sregs = NULL; 2553 2554 if (vcpu->kvm->mm != current->mm) 2555 return -EIO; 2556 2557 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2558 return -EINVAL; 2559 2560 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2561 /* 2562 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2563 * so vcpu_load() would break it. 2564 */ 2565 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2566 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2567 #endif 2568 2569 2570 r = vcpu_load(vcpu); 2571 if (r) 2572 return r; 2573 switch (ioctl) { 2574 case KVM_RUN: { 2575 struct pid *oldpid; 2576 r = -EINVAL; 2577 if (arg) 2578 goto out; 2579 oldpid = rcu_access_pointer(vcpu->pid); 2580 if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) { 2581 /* The thread running this VCPU changed. 
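 * A vcpu file descriptor may be handed to a different task, and
 * vcpu->pid is what directed yield (kvm_vcpu_yield_to()) looks at,
 * so it has to track whoever is issuing KVM_RUN now.  The
 * synchronize_rcu() below lets any concurrent reader of the old
 * pid finish before it is dropped with put_pid().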
*/ 2582 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2583 2584 rcu_assign_pointer(vcpu->pid, newpid); 2585 if (oldpid) 2586 synchronize_rcu(); 2587 put_pid(oldpid); 2588 } 2589 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2590 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2591 break; 2592 } 2593 case KVM_GET_REGS: { 2594 struct kvm_regs *kvm_regs; 2595 2596 r = -ENOMEM; 2597 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2598 if (!kvm_regs) 2599 goto out; 2600 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2601 if (r) 2602 goto out_free1; 2603 r = -EFAULT; 2604 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2605 goto out_free1; 2606 r = 0; 2607 out_free1: 2608 kfree(kvm_regs); 2609 break; 2610 } 2611 case KVM_SET_REGS: { 2612 struct kvm_regs *kvm_regs; 2613 2614 r = -ENOMEM; 2615 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2616 if (IS_ERR(kvm_regs)) { 2617 r = PTR_ERR(kvm_regs); 2618 goto out; 2619 } 2620 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2621 kfree(kvm_regs); 2622 break; 2623 } 2624 case KVM_GET_SREGS: { 2625 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2626 r = -ENOMEM; 2627 if (!kvm_sregs) 2628 goto out; 2629 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2630 if (r) 2631 goto out; 2632 r = -EFAULT; 2633 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2634 goto out; 2635 r = 0; 2636 break; 2637 } 2638 case KVM_SET_SREGS: { 2639 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2640 if (IS_ERR(kvm_sregs)) { 2641 r = PTR_ERR(kvm_sregs); 2642 kvm_sregs = NULL; 2643 goto out; 2644 } 2645 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2646 break; 2647 } 2648 case KVM_GET_MP_STATE: { 2649 struct kvm_mp_state mp_state; 2650 2651 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2652 if (r) 2653 goto out; 2654 r = -EFAULT; 2655 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2656 goto out; 2657 r = 0; 2658 break; 2659 } 2660 case KVM_SET_MP_STATE: { 2661 struct kvm_mp_state mp_state; 2662 2663 r = -EFAULT; 2664 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2665 goto out; 2666 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2667 break; 2668 } 2669 case KVM_TRANSLATE: { 2670 struct kvm_translation tr; 2671 2672 r = -EFAULT; 2673 if (copy_from_user(&tr, argp, sizeof(tr))) 2674 goto out; 2675 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2676 if (r) 2677 goto out; 2678 r = -EFAULT; 2679 if (copy_to_user(argp, &tr, sizeof(tr))) 2680 goto out; 2681 r = 0; 2682 break; 2683 } 2684 case KVM_SET_GUEST_DEBUG: { 2685 struct kvm_guest_debug dbg; 2686 2687 r = -EFAULT; 2688 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2689 goto out; 2690 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2691 break; 2692 } 2693 case KVM_SET_SIGNAL_MASK: { 2694 struct kvm_signal_mask __user *sigmask_arg = argp; 2695 struct kvm_signal_mask kvm_sigmask; 2696 sigset_t sigset, *p; 2697 2698 p = NULL; 2699 if (argp) { 2700 r = -EFAULT; 2701 if (copy_from_user(&kvm_sigmask, argp, 2702 sizeof(kvm_sigmask))) 2703 goto out; 2704 r = -EINVAL; 2705 if (kvm_sigmask.len != sizeof(sigset)) 2706 goto out; 2707 r = -EFAULT; 2708 if (copy_from_user(&sigset, sigmask_arg->sigset, 2709 sizeof(sigset))) 2710 goto out; 2711 p = &sigset; 2712 } 2713 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2714 break; 2715 } 2716 case KVM_GET_FPU: { 2717 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2718 r = -ENOMEM; 2719 if (!fpu) 2720 goto out; 2721 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2722 if (r) 2723 goto out; 2724 r = 
-EFAULT; 2725 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2726 goto out; 2727 r = 0; 2728 break; 2729 } 2730 case KVM_SET_FPU: { 2731 fpu = memdup_user(argp, sizeof(*fpu)); 2732 if (IS_ERR(fpu)) { 2733 r = PTR_ERR(fpu); 2734 fpu = NULL; 2735 goto out; 2736 } 2737 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2738 break; 2739 } 2740 default: 2741 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2742 } 2743 out: 2744 vcpu_put(vcpu); 2745 kfree(fpu); 2746 kfree(kvm_sregs); 2747 return r; 2748 } 2749 2750 #ifdef CONFIG_KVM_COMPAT 2751 static long kvm_vcpu_compat_ioctl(struct file *filp, 2752 unsigned int ioctl, unsigned long arg) 2753 { 2754 struct kvm_vcpu *vcpu = filp->private_data; 2755 void __user *argp = compat_ptr(arg); 2756 int r; 2757 2758 if (vcpu->kvm->mm != current->mm) 2759 return -EIO; 2760 2761 switch (ioctl) { 2762 case KVM_SET_SIGNAL_MASK: { 2763 struct kvm_signal_mask __user *sigmask_arg = argp; 2764 struct kvm_signal_mask kvm_sigmask; 2765 compat_sigset_t csigset; 2766 sigset_t sigset; 2767 2768 if (argp) { 2769 r = -EFAULT; 2770 if (copy_from_user(&kvm_sigmask, argp, 2771 sizeof(kvm_sigmask))) 2772 goto out; 2773 r = -EINVAL; 2774 if (kvm_sigmask.len != sizeof(csigset)) 2775 goto out; 2776 r = -EFAULT; 2777 if (copy_from_user(&csigset, sigmask_arg->sigset, 2778 sizeof(csigset))) 2779 goto out; 2780 sigset_from_compat(&sigset, &csigset); 2781 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2782 } else 2783 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2784 break; 2785 } 2786 default: 2787 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2788 } 2789 2790 out: 2791 return r; 2792 } 2793 #endif 2794 2795 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2796 int (*accessor)(struct kvm_device *dev, 2797 struct kvm_device_attr *attr), 2798 unsigned long arg) 2799 { 2800 struct kvm_device_attr attr; 2801 2802 if (!accessor) 2803 return -EPERM; 2804 2805 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2806 return -EFAULT; 2807 2808 return accessor(dev, &attr); 2809 } 2810 2811 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2812 unsigned long arg) 2813 { 2814 struct kvm_device *dev = filp->private_data; 2815 2816 switch (ioctl) { 2817 case KVM_SET_DEVICE_ATTR: 2818 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2819 case KVM_GET_DEVICE_ATTR: 2820 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2821 case KVM_HAS_DEVICE_ATTR: 2822 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2823 default: 2824 if (dev->ops->ioctl) 2825 return dev->ops->ioctl(dev, ioctl, arg); 2826 2827 return -ENOTTY; 2828 } 2829 } 2830 2831 static int kvm_device_release(struct inode *inode, struct file *filp) 2832 { 2833 struct kvm_device *dev = filp->private_data; 2834 struct kvm *kvm = dev->kvm; 2835 2836 kvm_put_kvm(kvm); 2837 return 0; 2838 } 2839 2840 static const struct file_operations kvm_device_fops = { 2841 .unlocked_ioctl = kvm_device_ioctl, 2842 #ifdef CONFIG_KVM_COMPAT 2843 .compat_ioctl = kvm_device_ioctl, 2844 #endif 2845 .release = kvm_device_release, 2846 }; 2847 2848 struct kvm_device *kvm_device_from_filp(struct file *filp) 2849 { 2850 if (filp->f_op != &kvm_device_fops) 2851 return NULL; 2852 2853 return filp->private_data; 2854 } 2855 2856 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2857 #ifdef CONFIG_KVM_MPIC 2858 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2859 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2860 #endif 2861 }; 2862 2863 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 
2864 { 2865 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2866 return -ENOSPC; 2867 2868 if (kvm_device_ops_table[type] != NULL) 2869 return -EEXIST; 2870 2871 kvm_device_ops_table[type] = ops; 2872 return 0; 2873 } 2874 2875 void kvm_unregister_device_ops(u32 type) 2876 { 2877 if (kvm_device_ops_table[type] != NULL) 2878 kvm_device_ops_table[type] = NULL; 2879 } 2880 2881 static int kvm_ioctl_create_device(struct kvm *kvm, 2882 struct kvm_create_device *cd) 2883 { 2884 struct kvm_device_ops *ops = NULL; 2885 struct kvm_device *dev; 2886 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2887 int ret; 2888 2889 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2890 return -ENODEV; 2891 2892 ops = kvm_device_ops_table[cd->type]; 2893 if (ops == NULL) 2894 return -ENODEV; 2895 2896 if (test) 2897 return 0; 2898 2899 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2900 if (!dev) 2901 return -ENOMEM; 2902 2903 dev->ops = ops; 2904 dev->kvm = kvm; 2905 2906 mutex_lock(&kvm->lock); 2907 ret = ops->create(dev, cd->type); 2908 if (ret < 0) { 2909 mutex_unlock(&kvm->lock); 2910 kfree(dev); 2911 return ret; 2912 } 2913 list_add(&dev->vm_node, &kvm->devices); 2914 mutex_unlock(&kvm->lock); 2915 2916 if (ops->init) 2917 ops->init(dev); 2918 2919 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2920 if (ret < 0) { 2921 mutex_lock(&kvm->lock); 2922 list_del(&dev->vm_node); 2923 mutex_unlock(&kvm->lock); 2924 ops->destroy(dev); 2925 return ret; 2926 } 2927 2928 kvm_get_kvm(kvm); 2929 cd->fd = ret; 2930 return 0; 2931 } 2932 2933 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2934 { 2935 switch (arg) { 2936 case KVM_CAP_USER_MEMORY: 2937 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2938 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2939 case KVM_CAP_INTERNAL_ERROR_DATA: 2940 #ifdef CONFIG_HAVE_KVM_MSI 2941 case KVM_CAP_SIGNAL_MSI: 2942 #endif 2943 #ifdef CONFIG_HAVE_KVM_IRQFD 2944 case KVM_CAP_IRQFD: 2945 case KVM_CAP_IRQFD_RESAMPLE: 2946 #endif 2947 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2948 case KVM_CAP_CHECK_EXTENSION_VM: 2949 return 1; 2950 #ifdef CONFIG_KVM_MMIO 2951 case KVM_CAP_COALESCED_MMIO: 2952 return KVM_COALESCED_MMIO_PAGE_OFFSET; 2953 #endif 2954 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2955 case KVM_CAP_IRQ_ROUTING: 2956 return KVM_MAX_IRQ_ROUTES; 2957 #endif 2958 #if KVM_ADDRESS_SPACE_NUM > 1 2959 case KVM_CAP_MULTI_ADDRESS_SPACE: 2960 return KVM_ADDRESS_SPACE_NUM; 2961 #endif 2962 case KVM_CAP_MAX_VCPU_ID: 2963 return KVM_MAX_VCPU_ID; 2964 default: 2965 break; 2966 } 2967 return kvm_vm_ioctl_check_extension(kvm, arg); 2968 } 2969 2970 static long kvm_vm_ioctl(struct file *filp, 2971 unsigned int ioctl, unsigned long arg) 2972 { 2973 struct kvm *kvm = filp->private_data; 2974 void __user *argp = (void __user *)arg; 2975 int r; 2976 2977 if (kvm->mm != current->mm) 2978 return -EIO; 2979 switch (ioctl) { 2980 case KVM_CREATE_VCPU: 2981 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2982 break; 2983 case KVM_SET_USER_MEMORY_REGION: { 2984 struct kvm_userspace_memory_region kvm_userspace_mem; 2985 2986 r = -EFAULT; 2987 if (copy_from_user(&kvm_userspace_mem, argp, 2988 sizeof(kvm_userspace_mem))) 2989 goto out; 2990 2991 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2992 break; 2993 } 2994 case KVM_GET_DIRTY_LOG: { 2995 struct kvm_dirty_log log; 2996 2997 r = -EFAULT; 2998 if (copy_from_user(&log, argp, sizeof(log))) 2999 goto out; 3000 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 3001 break; 3002 } 3003 #ifdef CONFIG_KVM_MMIO 3004 case 
KVM_REGISTER_COALESCED_MMIO: { 3005 struct kvm_coalesced_mmio_zone zone; 3006 3007 r = -EFAULT; 3008 if (copy_from_user(&zone, argp, sizeof(zone))) 3009 goto out; 3010 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 3011 break; 3012 } 3013 case KVM_UNREGISTER_COALESCED_MMIO: { 3014 struct kvm_coalesced_mmio_zone zone; 3015 3016 r = -EFAULT; 3017 if (copy_from_user(&zone, argp, sizeof(zone))) 3018 goto out; 3019 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 3020 break; 3021 } 3022 #endif 3023 case KVM_IRQFD: { 3024 struct kvm_irqfd data; 3025 3026 r = -EFAULT; 3027 if (copy_from_user(&data, argp, sizeof(data))) 3028 goto out; 3029 r = kvm_irqfd(kvm, &data); 3030 break; 3031 } 3032 case KVM_IOEVENTFD: { 3033 struct kvm_ioeventfd data; 3034 3035 r = -EFAULT; 3036 if (copy_from_user(&data, argp, sizeof(data))) 3037 goto out; 3038 r = kvm_ioeventfd(kvm, &data); 3039 break; 3040 } 3041 #ifdef CONFIG_HAVE_KVM_MSI 3042 case KVM_SIGNAL_MSI: { 3043 struct kvm_msi msi; 3044 3045 r = -EFAULT; 3046 if (copy_from_user(&msi, argp, sizeof(msi))) 3047 goto out; 3048 r = kvm_send_userspace_msi(kvm, &msi); 3049 break; 3050 } 3051 #endif 3052 #ifdef __KVM_HAVE_IRQ_LINE 3053 case KVM_IRQ_LINE_STATUS: 3054 case KVM_IRQ_LINE: { 3055 struct kvm_irq_level irq_event; 3056 3057 r = -EFAULT; 3058 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 3059 goto out; 3060 3061 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 3062 ioctl == KVM_IRQ_LINE_STATUS); 3063 if (r) 3064 goto out; 3065 3066 r = -EFAULT; 3067 if (ioctl == KVM_IRQ_LINE_STATUS) { 3068 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 3069 goto out; 3070 } 3071 3072 r = 0; 3073 break; 3074 } 3075 #endif 3076 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 3077 case KVM_SET_GSI_ROUTING: { 3078 struct kvm_irq_routing routing; 3079 struct kvm_irq_routing __user *urouting; 3080 struct kvm_irq_routing_entry *entries = NULL; 3081 3082 r = -EFAULT; 3083 if (copy_from_user(&routing, argp, sizeof(routing))) 3084 goto out; 3085 r = -EINVAL; 3086 if (!kvm_arch_can_set_irq_routing(kvm)) 3087 goto out; 3088 if (routing.nr > KVM_MAX_IRQ_ROUTES) 3089 goto out; 3090 if (routing.flags) 3091 goto out; 3092 if (routing.nr) { 3093 r = -ENOMEM; 3094 entries = vmalloc(routing.nr * sizeof(*entries)); 3095 if (!entries) 3096 goto out; 3097 r = -EFAULT; 3098 urouting = argp; 3099 if (copy_from_user(entries, urouting->entries, 3100 routing.nr * sizeof(*entries))) 3101 goto out_free_irq_routing; 3102 } 3103 r = kvm_set_irq_routing(kvm, entries, routing.nr, 3104 routing.flags); 3105 out_free_irq_routing: 3106 vfree(entries); 3107 break; 3108 } 3109 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 3110 case KVM_CREATE_DEVICE: { 3111 struct kvm_create_device cd; 3112 3113 r = -EFAULT; 3114 if (copy_from_user(&cd, argp, sizeof(cd))) 3115 goto out; 3116 3117 r = kvm_ioctl_create_device(kvm, &cd); 3118 if (r) 3119 goto out; 3120 3121 r = -EFAULT; 3122 if (copy_to_user(argp, &cd, sizeof(cd))) 3123 goto out; 3124 3125 r = 0; 3126 break; 3127 } 3128 case KVM_CHECK_EXTENSION: 3129 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 3130 break; 3131 default: 3132 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 3133 } 3134 out: 3135 return r; 3136 } 3137 3138 #ifdef CONFIG_KVM_COMPAT 3139 struct compat_kvm_dirty_log { 3140 __u32 slot; 3141 __u32 padding1; 3142 union { 3143 compat_uptr_t dirty_bitmap; /* one bit per page */ 3144 __u64 padding2; 3145 }; 3146 }; 3147 3148 static long kvm_vm_compat_ioctl(struct file *filp, 3149 unsigned int ioctl, unsigned long arg) 3150 { 3151 struct kvm *kvm = 
filp->private_data; 3152 int r; 3153 3154 if (kvm->mm != current->mm) 3155 return -EIO; 3156 switch (ioctl) { 3157 case KVM_GET_DIRTY_LOG: { 3158 struct compat_kvm_dirty_log compat_log; 3159 struct kvm_dirty_log log; 3160 3161 if (copy_from_user(&compat_log, (void __user *)arg, 3162 sizeof(compat_log))) 3163 return -EFAULT; 3164 log.slot = compat_log.slot; 3165 log.padding1 = compat_log.padding1; 3166 log.padding2 = compat_log.padding2; 3167 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 3168 3169 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 3170 break; 3171 } 3172 default: 3173 r = kvm_vm_ioctl(filp, ioctl, arg); 3174 } 3175 return r; 3176 } 3177 #endif 3178 3179 static struct file_operations kvm_vm_fops = { 3180 .release = kvm_vm_release, 3181 .unlocked_ioctl = kvm_vm_ioctl, 3182 #ifdef CONFIG_KVM_COMPAT 3183 .compat_ioctl = kvm_vm_compat_ioctl, 3184 #endif 3185 .llseek = noop_llseek, 3186 }; 3187 3188 static int kvm_dev_ioctl_create_vm(unsigned long type) 3189 { 3190 int r; 3191 struct kvm *kvm; 3192 struct file *file; 3193 3194 kvm = kvm_create_vm(type); 3195 if (IS_ERR(kvm)) 3196 return PTR_ERR(kvm); 3197 #ifdef CONFIG_KVM_MMIO 3198 r = kvm_coalesced_mmio_init(kvm); 3199 if (r < 0) { 3200 kvm_put_kvm(kvm); 3201 return r; 3202 } 3203 #endif 3204 r = get_unused_fd_flags(O_CLOEXEC); 3205 if (r < 0) { 3206 kvm_put_kvm(kvm); 3207 return r; 3208 } 3209 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 3210 if (IS_ERR(file)) { 3211 put_unused_fd(r); 3212 kvm_put_kvm(kvm); 3213 return PTR_ERR(file); 3214 } 3215 3216 /* 3217 * Don't call kvm_put_kvm anymore at this point; file->f_op is 3218 * already set, with ->release() being kvm_vm_release(). In error 3219 * cases it will be called by the final fput(file) and will take 3220 * care of doing kvm_put_kvm(kvm). 
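 *
 * Note also that the new fd is only published with fd_install()
 * after the debugfs directory has been created, so userspace never
 * sees a VM fd whose setup can still fail.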
3221 */ 3222 if (kvm_create_vm_debugfs(kvm, r) < 0) { 3223 put_unused_fd(r); 3224 fput(file); 3225 return -ENOMEM; 3226 } 3227 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 3228 3229 fd_install(r, file); 3230 return r; 3231 } 3232 3233 static long kvm_dev_ioctl(struct file *filp, 3234 unsigned int ioctl, unsigned long arg) 3235 { 3236 long r = -EINVAL; 3237 3238 switch (ioctl) { 3239 case KVM_GET_API_VERSION: 3240 if (arg) 3241 goto out; 3242 r = KVM_API_VERSION; 3243 break; 3244 case KVM_CREATE_VM: 3245 r = kvm_dev_ioctl_create_vm(arg); 3246 break; 3247 case KVM_CHECK_EXTENSION: 3248 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 3249 break; 3250 case KVM_GET_VCPU_MMAP_SIZE: 3251 if (arg) 3252 goto out; 3253 r = PAGE_SIZE; /* struct kvm_run */ 3254 #ifdef CONFIG_X86 3255 r += PAGE_SIZE; /* pio data page */ 3256 #endif 3257 #ifdef CONFIG_KVM_MMIO 3258 r += PAGE_SIZE; /* coalesced mmio ring page */ 3259 #endif 3260 break; 3261 case KVM_TRACE_ENABLE: 3262 case KVM_TRACE_PAUSE: 3263 case KVM_TRACE_DISABLE: 3264 r = -EOPNOTSUPP; 3265 break; 3266 default: 3267 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3268 } 3269 out: 3270 return r; 3271 } 3272 3273 static struct file_operations kvm_chardev_ops = { 3274 .unlocked_ioctl = kvm_dev_ioctl, 3275 .compat_ioctl = kvm_dev_ioctl, 3276 .llseek = noop_llseek, 3277 }; 3278 3279 static struct miscdevice kvm_dev = { 3280 KVM_MINOR, 3281 "kvm", 3282 &kvm_chardev_ops, 3283 }; 3284 3285 static void hardware_enable_nolock(void *junk) 3286 { 3287 int cpu = raw_smp_processor_id(); 3288 int r; 3289 3290 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3291 return; 3292 3293 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3294 3295 r = kvm_arch_hardware_enable(); 3296 3297 if (r) { 3298 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3299 atomic_inc(&hardware_enable_failed); 3300 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3301 } 3302 } 3303 3304 static int kvm_starting_cpu(unsigned int cpu) 3305 { 3306 raw_spin_lock(&kvm_count_lock); 3307 if (kvm_usage_count) 3308 hardware_enable_nolock(NULL); 3309 raw_spin_unlock(&kvm_count_lock); 3310 return 0; 3311 } 3312 3313 static void hardware_disable_nolock(void *junk) 3314 { 3315 int cpu = raw_smp_processor_id(); 3316 3317 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3318 return; 3319 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3320 kvm_arch_hardware_disable(); 3321 } 3322 3323 static int kvm_dying_cpu(unsigned int cpu) 3324 { 3325 raw_spin_lock(&kvm_count_lock); 3326 if (kvm_usage_count) 3327 hardware_disable_nolock(NULL); 3328 raw_spin_unlock(&kvm_count_lock); 3329 return 0; 3330 } 3331 3332 static void hardware_disable_all_nolock(void) 3333 { 3334 BUG_ON(!kvm_usage_count); 3335 3336 kvm_usage_count--; 3337 if (!kvm_usage_count) 3338 on_each_cpu(hardware_disable_nolock, NULL, 1); 3339 } 3340 3341 static void hardware_disable_all(void) 3342 { 3343 raw_spin_lock(&kvm_count_lock); 3344 hardware_disable_all_nolock(); 3345 raw_spin_unlock(&kvm_count_lock); 3346 } 3347 3348 static int hardware_enable_all(void) 3349 { 3350 int r = 0; 3351 3352 raw_spin_lock(&kvm_count_lock); 3353 3354 kvm_usage_count++; 3355 if (kvm_usage_count == 1) { 3356 atomic_set(&hardware_enable_failed, 0); 3357 on_each_cpu(hardware_enable_nolock, NULL, 1); 3358 3359 if (atomic_read(&hardware_enable_failed)) { 3360 hardware_disable_all_nolock(); 3361 r = -EBUSY; 3362 } 3363 } 3364 3365 raw_spin_unlock(&kvm_count_lock); 3366 3367 return r; 3368 } 3369 3370 static int kvm_reboot(struct notifier_block *notifier, 
unsigned long val, 3371 void *v) 3372 { 3373 /* 3374 * Some (well, at least mine) BIOSes hang on reboot if 3375 * in vmx root mode. 3376 * 3377 * And Intel TXT required VMX off for all cpu when system shutdown. 3378 */ 3379 pr_info("kvm: exiting hardware virtualization\n"); 3380 kvm_rebooting = true; 3381 on_each_cpu(hardware_disable_nolock, NULL, 1); 3382 return NOTIFY_OK; 3383 } 3384 3385 static struct notifier_block kvm_reboot_notifier = { 3386 .notifier_call = kvm_reboot, 3387 .priority = 0, 3388 }; 3389 3390 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 3391 { 3392 int i; 3393 3394 for (i = 0; i < bus->dev_count; i++) { 3395 struct kvm_io_device *pos = bus->range[i].dev; 3396 3397 kvm_iodevice_destructor(pos); 3398 } 3399 kfree(bus); 3400 } 3401 3402 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3403 const struct kvm_io_range *r2) 3404 { 3405 gpa_t addr1 = r1->addr; 3406 gpa_t addr2 = r2->addr; 3407 3408 if (addr1 < addr2) 3409 return -1; 3410 3411 /* If r2->len == 0, match the exact address. If r2->len != 0, 3412 * accept any overlapping write. Any order is acceptable for 3413 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 3414 * we process all of them. 3415 */ 3416 if (r2->len) { 3417 addr1 += r1->len; 3418 addr2 += r2->len; 3419 } 3420 3421 if (addr1 > addr2) 3422 return 1; 3423 3424 return 0; 3425 } 3426 3427 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3428 { 3429 return kvm_io_bus_cmp(p1, p2); 3430 } 3431 3432 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3433 gpa_t addr, int len) 3434 { 3435 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3436 .addr = addr, 3437 .len = len, 3438 .dev = dev, 3439 }; 3440 3441 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3442 kvm_io_bus_sort_cmp, NULL); 3443 3444 return 0; 3445 } 3446 3447 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3448 gpa_t addr, int len) 3449 { 3450 struct kvm_io_range *range, key; 3451 int off; 3452 3453 key = (struct kvm_io_range) { 3454 .addr = addr, 3455 .len = len, 3456 }; 3457 3458 range = bsearch(&key, bus->range, bus->dev_count, 3459 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3460 if (range == NULL) 3461 return -ENOENT; 3462 3463 off = range - bus->range; 3464 3465 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3466 off--; 3467 3468 return off; 3469 } 3470 3471 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3472 struct kvm_io_range *range, const void *val) 3473 { 3474 int idx; 3475 3476 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3477 if (idx < 0) 3478 return -EOPNOTSUPP; 3479 3480 while (idx < bus->dev_count && 3481 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3482 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3483 range->len, val)) 3484 return idx; 3485 idx++; 3486 } 3487 3488 return -EOPNOTSUPP; 3489 } 3490 3491 /* kvm_io_bus_write - called under kvm->slots_lock */ 3492 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3493 int len, const void *val) 3494 { 3495 struct kvm_io_bus *bus; 3496 struct kvm_io_range range; 3497 int r; 3498 3499 range = (struct kvm_io_range) { 3500 .addr = addr, 3501 .len = len, 3502 }; 3503 3504 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3505 if (!bus) 3506 return -ENOMEM; 3507 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3508 return r < 0 ? 
r : 0; 3509 } 3510 3511 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3512 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3513 gpa_t addr, int len, const void *val, long cookie) 3514 { 3515 struct kvm_io_bus *bus; 3516 struct kvm_io_range range; 3517 3518 range = (struct kvm_io_range) { 3519 .addr = addr, 3520 .len = len, 3521 }; 3522 3523 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3524 if (!bus) 3525 return -ENOMEM; 3526 3527 /* First try the device referenced by cookie. */ 3528 if ((cookie >= 0) && (cookie < bus->dev_count) && 3529 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3530 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3531 val)) 3532 return cookie; 3533 3534 /* 3535 * cookie contained garbage; fall back to search and return the 3536 * correct cookie value. 3537 */ 3538 return __kvm_io_bus_write(vcpu, bus, &range, val); 3539 } 3540 3541 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3542 struct kvm_io_range *range, void *val) 3543 { 3544 int idx; 3545 3546 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3547 if (idx < 0) 3548 return -EOPNOTSUPP; 3549 3550 while (idx < bus->dev_count && 3551 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3552 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3553 range->len, val)) 3554 return idx; 3555 idx++; 3556 } 3557 3558 return -EOPNOTSUPP; 3559 } 3560 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3561 3562 /* kvm_io_bus_read - called under kvm->slots_lock */ 3563 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3564 int len, void *val) 3565 { 3566 struct kvm_io_bus *bus; 3567 struct kvm_io_range range; 3568 int r; 3569 3570 range = (struct kvm_io_range) { 3571 .addr = addr, 3572 .len = len, 3573 }; 3574 3575 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3576 if (!bus) 3577 return -ENOMEM; 3578 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3579 return r < 0 ? r : 0; 3580 } 3581 3582 3583 /* Caller must hold slots_lock. */ 3584 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3585 int len, struct kvm_io_device *dev) 3586 { 3587 struct kvm_io_bus *new_bus, *bus; 3588 3589 bus = kvm_get_bus(kvm, bus_idx); 3590 if (!bus) 3591 return -ENOMEM; 3592 3593 /* exclude ioeventfd which is limited by maximum fd */ 3594 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3595 return -ENOSPC; 3596 3597 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3598 sizeof(struct kvm_io_range)), GFP_KERNEL); 3599 if (!new_bus) 3600 return -ENOMEM; 3601 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3602 sizeof(struct kvm_io_range))); 3603 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3604 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3605 synchronize_srcu_expedited(&kvm->srcu); 3606 kfree(bus); 3607 3608 return 0; 3609 } 3610 3611 /* Caller must hold slots_lock. 
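 *
 * Removal normally copies the bus into a smaller array, publishes
 * the copy with rcu_assign_pointer() and then waits for an SRCU
 * grace period before freeing the old bus, so readers that looked
 * the device up under kvm->srcu are never left with a dangling
 * pointer.  If the smaller allocation fails, the whole bus is
 * dropped instead.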
*/ 3612 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3613 struct kvm_io_device *dev) 3614 { 3615 int i; 3616 struct kvm_io_bus *new_bus, *bus; 3617 3618 bus = kvm_get_bus(kvm, bus_idx); 3619 if (!bus) 3620 return; 3621 3622 for (i = 0; i < bus->dev_count; i++) 3623 if (bus->range[i].dev == dev) { 3624 break; 3625 } 3626 3627 if (i == bus->dev_count) 3628 return; 3629 3630 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3631 sizeof(struct kvm_io_range)), GFP_KERNEL); 3632 if (!new_bus) { 3633 pr_err("kvm: failed to shrink bus, removing it completely\n"); 3634 goto broken; 3635 } 3636 3637 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3638 new_bus->dev_count--; 3639 memcpy(new_bus->range + i, bus->range + i + 1, 3640 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3641 3642 broken: 3643 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3644 synchronize_srcu_expedited(&kvm->srcu); 3645 kfree(bus); 3646 return; 3647 } 3648 3649 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3650 gpa_t addr) 3651 { 3652 struct kvm_io_bus *bus; 3653 int dev_idx, srcu_idx; 3654 struct kvm_io_device *iodev = NULL; 3655 3656 srcu_idx = srcu_read_lock(&kvm->srcu); 3657 3658 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3659 if (!bus) 3660 goto out_unlock; 3661 3662 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3663 if (dev_idx < 0) 3664 goto out_unlock; 3665 3666 iodev = bus->range[dev_idx].dev; 3667 3668 out_unlock: 3669 srcu_read_unlock(&kvm->srcu, srcu_idx); 3670 3671 return iodev; 3672 } 3673 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 3674 3675 static int kvm_debugfs_open(struct inode *inode, struct file *file, 3676 int (*get)(void *, u64 *), int (*set)(void *, u64), 3677 const char *fmt) 3678 { 3679 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3680 inode->i_private; 3681 3682 /* The debugfs files are a reference to the kvm struct which 3683 * is still valid when kvm_destroy_vm is called. 3684 * To avoid the race between open and the removal of the debugfs 3685 * directory we test against the users count. 
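 * If users_count has already dropped to zero the VM is being torn
 * down; refcount_inc_not_zero() then fails and the open returns
 * -ENOENT instead of resurrecting a dying kvm.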
3686 */ 3687 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) 3688 return -ENOENT; 3689 3690 if (simple_attr_open(inode, file, get, set, fmt)) { 3691 kvm_put_kvm(stat_data->kvm); 3692 return -ENOMEM; 3693 } 3694 3695 return 0; 3696 } 3697 3698 static int kvm_debugfs_release(struct inode *inode, struct file *file) 3699 { 3700 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3701 inode->i_private; 3702 3703 simple_attr_release(inode, file); 3704 kvm_put_kvm(stat_data->kvm); 3705 3706 return 0; 3707 } 3708 3709 static int vm_stat_get_per_vm(void *data, u64 *val) 3710 { 3711 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3712 3713 *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset); 3714 3715 return 0; 3716 } 3717 3718 static int vm_stat_clear_per_vm(void *data, u64 val) 3719 { 3720 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3721 3722 if (val) 3723 return -EINVAL; 3724 3725 *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0; 3726 3727 return 0; 3728 } 3729 3730 static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file) 3731 { 3732 __simple_attr_check_format("%llu\n", 0ull); 3733 return kvm_debugfs_open(inode, file, vm_stat_get_per_vm, 3734 vm_stat_clear_per_vm, "%llu\n"); 3735 } 3736 3737 static const struct file_operations vm_stat_get_per_vm_fops = { 3738 .owner = THIS_MODULE, 3739 .open = vm_stat_get_per_vm_open, 3740 .release = kvm_debugfs_release, 3741 .read = simple_attr_read, 3742 .write = simple_attr_write, 3743 .llseek = no_llseek, 3744 }; 3745 3746 static int vcpu_stat_get_per_vm(void *data, u64 *val) 3747 { 3748 int i; 3749 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3750 struct kvm_vcpu *vcpu; 3751 3752 *val = 0; 3753 3754 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) 3755 *val += *(u64 *)((void *)vcpu + stat_data->offset); 3756 3757 return 0; 3758 } 3759 3760 static int vcpu_stat_clear_per_vm(void *data, u64 val) 3761 { 3762 int i; 3763 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3764 struct kvm_vcpu *vcpu; 3765 3766 if (val) 3767 return -EINVAL; 3768 3769 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) 3770 *(u64 *)((void *)vcpu + stat_data->offset) = 0; 3771 3772 return 0; 3773 } 3774 3775 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file) 3776 { 3777 __simple_attr_check_format("%llu\n", 0ull); 3778 return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm, 3779 vcpu_stat_clear_per_vm, "%llu\n"); 3780 } 3781 3782 static const struct file_operations vcpu_stat_get_per_vm_fops = { 3783 .owner = THIS_MODULE, 3784 .open = vcpu_stat_get_per_vm_open, 3785 .release = kvm_debugfs_release, 3786 .read = simple_attr_read, 3787 .write = simple_attr_write, 3788 .llseek = no_llseek, 3789 }; 3790 3791 static const struct file_operations *stat_fops_per_vm[] = { 3792 [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops, 3793 [KVM_STAT_VM] = &vm_stat_get_per_vm_fops, 3794 }; 3795 3796 static int vm_stat_get(void *_offset, u64 *val) 3797 { 3798 unsigned offset = (long)_offset; 3799 struct kvm *kvm; 3800 struct kvm_stat_data stat_tmp = {.offset = offset}; 3801 u64 tmp_val; 3802 3803 *val = 0; 3804 spin_lock(&kvm_lock); 3805 list_for_each_entry(kvm, &vm_list, vm_list) { 3806 stat_tmp.kvm = kvm; 3807 vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3808 *val += tmp_val; 3809 } 3810 spin_unlock(&kvm_lock); 3811 return 0; 3812 } 3813 3814 static int vm_stat_clear(void *_offset, u64 val) 3815 { 3816 unsigned offset = (long)_offset; 3817 struct kvm *kvm; 3818 struct 
kvm_stat_data stat_tmp = {.offset = offset}; 3819 3820 if (val) 3821 return -EINVAL; 3822 3823 spin_lock(&kvm_lock); 3824 list_for_each_entry(kvm, &vm_list, vm_list) { 3825 stat_tmp.kvm = kvm; 3826 vm_stat_clear_per_vm((void *)&stat_tmp, 0); 3827 } 3828 spin_unlock(&kvm_lock); 3829 3830 return 0; 3831 } 3832 3833 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 3834 3835 static int vcpu_stat_get(void *_offset, u64 *val) 3836 { 3837 unsigned offset = (long)_offset; 3838 struct kvm *kvm; 3839 struct kvm_stat_data stat_tmp = {.offset = offset}; 3840 u64 tmp_val; 3841 3842 *val = 0; 3843 spin_lock(&kvm_lock); 3844 list_for_each_entry(kvm, &vm_list, vm_list) { 3845 stat_tmp.kvm = kvm; 3846 vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3847 *val += tmp_val; 3848 } 3849 spin_unlock(&kvm_lock); 3850 return 0; 3851 } 3852 3853 static int vcpu_stat_clear(void *_offset, u64 val) 3854 { 3855 unsigned offset = (long)_offset; 3856 struct kvm *kvm; 3857 struct kvm_stat_data stat_tmp = {.offset = offset}; 3858 3859 if (val) 3860 return -EINVAL; 3861 3862 spin_lock(&kvm_lock); 3863 list_for_each_entry(kvm, &vm_list, vm_list) { 3864 stat_tmp.kvm = kvm; 3865 vcpu_stat_clear_per_vm((void *)&stat_tmp, 0); 3866 } 3867 spin_unlock(&kvm_lock); 3868 3869 return 0; 3870 } 3871 3872 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 3873 "%llu\n"); 3874 3875 static const struct file_operations *stat_fops[] = { 3876 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3877 [KVM_STAT_VM] = &vm_stat_fops, 3878 }; 3879 3880 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 3881 { 3882 struct kobj_uevent_env *env; 3883 unsigned long long created, active; 3884 3885 if (!kvm_dev.this_device || !kvm) 3886 return; 3887 3888 spin_lock(&kvm_lock); 3889 if (type == KVM_EVENT_CREATE_VM) { 3890 kvm_createvm_count++; 3891 kvm_active_vms++; 3892 } else if (type == KVM_EVENT_DESTROY_VM) { 3893 kvm_active_vms--; 3894 } 3895 created = kvm_createvm_count; 3896 active = kvm_active_vms; 3897 spin_unlock(&kvm_lock); 3898 3899 env = kzalloc(sizeof(*env), GFP_KERNEL); 3900 if (!env) 3901 return; 3902 3903 add_uevent_var(env, "CREATED=%llu", created); 3904 add_uevent_var(env, "COUNT=%llu", active); 3905 3906 if (type == KVM_EVENT_CREATE_VM) { 3907 add_uevent_var(env, "EVENT=create"); 3908 kvm->userspace_pid = task_pid_nr(current); 3909 } else if (type == KVM_EVENT_DESTROY_VM) { 3910 add_uevent_var(env, "EVENT=destroy"); 3911 } 3912 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 3913 3914 if (kvm->debugfs_dentry) { 3915 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL); 3916 3917 if (p) { 3918 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 3919 if (!IS_ERR(tmp)) 3920 add_uevent_var(env, "STATS_PATH=%s", tmp); 3921 kfree(p); 3922 } 3923 } 3924 /* no need for checks, since we are adding at most only 5 keys */ 3925 env->envp[env->envp_idx++] = NULL; 3926 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 3927 kfree(env); 3928 } 3929 3930 static int kvm_init_debug(void) 3931 { 3932 int r = -EEXIST; 3933 struct kvm_stats_debugfs_item *p; 3934 3935 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3936 if (kvm_debugfs_dir == NULL) 3937 goto out; 3938 3939 kvm_debugfs_num_entries = 0; 3940 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 3941 if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir, 3942 (void *)(long)p->offset, 3943 stat_fops[p->kind])) 3944 goto out_dir; 3945 } 3946 3947 return 0; 3948 3949 out_dir: 3950 
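	/* Creating one of the stat files failed: drop the whole kvm debugfs dir. */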
debugfs_remove_recursive(kvm_debugfs_dir); 3951 out: 3952 return r; 3953 } 3954 3955 static int kvm_suspend(void) 3956 { 3957 if (kvm_usage_count) 3958 hardware_disable_nolock(NULL); 3959 return 0; 3960 } 3961 3962 static void kvm_resume(void) 3963 { 3964 if (kvm_usage_count) { 3965 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3966 hardware_enable_nolock(NULL); 3967 } 3968 } 3969 3970 static struct syscore_ops kvm_syscore_ops = { 3971 .suspend = kvm_suspend, 3972 .resume = kvm_resume, 3973 }; 3974 3975 static inline 3976 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3977 { 3978 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3979 } 3980 3981 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3982 { 3983 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3984 3985 if (vcpu->preempted) 3986 vcpu->preempted = false; 3987 3988 kvm_arch_sched_in(vcpu, cpu); 3989 3990 kvm_arch_vcpu_load(vcpu, cpu); 3991 } 3992 3993 static void kvm_sched_out(struct preempt_notifier *pn, 3994 struct task_struct *next) 3995 { 3996 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3997 3998 if (current->state == TASK_RUNNING) 3999 vcpu->preempted = true; 4000 kvm_arch_vcpu_put(vcpu); 4001 } 4002 4003 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 4004 struct module *module) 4005 { 4006 int r; 4007 int cpu; 4008 4009 r = kvm_arch_init(opaque); 4010 if (r) 4011 goto out_fail; 4012 4013 /* 4014 * kvm_arch_init makes sure there's at most one caller 4015 * for architectures that support multiple implementations, 4016 * like intel and amd on x86. 4017 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 4018 * conflicts in case kvm is already setup for another implementation. 4019 */ 4020 r = kvm_irqfd_init(); 4021 if (r) 4022 goto out_irqfd; 4023 4024 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 4025 r = -ENOMEM; 4026 goto out_free_0; 4027 } 4028 4029 r = kvm_arch_hardware_setup(); 4030 if (r < 0) 4031 goto out_free_0a; 4032 4033 for_each_online_cpu(cpu) { 4034 smp_call_function_single(cpu, 4035 kvm_arch_check_processor_compat, 4036 &r, 1); 4037 if (r < 0) 4038 goto out_free_1; 4039 } 4040 4041 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 4042 kvm_starting_cpu, kvm_dying_cpu); 4043 if (r) 4044 goto out_free_2; 4045 register_reboot_notifier(&kvm_reboot_notifier); 4046 4047 /* A kmem cache lets us meet the alignment requirements of fx_save. 
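 * vcpu_size and vcpu_align come from the arch module, which embeds
 * struct kvm_vcpu inside its own, larger vcpu structure; if the arch
 * does not ask for a specific alignment, the natural alignment of
 * struct kvm_vcpu is used below.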
*/ 4048 if (!vcpu_align) 4049 vcpu_align = __alignof__(struct kvm_vcpu); 4050 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 4051 0, NULL); 4052 if (!kvm_vcpu_cache) { 4053 r = -ENOMEM; 4054 goto out_free_3; 4055 } 4056 4057 r = kvm_async_pf_init(); 4058 if (r) 4059 goto out_free; 4060 4061 kvm_chardev_ops.owner = module; 4062 kvm_vm_fops.owner = module; 4063 kvm_vcpu_fops.owner = module; 4064 4065 r = misc_register(&kvm_dev); 4066 if (r) { 4067 pr_err("kvm: misc device register failed\n"); 4068 goto out_unreg; 4069 } 4070 4071 register_syscore_ops(&kvm_syscore_ops); 4072 4073 kvm_preempt_ops.sched_in = kvm_sched_in; 4074 kvm_preempt_ops.sched_out = kvm_sched_out; 4075 4076 r = kvm_init_debug(); 4077 if (r) { 4078 pr_err("kvm: create debugfs files failed\n"); 4079 goto out_undebugfs; 4080 } 4081 4082 r = kvm_vfio_ops_init(); 4083 WARN_ON(r); 4084 4085 return 0; 4086 4087 out_undebugfs: 4088 unregister_syscore_ops(&kvm_syscore_ops); 4089 misc_deregister(&kvm_dev); 4090 out_unreg: 4091 kvm_async_pf_deinit(); 4092 out_free: 4093 kmem_cache_destroy(kvm_vcpu_cache); 4094 out_free_3: 4095 unregister_reboot_notifier(&kvm_reboot_notifier); 4096 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 4097 out_free_2: 4098 out_free_1: 4099 kvm_arch_hardware_unsetup(); 4100 out_free_0a: 4101 free_cpumask_var(cpus_hardware_enabled); 4102 out_free_0: 4103 kvm_irqfd_exit(); 4104 out_irqfd: 4105 kvm_arch_exit(); 4106 out_fail: 4107 return r; 4108 } 4109 EXPORT_SYMBOL_GPL(kvm_init); 4110 4111 void kvm_exit(void) 4112 { 4113 debugfs_remove_recursive(kvm_debugfs_dir); 4114 misc_deregister(&kvm_dev); 4115 kmem_cache_destroy(kvm_vcpu_cache); 4116 kvm_async_pf_deinit(); 4117 unregister_syscore_ops(&kvm_syscore_ops); 4118 unregister_reboot_notifier(&kvm_reboot_notifier); 4119 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 4120 on_each_cpu(hardware_disable_nolock, NULL, 1); 4121 kvm_arch_hardware_unsetup(); 4122 kvm_arch_exit(); 4123 kvm_irqfd_exit(); 4124 free_cpumask_var(cpus_hardware_enabled); 4125 kvm_vfio_ops_exit(); 4126 } 4127 EXPORT_SYMBOL_GPL(kvm_exit); 4128
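/*
 * Illustrative sketch only, not part of this file: an architecture
 * module typically hooks the generic core up from its own module
 * init/exit, passing the size and alignment of its vcpu container to
 * kvm_init().  The names foo_kvm_init, foo_arch_opaque and
 * struct vcpu_foo below are made up for the example; the opaque
 * pointer is whatever that architecture's kvm_arch_init() expects
 * (NULL if it needs nothing).
 *
 *	static int __init foo_kvm_init(void)
 *	{
 *		return kvm_init(foo_arch_opaque, sizeof(struct vcpu_foo),
 *				__alignof__(struct vcpu_foo), THIS_MODULE);
 *	}
 *
 *	static void __exit foo_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(foo_kvm_init);
 *	module_exit(foo_kvm_exit);
 */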