/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;

static struct page *fault_page;
static pfn_t fault_pfn;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		int reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_trans_head(tail);
		reserved = PageReserved(head);
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_make_check_request(req, vcpu))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.
	 * If instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	need_tlb_flush |= kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
347 */ 348 kvm->mmu_notifier_count--; 349 spin_unlock(&kvm->mmu_lock); 350 351 BUG_ON(kvm->mmu_notifier_count < 0); 352 } 353 354 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 355 struct mm_struct *mm, 356 unsigned long address) 357 { 358 struct kvm *kvm = mmu_notifier_to_kvm(mn); 359 int young, idx; 360 361 idx = srcu_read_lock(&kvm->srcu); 362 spin_lock(&kvm->mmu_lock); 363 young = kvm_age_hva(kvm, address); 364 spin_unlock(&kvm->mmu_lock); 365 srcu_read_unlock(&kvm->srcu, idx); 366 367 if (young) 368 kvm_flush_remote_tlbs(kvm); 369 370 return young; 371 } 372 373 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 374 struct mm_struct *mm, 375 unsigned long address) 376 { 377 struct kvm *kvm = mmu_notifier_to_kvm(mn); 378 int young, idx; 379 380 idx = srcu_read_lock(&kvm->srcu); 381 spin_lock(&kvm->mmu_lock); 382 young = kvm_test_age_hva(kvm, address); 383 spin_unlock(&kvm->mmu_lock); 384 srcu_read_unlock(&kvm->srcu, idx); 385 386 return young; 387 } 388 389 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 390 struct mm_struct *mm) 391 { 392 struct kvm *kvm = mmu_notifier_to_kvm(mn); 393 int idx; 394 395 idx = srcu_read_lock(&kvm->srcu); 396 kvm_arch_flush_shadow(kvm); 397 srcu_read_unlock(&kvm->srcu, idx); 398 } 399 400 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 401 .invalidate_page = kvm_mmu_notifier_invalidate_page, 402 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 403 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 404 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 405 .test_young = kvm_mmu_notifier_test_young, 406 .change_pte = kvm_mmu_notifier_change_pte, 407 .release = kvm_mmu_notifier_release, 408 }; 409 410 static int kvm_init_mmu_notifier(struct kvm *kvm) 411 { 412 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 413 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 414 } 415 416 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 417 418 static int kvm_init_mmu_notifier(struct kvm *kvm) 419 { 420 return 0; 421 } 422 423 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 424 425 static struct kvm *kvm_create_vm(void) 426 { 427 int r, i; 428 struct kvm *kvm = kvm_arch_alloc_vm(); 429 430 if (!kvm) 431 return ERR_PTR(-ENOMEM); 432 433 r = kvm_arch_init_vm(kvm); 434 if (r) 435 goto out_err_nodisable; 436 437 r = hardware_enable_all(); 438 if (r) 439 goto out_err_nodisable; 440 441 #ifdef CONFIG_HAVE_KVM_IRQCHIP 442 INIT_HLIST_HEAD(&kvm->mask_notifier_list); 443 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 444 #endif 445 446 r = -ENOMEM; 447 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 448 if (!kvm->memslots) 449 goto out_err_nosrcu; 450 if (init_srcu_struct(&kvm->srcu)) 451 goto out_err_nosrcu; 452 for (i = 0; i < KVM_NR_BUSES; i++) { 453 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 454 GFP_KERNEL); 455 if (!kvm->buses[i]) 456 goto out_err; 457 } 458 459 r = kvm_init_mmu_notifier(kvm); 460 if (r) 461 goto out_err; 462 463 kvm->mm = current->mm; 464 atomic_inc(&kvm->mm->mm_count); 465 spin_lock_init(&kvm->mmu_lock); 466 raw_spin_lock_init(&kvm->requests_lock); 467 kvm_eventfd_init(kvm); 468 mutex_init(&kvm->lock); 469 mutex_init(&kvm->irq_lock); 470 mutex_init(&kvm->slots_lock); 471 atomic_set(&kvm->users_count, 1); 472 spin_lock(&kvm_lock); 473 list_add(&kvm->vm_list, &vm_list); 474 spin_unlock(&kvm_lock); 475 476 return kvm; 477 478 out_err: 479 cleanup_srcu_struct(&kvm->srcu); 480 out_err_nosrcu: 481 
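	/*
	 * srcu was not (successfully) initialised when we land on this label,
	 * so only the hardware enabling and the partial allocations freed
	 * below need to be undone.
	 */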
hardware_disable_all(); 482 out_err_nodisable: 483 for (i = 0; i < KVM_NR_BUSES; i++) 484 kfree(kvm->buses[i]); 485 kfree(kvm->memslots); 486 kvm_arch_free_vm(kvm); 487 return ERR_PTR(r); 488 } 489 490 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 491 { 492 if (!memslot->dirty_bitmap) 493 return; 494 495 if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE) 496 vfree(memslot->dirty_bitmap_head); 497 else 498 kfree(memslot->dirty_bitmap_head); 499 500 memslot->dirty_bitmap = NULL; 501 memslot->dirty_bitmap_head = NULL; 502 } 503 504 /* 505 * Free any memory in @free but not in @dont. 506 */ 507 static void kvm_free_physmem_slot(struct kvm_memory_slot *free, 508 struct kvm_memory_slot *dont) 509 { 510 int i; 511 512 if (!dont || free->rmap != dont->rmap) 513 vfree(free->rmap); 514 515 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 516 kvm_destroy_dirty_bitmap(free); 517 518 519 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { 520 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) { 521 vfree(free->lpage_info[i]); 522 free->lpage_info[i] = NULL; 523 } 524 } 525 526 free->npages = 0; 527 free->rmap = NULL; 528 } 529 530 void kvm_free_physmem(struct kvm *kvm) 531 { 532 int i; 533 struct kvm_memslots *slots = kvm->memslots; 534 535 for (i = 0; i < slots->nmemslots; ++i) 536 kvm_free_physmem_slot(&slots->memslots[i], NULL); 537 538 kfree(kvm->memslots); 539 } 540 541 static void kvm_destroy_vm(struct kvm *kvm) 542 { 543 int i; 544 struct mm_struct *mm = kvm->mm; 545 546 kvm_arch_sync_events(kvm); 547 spin_lock(&kvm_lock); 548 list_del(&kvm->vm_list); 549 spin_unlock(&kvm_lock); 550 kvm_free_irq_routing(kvm); 551 for (i = 0; i < KVM_NR_BUSES; i++) 552 kvm_io_bus_destroy(kvm->buses[i]); 553 kvm_coalesced_mmio_free(kvm); 554 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 555 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 556 #else 557 kvm_arch_flush_shadow(kvm); 558 #endif 559 kvm_arch_destroy_vm(kvm); 560 kvm_free_physmem(kvm); 561 cleanup_srcu_struct(&kvm->srcu); 562 kvm_arch_free_vm(kvm); 563 hardware_disable_all(); 564 mmdrop(mm); 565 } 566 567 void kvm_get_kvm(struct kvm *kvm) 568 { 569 atomic_inc(&kvm->users_count); 570 } 571 EXPORT_SYMBOL_GPL(kvm_get_kvm); 572 573 void kvm_put_kvm(struct kvm *kvm) 574 { 575 if (atomic_dec_and_test(&kvm->users_count)) 576 kvm_destroy_vm(kvm); 577 } 578 EXPORT_SYMBOL_GPL(kvm_put_kvm); 579 580 581 static int kvm_vm_release(struct inode *inode, struct file *filp) 582 { 583 struct kvm *kvm = filp->private_data; 584 585 kvm_irqfd_release(kvm); 586 587 kvm_put_kvm(kvm); 588 return 0; 589 } 590 591 /* 592 * Allocation size is twice as large as the actual dirty bitmap size. 593 * This makes it possible to do double buffering: see x86's 594 * kvm_vm_ioctl_get_dirty_log(). 595 */ 596 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 597 { 598 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 599 600 if (dirty_bytes > PAGE_SIZE) 601 memslot->dirty_bitmap = vzalloc(dirty_bytes); 602 else 603 memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL); 604 605 if (!memslot->dirty_bitmap) 606 return -ENOMEM; 607 608 memslot->dirty_bitmap_head = memslot->dirty_bitmap; 609 return 0; 610 } 611 612 /* 613 * Allocate some memory and give it an address in the guest physical address 614 * space. 615 * 616 * Discontiguous memory is allowed, mostly for framebuffers. 617 * 618 * Must be called holding mmap_sem for write. 
619 */ 620 int __kvm_set_memory_region(struct kvm *kvm, 621 struct kvm_userspace_memory_region *mem, 622 int user_alloc) 623 { 624 int r, flush_shadow = 0; 625 gfn_t base_gfn; 626 unsigned long npages; 627 unsigned long i; 628 struct kvm_memory_slot *memslot; 629 struct kvm_memory_slot old, new; 630 struct kvm_memslots *slots, *old_memslots; 631 632 r = -EINVAL; 633 /* General sanity checks */ 634 if (mem->memory_size & (PAGE_SIZE - 1)) 635 goto out; 636 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 637 goto out; 638 if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) 639 goto out; 640 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) 641 goto out; 642 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 643 goto out; 644 645 memslot = &kvm->memslots->memslots[mem->slot]; 646 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 647 npages = mem->memory_size >> PAGE_SHIFT; 648 649 r = -EINVAL; 650 if (npages > KVM_MEM_MAX_NR_PAGES) 651 goto out; 652 653 if (!npages) 654 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; 655 656 new = old = *memslot; 657 658 new.id = mem->slot; 659 new.base_gfn = base_gfn; 660 new.npages = npages; 661 new.flags = mem->flags; 662 663 /* Disallow changing a memory slot's size. */ 664 r = -EINVAL; 665 if (npages && old.npages && npages != old.npages) 666 goto out_free; 667 668 /* Check for overlaps */ 669 r = -EEXIST; 670 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { 671 struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; 672 673 if (s == memslot || !s->npages) 674 continue; 675 if (!((base_gfn + npages <= s->base_gfn) || 676 (base_gfn >= s->base_gfn + s->npages))) 677 goto out_free; 678 } 679 680 /* Free page dirty bitmap if unneeded */ 681 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 682 new.dirty_bitmap = NULL; 683 684 r = -ENOMEM; 685 686 /* Allocate if a slot is being created */ 687 #ifndef CONFIG_S390 688 if (npages && !new.rmap) { 689 new.rmap = vzalloc(npages * sizeof(*new.rmap)); 690 691 if (!new.rmap) 692 goto out_free; 693 694 new.user_alloc = user_alloc; 695 new.userspace_addr = mem->userspace_addr; 696 } 697 if (!npages) 698 goto skip_lpage; 699 700 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { 701 unsigned long ugfn; 702 unsigned long j; 703 int lpages; 704 int level = i + 2; 705 706 /* Avoid unused variable warning if no large pages */ 707 (void)level; 708 709 if (new.lpage_info[i]) 710 continue; 711 712 lpages = 1 + ((base_gfn + npages - 1) 713 >> KVM_HPAGE_GFN_SHIFT(level)); 714 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level); 715 716 new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i])); 717 718 if (!new.lpage_info[i]) 719 goto out_free; 720 721 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 722 new.lpage_info[i][0].write_count = 1; 723 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 724 new.lpage_info[i][lpages - 1].write_count = 1; 725 ugfn = new.userspace_addr >> PAGE_SHIFT; 726 /* 727 * If the gfn and userspace address are not aligned wrt each 728 * other, or if explicitly asked to, disable large page 729 * support for this slot 730 */ 731 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || 732 !largepages_enabled) 733 for (j = 0; j < lpages; ++j) 734 new.lpage_info[i][j].write_count = 1; 735 } 736 737 skip_lpage: 738 739 /* Allocate page dirty bitmap if needed */ 740 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 741 if (kvm_create_dirty_bitmap(&new) < 0) 742 goto out_free; 743 /* destroy any largepage mappings for dirty tracking */ 744 if (old.npages) 745 flush_shadow = 1; 746 
} 747 #else /* not defined CONFIG_S390 */ 748 new.user_alloc = user_alloc; 749 if (user_alloc) 750 new.userspace_addr = mem->userspace_addr; 751 #endif /* not defined CONFIG_S390 */ 752 753 if (!npages) { 754 r = -ENOMEM; 755 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 756 if (!slots) 757 goto out_free; 758 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 759 if (mem->slot >= slots->nmemslots) 760 slots->nmemslots = mem->slot + 1; 761 slots->generation++; 762 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID; 763 764 old_memslots = kvm->memslots; 765 rcu_assign_pointer(kvm->memslots, slots); 766 synchronize_srcu_expedited(&kvm->srcu); 767 /* From this point no new shadow pages pointing to a deleted 768 * memslot will be created. 769 * 770 * validation of sp->gfn happens in: 771 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 772 * - kvm_is_visible_gfn (mmu_check_roots) 773 */ 774 kvm_arch_flush_shadow(kvm); 775 kfree(old_memslots); 776 } 777 778 r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); 779 if (r) 780 goto out_free; 781 782 /* map the pages in iommu page table */ 783 if (npages) { 784 r = kvm_iommu_map_pages(kvm, &new); 785 if (r) 786 goto out_free; 787 } 788 789 r = -ENOMEM; 790 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 791 if (!slots) 792 goto out_free; 793 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 794 if (mem->slot >= slots->nmemslots) 795 slots->nmemslots = mem->slot + 1; 796 slots->generation++; 797 798 /* actual memory is freed via old in kvm_free_physmem_slot below */ 799 if (!npages) { 800 new.rmap = NULL; 801 new.dirty_bitmap = NULL; 802 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) 803 new.lpage_info[i] = NULL; 804 } 805 806 slots->memslots[mem->slot] = new; 807 old_memslots = kvm->memslots; 808 rcu_assign_pointer(kvm->memslots, slots); 809 synchronize_srcu_expedited(&kvm->srcu); 810 811 kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); 812 813 kvm_free_physmem_slot(&old, &new); 814 kfree(old_memslots); 815 816 if (flush_shadow) 817 kvm_arch_flush_shadow(kvm); 818 819 return 0; 820 821 out_free: 822 kvm_free_physmem_slot(&new, &old); 823 out: 824 return r; 825 826 } 827 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 828 829 int kvm_set_memory_region(struct kvm *kvm, 830 struct kvm_userspace_memory_region *mem, 831 int user_alloc) 832 { 833 int r; 834 835 mutex_lock(&kvm->slots_lock); 836 r = __kvm_set_memory_region(kvm, mem, user_alloc); 837 mutex_unlock(&kvm->slots_lock); 838 return r; 839 } 840 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 841 842 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 843 struct 844 kvm_userspace_memory_region *mem, 845 int user_alloc) 846 { 847 if (mem->slot >= KVM_MEMORY_SLOTS) 848 return -EINVAL; 849 return kvm_set_memory_region(kvm, mem, user_alloc); 850 } 851 852 int kvm_get_dirty_log(struct kvm *kvm, 853 struct kvm_dirty_log *log, int *is_dirty) 854 { 855 struct kvm_memory_slot *memslot; 856 int r, i; 857 unsigned long n; 858 unsigned long any = 0; 859 860 r = -EINVAL; 861 if (log->slot >= KVM_MEMORY_SLOTS) 862 goto out; 863 864 memslot = &kvm->memslots->memslots[log->slot]; 865 r = -ENOENT; 866 if (!memslot->dirty_bitmap) 867 goto out; 868 869 n = kvm_dirty_bitmap_bytes(memslot); 870 871 for (i = 0; !any && i < n/sizeof(long); ++i) 872 any = memslot->dirty_bitmap[i]; 873 874 r = -EFAULT; 875 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 876 goto out; 877 878 if (any) 879 *is_dirty = 1; 880 881 r = 0; 882 out: 883 return r; 884 } 885 886 
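/*
 * Illustrative sketch only (not used by this file): a userspace VMM that set
 * KVM_MEM_LOG_DIRTY_PAGES on a slot typically harvests the log through the
 * KVM_GET_DIRTY_LOG ioctl roughly as below.  "vm_fd", "slot" and "bitmap"
 * are assumed to exist, and "bitmap" must provide one bit per page of the
 * slot.  Thanks to the double buffering set up by kvm_create_dirty_bitmap(),
 * on x86 each call reports the pages dirtied since the previous call.
 *
 *	struct kvm_dirty_log log = { .slot = slot };
 *
 *	log.dirty_bitmap = bitmap;
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		perror("KVM_GET_DIRTY_LOG");
 */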
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

int is_fault_pfn(pfn_t pfn)
{
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
						gfn_t gfn)
{
	int i;

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return gfn_to_hva_memslot(slot, gfn);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t get_fault_pfn(void)
{
	get_page(fault_page);
	return fault_pfn;
}

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			bool *async, bool write_fault, bool *writable)
{
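	/*
	 * Fast path first: a lockless __get_user_pages_fast() for atomic and
	 * async callers, then a sleeping get_user_pages_fast(), and finally
	 * a walk of the VMA to distinguish hwpoisoned addresses, VM_PFNMAP
	 * (MMIO) ranges and not-yet-present mappings that an async caller
	 * can fault in later.
	 */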
	struct page *page[1];
	int npages = 0;
	pfn_t pfn;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

	if (atomic || async)
		npages = __get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1) && !atomic) {
		might_sleep();

		if (writable)
			*writable = write_fault;

		npages = get_user_pages_fast(addr, 1, write_fault, page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			npages = 1;
		}
	}

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (atomic)
			return get_fault_pfn();

		down_read(&current->mm->mmap_sem);
		if (is_hwpoison_address(addr)) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
			pfn = get_fault_pfn();
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
			pfn = get_fault_pfn();
		}
		up_read(&current->mm->mmap_sem);
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	unsigned long addr;

	if (async)
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
}

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
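	/*
	 * entry is the number of pages the memslot still covers starting at
	 * gfn; if it cannot satisfy the whole request we return 0 pages
	 * below rather than crossing into another slot.
	 */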
1168 if (kvm_is_error_hva(addr)) 1169 return -1; 1170 1171 if (entry < nr_pages) 1172 return 0; 1173 1174 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1175 } 1176 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1177 1178 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1179 { 1180 pfn_t pfn; 1181 1182 pfn = gfn_to_pfn(kvm, gfn); 1183 if (!kvm_is_mmio_pfn(pfn)) 1184 return pfn_to_page(pfn); 1185 1186 WARN_ON(kvm_is_mmio_pfn(pfn)); 1187 1188 get_page(bad_page); 1189 return bad_page; 1190 } 1191 1192 EXPORT_SYMBOL_GPL(gfn_to_page); 1193 1194 void kvm_release_page_clean(struct page *page) 1195 { 1196 kvm_release_pfn_clean(page_to_pfn(page)); 1197 } 1198 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1199 1200 void kvm_release_pfn_clean(pfn_t pfn) 1201 { 1202 if (!kvm_is_mmio_pfn(pfn)) 1203 put_page(pfn_to_page(pfn)); 1204 } 1205 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1206 1207 void kvm_release_page_dirty(struct page *page) 1208 { 1209 kvm_release_pfn_dirty(page_to_pfn(page)); 1210 } 1211 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1212 1213 void kvm_release_pfn_dirty(pfn_t pfn) 1214 { 1215 kvm_set_pfn_dirty(pfn); 1216 kvm_release_pfn_clean(pfn); 1217 } 1218 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 1219 1220 void kvm_set_page_dirty(struct page *page) 1221 { 1222 kvm_set_pfn_dirty(page_to_pfn(page)); 1223 } 1224 EXPORT_SYMBOL_GPL(kvm_set_page_dirty); 1225 1226 void kvm_set_pfn_dirty(pfn_t pfn) 1227 { 1228 if (!kvm_is_mmio_pfn(pfn)) { 1229 struct page *page = pfn_to_page(pfn); 1230 if (!PageReserved(page)) 1231 SetPageDirty(page); 1232 } 1233 } 1234 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1235 1236 void kvm_set_pfn_accessed(pfn_t pfn) 1237 { 1238 if (!kvm_is_mmio_pfn(pfn)) 1239 mark_page_accessed(pfn_to_page(pfn)); 1240 } 1241 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1242 1243 void kvm_get_pfn(pfn_t pfn) 1244 { 1245 if (!kvm_is_mmio_pfn(pfn)) 1246 get_page(pfn_to_page(pfn)); 1247 } 1248 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1249 1250 static int next_segment(unsigned long len, int offset) 1251 { 1252 if (len > PAGE_SIZE - offset) 1253 return PAGE_SIZE - offset; 1254 else 1255 return len; 1256 } 1257 1258 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1259 int len) 1260 { 1261 int r; 1262 unsigned long addr; 1263 1264 addr = gfn_to_hva(kvm, gfn); 1265 if (kvm_is_error_hva(addr)) 1266 return -EFAULT; 1267 r = copy_from_user(data, (void __user *)addr + offset, len); 1268 if (r) 1269 return -EFAULT; 1270 return 0; 1271 } 1272 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1273 1274 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1275 { 1276 gfn_t gfn = gpa >> PAGE_SHIFT; 1277 int seg; 1278 int offset = offset_in_page(gpa); 1279 int ret; 1280 1281 while ((seg = next_segment(len, offset)) != 0) { 1282 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1283 if (ret < 0) 1284 return ret; 1285 offset = 0; 1286 len -= seg; 1287 data += seg; 1288 ++gfn; 1289 } 1290 return 0; 1291 } 1292 EXPORT_SYMBOL_GPL(kvm_read_guest); 1293 1294 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1295 unsigned long len) 1296 { 1297 int r; 1298 unsigned long addr; 1299 gfn_t gfn = gpa >> PAGE_SHIFT; 1300 int offset = offset_in_page(gpa); 1301 1302 addr = gfn_to_hva(kvm, gfn); 1303 if (kvm_is_error_hva(addr)) 1304 return -EFAULT; 1305 pagefault_disable(); 1306 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1307 pagefault_enable(); 1308 if (r) 1309 return -EFAULT; 1310 return 0; 1311 } 1312 EXPORT_SYMBOL(kvm_read_guest_atomic); 
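/*
 * Illustrative sketch only (not used by this file): callers can read guest
 * memory that straddles a page boundary with kvm_read_guest(); the helper
 * above splits the copy at page boundaries via next_segment().  "vcpu" and
 * "gpa" are assumed to exist, and a non-zero return means some page in the
 * range had no valid userspace mapping:
 *
 *	u8 insn[15];
 *
 *	if (kvm_read_guest(vcpu->kvm, gpa, insn, sizeof(insn)))
 *		return -EFAULT;
 */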
1313 1314 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 1315 int offset, int len) 1316 { 1317 int r; 1318 unsigned long addr; 1319 1320 addr = gfn_to_hva(kvm, gfn); 1321 if (kvm_is_error_hva(addr)) 1322 return -EFAULT; 1323 r = copy_to_user((void __user *)addr + offset, data, len); 1324 if (r) 1325 return -EFAULT; 1326 mark_page_dirty(kvm, gfn); 1327 return 0; 1328 } 1329 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1330 1331 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1332 unsigned long len) 1333 { 1334 gfn_t gfn = gpa >> PAGE_SHIFT; 1335 int seg; 1336 int offset = offset_in_page(gpa); 1337 int ret; 1338 1339 while ((seg = next_segment(len, offset)) != 0) { 1340 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1341 if (ret < 0) 1342 return ret; 1343 offset = 0; 1344 len -= seg; 1345 data += seg; 1346 ++gfn; 1347 } 1348 return 0; 1349 } 1350 1351 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1352 gpa_t gpa) 1353 { 1354 struct kvm_memslots *slots = kvm_memslots(kvm); 1355 int offset = offset_in_page(gpa); 1356 gfn_t gfn = gpa >> PAGE_SHIFT; 1357 1358 ghc->gpa = gpa; 1359 ghc->generation = slots->generation; 1360 ghc->memslot = __gfn_to_memslot(slots, gfn); 1361 ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); 1362 if (!kvm_is_error_hva(ghc->hva)) 1363 ghc->hva += offset; 1364 else 1365 return -EFAULT; 1366 1367 return 0; 1368 } 1369 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1370 1371 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1372 void *data, unsigned long len) 1373 { 1374 struct kvm_memslots *slots = kvm_memslots(kvm); 1375 int r; 1376 1377 if (slots->generation != ghc->generation) 1378 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); 1379 1380 if (kvm_is_error_hva(ghc->hva)) 1381 return -EFAULT; 1382 1383 r = copy_to_user((void __user *)ghc->hva, data, len); 1384 if (r) 1385 return -EFAULT; 1386 mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1387 1388 return 0; 1389 } 1390 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1391 1392 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1393 { 1394 return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page, 1395 offset, len); 1396 } 1397 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1398 1399 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1400 { 1401 gfn_t gfn = gpa >> PAGE_SHIFT; 1402 int seg; 1403 int offset = offset_in_page(gpa); 1404 int ret; 1405 1406 while ((seg = next_segment(len, offset)) != 0) { 1407 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1408 if (ret < 0) 1409 return ret; 1410 offset = 0; 1411 len -= seg; 1412 ++gfn; 1413 } 1414 return 0; 1415 } 1416 EXPORT_SYMBOL_GPL(kvm_clear_guest); 1417 1418 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, 1419 gfn_t gfn) 1420 { 1421 if (memslot && memslot->dirty_bitmap) { 1422 unsigned long rel_gfn = gfn - memslot->base_gfn; 1423 1424 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); 1425 } 1426 } 1427 1428 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1429 { 1430 struct kvm_memory_slot *memslot; 1431 1432 memslot = gfn_to_memslot(kvm, gfn); 1433 mark_page_dirty_in_slot(kvm, memslot, gfn); 1434 } 1435 1436 /* 1437 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 
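 * The vcpu sleeps here until it becomes runnable again (in which case
 * KVM_REQ_UNHALT is requested), a guest timer is pending, or a signal is
 * pending for the host task.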
1438 */ 1439 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 1440 { 1441 DEFINE_WAIT(wait); 1442 1443 for (;;) { 1444 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1445 1446 if (kvm_arch_vcpu_runnable(vcpu)) { 1447 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1448 break; 1449 } 1450 if (kvm_cpu_has_pending_timer(vcpu)) 1451 break; 1452 if (signal_pending(current)) 1453 break; 1454 1455 schedule(); 1456 } 1457 1458 finish_wait(&vcpu->wq, &wait); 1459 } 1460 1461 void kvm_resched(struct kvm_vcpu *vcpu) 1462 { 1463 if (!need_resched()) 1464 return; 1465 cond_resched(); 1466 } 1467 EXPORT_SYMBOL_GPL(kvm_resched); 1468 1469 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu) 1470 { 1471 ktime_t expires; 1472 DEFINE_WAIT(wait); 1473 1474 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1475 1476 /* Sleep for 100 us, and hope lock-holder got scheduled */ 1477 expires = ktime_add_ns(ktime_get(), 100000UL); 1478 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); 1479 1480 finish_wait(&vcpu->wq, &wait); 1481 } 1482 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 1483 1484 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1485 { 1486 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 1487 struct page *page; 1488 1489 if (vmf->pgoff == 0) 1490 page = virt_to_page(vcpu->run); 1491 #ifdef CONFIG_X86 1492 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 1493 page = virt_to_page(vcpu->arch.pio_data); 1494 #endif 1495 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 1496 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 1497 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 1498 #endif 1499 else 1500 return VM_FAULT_SIGBUS; 1501 get_page(page); 1502 vmf->page = page; 1503 return 0; 1504 } 1505 1506 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 1507 .fault = kvm_vcpu_fault, 1508 }; 1509 1510 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 1511 { 1512 vma->vm_ops = &kvm_vcpu_vm_ops; 1513 return 0; 1514 } 1515 1516 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 1517 { 1518 struct kvm_vcpu *vcpu = filp->private_data; 1519 1520 kvm_put_kvm(vcpu->kvm); 1521 return 0; 1522 } 1523 1524 static struct file_operations kvm_vcpu_fops = { 1525 .release = kvm_vcpu_release, 1526 .unlocked_ioctl = kvm_vcpu_ioctl, 1527 .compat_ioctl = kvm_vcpu_ioctl, 1528 .mmap = kvm_vcpu_mmap, 1529 .llseek = noop_llseek, 1530 }; 1531 1532 /* 1533 * Allocates an inode for the vcpu. 1534 */ 1535 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 1536 { 1537 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR); 1538 } 1539 1540 /* 1541 * Creates some virtual cpus. Good luck creating more than one. 
1542 */ 1543 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 1544 { 1545 int r; 1546 struct kvm_vcpu *vcpu, *v; 1547 1548 vcpu = kvm_arch_vcpu_create(kvm, id); 1549 if (IS_ERR(vcpu)) 1550 return PTR_ERR(vcpu); 1551 1552 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 1553 1554 r = kvm_arch_vcpu_setup(vcpu); 1555 if (r) 1556 return r; 1557 1558 mutex_lock(&kvm->lock); 1559 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 1560 r = -EINVAL; 1561 goto vcpu_destroy; 1562 } 1563 1564 kvm_for_each_vcpu(r, v, kvm) 1565 if (v->vcpu_id == id) { 1566 r = -EEXIST; 1567 goto vcpu_destroy; 1568 } 1569 1570 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 1571 1572 /* Now it's all set up, let userspace reach it */ 1573 kvm_get_kvm(kvm); 1574 r = create_vcpu_fd(vcpu); 1575 if (r < 0) { 1576 kvm_put_kvm(kvm); 1577 goto vcpu_destroy; 1578 } 1579 1580 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 1581 smp_wmb(); 1582 atomic_inc(&kvm->online_vcpus); 1583 1584 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 1585 if (kvm->bsp_vcpu_id == id) 1586 kvm->bsp_vcpu = vcpu; 1587 #endif 1588 mutex_unlock(&kvm->lock); 1589 return r; 1590 1591 vcpu_destroy: 1592 mutex_unlock(&kvm->lock); 1593 kvm_arch_vcpu_destroy(vcpu); 1594 return r; 1595 } 1596 1597 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 1598 { 1599 if (sigset) { 1600 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 1601 vcpu->sigset_active = 1; 1602 vcpu->sigset = *sigset; 1603 } else 1604 vcpu->sigset_active = 0; 1605 return 0; 1606 } 1607 1608 static long kvm_vcpu_ioctl(struct file *filp, 1609 unsigned int ioctl, unsigned long arg) 1610 { 1611 struct kvm_vcpu *vcpu = filp->private_data; 1612 void __user *argp = (void __user *)arg; 1613 int r; 1614 struct kvm_fpu *fpu = NULL; 1615 struct kvm_sregs *kvm_sregs = NULL; 1616 1617 if (vcpu->kvm->mm != current->mm) 1618 return -EIO; 1619 1620 #if defined(CONFIG_S390) || defined(CONFIG_PPC) 1621 /* 1622 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 1623 * so vcpu_load() would break it. 
1624 */ 1625 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT) 1626 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 1627 #endif 1628 1629 1630 vcpu_load(vcpu); 1631 switch (ioctl) { 1632 case KVM_RUN: 1633 r = -EINVAL; 1634 if (arg) 1635 goto out; 1636 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 1637 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 1638 break; 1639 case KVM_GET_REGS: { 1640 struct kvm_regs *kvm_regs; 1641 1642 r = -ENOMEM; 1643 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 1644 if (!kvm_regs) 1645 goto out; 1646 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 1647 if (r) 1648 goto out_free1; 1649 r = -EFAULT; 1650 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 1651 goto out_free1; 1652 r = 0; 1653 out_free1: 1654 kfree(kvm_regs); 1655 break; 1656 } 1657 case KVM_SET_REGS: { 1658 struct kvm_regs *kvm_regs; 1659 1660 r = -ENOMEM; 1661 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 1662 if (!kvm_regs) 1663 goto out; 1664 r = -EFAULT; 1665 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs))) 1666 goto out_free2; 1667 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 1668 if (r) 1669 goto out_free2; 1670 r = 0; 1671 out_free2: 1672 kfree(kvm_regs); 1673 break; 1674 } 1675 case KVM_GET_SREGS: { 1676 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1677 r = -ENOMEM; 1678 if (!kvm_sregs) 1679 goto out; 1680 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 1681 if (r) 1682 goto out; 1683 r = -EFAULT; 1684 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 1685 goto out; 1686 r = 0; 1687 break; 1688 } 1689 case KVM_SET_SREGS: { 1690 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1691 r = -ENOMEM; 1692 if (!kvm_sregs) 1693 goto out; 1694 r = -EFAULT; 1695 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs))) 1696 goto out; 1697 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 1698 if (r) 1699 goto out; 1700 r = 0; 1701 break; 1702 } 1703 case KVM_GET_MP_STATE: { 1704 struct kvm_mp_state mp_state; 1705 1706 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 1707 if (r) 1708 goto out; 1709 r = -EFAULT; 1710 if (copy_to_user(argp, &mp_state, sizeof mp_state)) 1711 goto out; 1712 r = 0; 1713 break; 1714 } 1715 case KVM_SET_MP_STATE: { 1716 struct kvm_mp_state mp_state; 1717 1718 r = -EFAULT; 1719 if (copy_from_user(&mp_state, argp, sizeof mp_state)) 1720 goto out; 1721 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 1722 if (r) 1723 goto out; 1724 r = 0; 1725 break; 1726 } 1727 case KVM_TRANSLATE: { 1728 struct kvm_translation tr; 1729 1730 r = -EFAULT; 1731 if (copy_from_user(&tr, argp, sizeof tr)) 1732 goto out; 1733 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 1734 if (r) 1735 goto out; 1736 r = -EFAULT; 1737 if (copy_to_user(argp, &tr, sizeof tr)) 1738 goto out; 1739 r = 0; 1740 break; 1741 } 1742 case KVM_SET_GUEST_DEBUG: { 1743 struct kvm_guest_debug dbg; 1744 1745 r = -EFAULT; 1746 if (copy_from_user(&dbg, argp, sizeof dbg)) 1747 goto out; 1748 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 1749 if (r) 1750 goto out; 1751 r = 0; 1752 break; 1753 } 1754 case KVM_SET_SIGNAL_MASK: { 1755 struct kvm_signal_mask __user *sigmask_arg = argp; 1756 struct kvm_signal_mask kvm_sigmask; 1757 sigset_t sigset, *p; 1758 1759 p = NULL; 1760 if (argp) { 1761 r = -EFAULT; 1762 if (copy_from_user(&kvm_sigmask, argp, 1763 sizeof kvm_sigmask)) 1764 goto out; 1765 r = -EINVAL; 1766 if (kvm_sigmask.len != sizeof sigset) 1767 goto out; 1768 r = -EFAULT; 1769 if (copy_from_user(&sigset, 
sigmask_arg->sigset, 1770 sizeof sigset)) 1771 goto out; 1772 p = &sigset; 1773 } 1774 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 1775 break; 1776 } 1777 case KVM_GET_FPU: { 1778 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1779 r = -ENOMEM; 1780 if (!fpu) 1781 goto out; 1782 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 1783 if (r) 1784 goto out; 1785 r = -EFAULT; 1786 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 1787 goto out; 1788 r = 0; 1789 break; 1790 } 1791 case KVM_SET_FPU: { 1792 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1793 r = -ENOMEM; 1794 if (!fpu) 1795 goto out; 1796 r = -EFAULT; 1797 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu))) 1798 goto out; 1799 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 1800 if (r) 1801 goto out; 1802 r = 0; 1803 break; 1804 } 1805 default: 1806 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 1807 } 1808 out: 1809 vcpu_put(vcpu); 1810 kfree(fpu); 1811 kfree(kvm_sregs); 1812 return r; 1813 } 1814 1815 static long kvm_vm_ioctl(struct file *filp, 1816 unsigned int ioctl, unsigned long arg) 1817 { 1818 struct kvm *kvm = filp->private_data; 1819 void __user *argp = (void __user *)arg; 1820 int r; 1821 1822 if (kvm->mm != current->mm) 1823 return -EIO; 1824 switch (ioctl) { 1825 case KVM_CREATE_VCPU: 1826 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 1827 if (r < 0) 1828 goto out; 1829 break; 1830 case KVM_SET_USER_MEMORY_REGION: { 1831 struct kvm_userspace_memory_region kvm_userspace_mem; 1832 1833 r = -EFAULT; 1834 if (copy_from_user(&kvm_userspace_mem, argp, 1835 sizeof kvm_userspace_mem)) 1836 goto out; 1837 1838 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); 1839 if (r) 1840 goto out; 1841 break; 1842 } 1843 case KVM_GET_DIRTY_LOG: { 1844 struct kvm_dirty_log log; 1845 1846 r = -EFAULT; 1847 if (copy_from_user(&log, argp, sizeof log)) 1848 goto out; 1849 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 1850 if (r) 1851 goto out; 1852 break; 1853 } 1854 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 1855 case KVM_REGISTER_COALESCED_MMIO: { 1856 struct kvm_coalesced_mmio_zone zone; 1857 r = -EFAULT; 1858 if (copy_from_user(&zone, argp, sizeof zone)) 1859 goto out; 1860 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 1861 if (r) 1862 goto out; 1863 r = 0; 1864 break; 1865 } 1866 case KVM_UNREGISTER_COALESCED_MMIO: { 1867 struct kvm_coalesced_mmio_zone zone; 1868 r = -EFAULT; 1869 if (copy_from_user(&zone, argp, sizeof zone)) 1870 goto out; 1871 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 1872 if (r) 1873 goto out; 1874 r = 0; 1875 break; 1876 } 1877 #endif 1878 case KVM_IRQFD: { 1879 struct kvm_irqfd data; 1880 1881 r = -EFAULT; 1882 if (copy_from_user(&data, argp, sizeof data)) 1883 goto out; 1884 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags); 1885 break; 1886 } 1887 case KVM_IOEVENTFD: { 1888 struct kvm_ioeventfd data; 1889 1890 r = -EFAULT; 1891 if (copy_from_user(&data, argp, sizeof data)) 1892 goto out; 1893 r = kvm_ioeventfd(kvm, &data); 1894 break; 1895 } 1896 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 1897 case KVM_SET_BOOT_CPU_ID: 1898 r = 0; 1899 mutex_lock(&kvm->lock); 1900 if (atomic_read(&kvm->online_vcpus) != 0) 1901 r = -EBUSY; 1902 else 1903 kvm->bsp_vcpu_id = arg; 1904 mutex_unlock(&kvm->lock); 1905 break; 1906 #endif 1907 default: 1908 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 1909 if (r == -ENOTTY) 1910 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); 1911 } 1912 out: 1913 return r; 1914 } 1915 1916 #ifdef CONFIG_COMPAT 1917 struct compat_kvm_dirty_log { 1918 __u32 slot; 1919 __u32 padding1; 1920 union 
{ 1921 compat_uptr_t dirty_bitmap; /* one bit per page */ 1922 __u64 padding2; 1923 }; 1924 }; 1925 1926 static long kvm_vm_compat_ioctl(struct file *filp, 1927 unsigned int ioctl, unsigned long arg) 1928 { 1929 struct kvm *kvm = filp->private_data; 1930 int r; 1931 1932 if (kvm->mm != current->mm) 1933 return -EIO; 1934 switch (ioctl) { 1935 case KVM_GET_DIRTY_LOG: { 1936 struct compat_kvm_dirty_log compat_log; 1937 struct kvm_dirty_log log; 1938 1939 r = -EFAULT; 1940 if (copy_from_user(&compat_log, (void __user *)arg, 1941 sizeof(compat_log))) 1942 goto out; 1943 log.slot = compat_log.slot; 1944 log.padding1 = compat_log.padding1; 1945 log.padding2 = compat_log.padding2; 1946 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 1947 1948 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 1949 if (r) 1950 goto out; 1951 break; 1952 } 1953 default: 1954 r = kvm_vm_ioctl(filp, ioctl, arg); 1955 } 1956 1957 out: 1958 return r; 1959 } 1960 #endif 1961 1962 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1963 { 1964 struct page *page[1]; 1965 unsigned long addr; 1966 int npages; 1967 gfn_t gfn = vmf->pgoff; 1968 struct kvm *kvm = vma->vm_file->private_data; 1969 1970 addr = gfn_to_hva(kvm, gfn); 1971 if (kvm_is_error_hva(addr)) 1972 return VM_FAULT_SIGBUS; 1973 1974 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, 1975 NULL); 1976 if (unlikely(npages != 1)) 1977 return VM_FAULT_SIGBUS; 1978 1979 vmf->page = page[0]; 1980 return 0; 1981 } 1982 1983 static const struct vm_operations_struct kvm_vm_vm_ops = { 1984 .fault = kvm_vm_fault, 1985 }; 1986 1987 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) 1988 { 1989 vma->vm_ops = &kvm_vm_vm_ops; 1990 return 0; 1991 } 1992 1993 static struct file_operations kvm_vm_fops = { 1994 .release = kvm_vm_release, 1995 .unlocked_ioctl = kvm_vm_ioctl, 1996 #ifdef CONFIG_COMPAT 1997 .compat_ioctl = kvm_vm_compat_ioctl, 1998 #endif 1999 .mmap = kvm_vm_mmap, 2000 .llseek = noop_llseek, 2001 }; 2002 2003 static int kvm_dev_ioctl_create_vm(void) 2004 { 2005 int r; 2006 struct kvm *kvm; 2007 2008 kvm = kvm_create_vm(); 2009 if (IS_ERR(kvm)) 2010 return PTR_ERR(kvm); 2011 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2012 r = kvm_coalesced_mmio_init(kvm); 2013 if (r < 0) { 2014 kvm_put_kvm(kvm); 2015 return r; 2016 } 2017 #endif 2018 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 2019 if (r < 0) 2020 kvm_put_kvm(kvm); 2021 2022 return r; 2023 } 2024 2025 static long kvm_dev_ioctl_check_extension_generic(long arg) 2026 { 2027 switch (arg) { 2028 case KVM_CAP_USER_MEMORY: 2029 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2030 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2031 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2032 case KVM_CAP_SET_BOOT_CPU_ID: 2033 #endif 2034 case KVM_CAP_INTERNAL_ERROR_DATA: 2035 return 1; 2036 #ifdef CONFIG_HAVE_KVM_IRQCHIP 2037 case KVM_CAP_IRQ_ROUTING: 2038 return KVM_MAX_IRQ_ROUTES; 2039 #endif 2040 default: 2041 break; 2042 } 2043 return kvm_dev_ioctl_check_extension(arg); 2044 } 2045 2046 static long kvm_dev_ioctl(struct file *filp, 2047 unsigned int ioctl, unsigned long arg) 2048 { 2049 long r = -EINVAL; 2050 2051 switch (ioctl) { 2052 case KVM_GET_API_VERSION: 2053 r = -EINVAL; 2054 if (arg) 2055 goto out; 2056 r = KVM_API_VERSION; 2057 break; 2058 case KVM_CREATE_VM: 2059 r = -EINVAL; 2060 if (arg) 2061 goto out; 2062 r = kvm_dev_ioctl_create_vm(); 2063 break; 2064 case KVM_CHECK_EXTENSION: 2065 r = kvm_dev_ioctl_check_extension_generic(arg); 2066 break; 2067 case 
KVM_GET_VCPU_MMAP_SIZE: 2068 r = -EINVAL; 2069 if (arg) 2070 goto out; 2071 r = PAGE_SIZE; /* struct kvm_run */ 2072 #ifdef CONFIG_X86 2073 r += PAGE_SIZE; /* pio data page */ 2074 #endif 2075 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2076 r += PAGE_SIZE; /* coalesced mmio ring page */ 2077 #endif 2078 break; 2079 case KVM_TRACE_ENABLE: 2080 case KVM_TRACE_PAUSE: 2081 case KVM_TRACE_DISABLE: 2082 r = -EOPNOTSUPP; 2083 break; 2084 default: 2085 return kvm_arch_dev_ioctl(filp, ioctl, arg); 2086 } 2087 out: 2088 return r; 2089 } 2090 2091 static struct file_operations kvm_chardev_ops = { 2092 .unlocked_ioctl = kvm_dev_ioctl, 2093 .compat_ioctl = kvm_dev_ioctl, 2094 .llseek = noop_llseek, 2095 }; 2096 2097 static struct miscdevice kvm_dev = { 2098 KVM_MINOR, 2099 "kvm", 2100 &kvm_chardev_ops, 2101 }; 2102 2103 static void hardware_enable_nolock(void *junk) 2104 { 2105 int cpu = raw_smp_processor_id(); 2106 int r; 2107 2108 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2109 return; 2110 2111 cpumask_set_cpu(cpu, cpus_hardware_enabled); 2112 2113 r = kvm_arch_hardware_enable(NULL); 2114 2115 if (r) { 2116 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2117 atomic_inc(&hardware_enable_failed); 2118 printk(KERN_INFO "kvm: enabling virtualization on " 2119 "CPU%d failed\n", cpu); 2120 } 2121 } 2122 2123 static void hardware_enable(void *junk) 2124 { 2125 spin_lock(&kvm_lock); 2126 hardware_enable_nolock(junk); 2127 spin_unlock(&kvm_lock); 2128 } 2129 2130 static void hardware_disable_nolock(void *junk) 2131 { 2132 int cpu = raw_smp_processor_id(); 2133 2134 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2135 return; 2136 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2137 kvm_arch_hardware_disable(NULL); 2138 } 2139 2140 static void hardware_disable(void *junk) 2141 { 2142 spin_lock(&kvm_lock); 2143 hardware_disable_nolock(junk); 2144 spin_unlock(&kvm_lock); 2145 } 2146 2147 static void hardware_disable_all_nolock(void) 2148 { 2149 BUG_ON(!kvm_usage_count); 2150 2151 kvm_usage_count--; 2152 if (!kvm_usage_count) 2153 on_each_cpu(hardware_disable_nolock, NULL, 1); 2154 } 2155 2156 static void hardware_disable_all(void) 2157 { 2158 spin_lock(&kvm_lock); 2159 hardware_disable_all_nolock(); 2160 spin_unlock(&kvm_lock); 2161 } 2162 2163 static int hardware_enable_all(void) 2164 { 2165 int r = 0; 2166 2167 spin_lock(&kvm_lock); 2168 2169 kvm_usage_count++; 2170 if (kvm_usage_count == 1) { 2171 atomic_set(&hardware_enable_failed, 0); 2172 on_each_cpu(hardware_enable_nolock, NULL, 1); 2173 2174 if (atomic_read(&hardware_enable_failed)) { 2175 hardware_disable_all_nolock(); 2176 r = -EBUSY; 2177 } 2178 } 2179 2180 spin_unlock(&kvm_lock); 2181 2182 return r; 2183 } 2184 2185 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 2186 void *v) 2187 { 2188 int cpu = (long)v; 2189 2190 if (!kvm_usage_count) 2191 return NOTIFY_OK; 2192 2193 val &= ~CPU_TASKS_FROZEN; 2194 switch (val) { 2195 case CPU_DYING: 2196 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2197 cpu); 2198 hardware_disable(NULL); 2199 break; 2200 case CPU_STARTING: 2201 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2202 cpu); 2203 hardware_enable(NULL); 2204 break; 2205 } 2206 return NOTIFY_OK; 2207 } 2208 2209 2210 asmlinkage void kvm_spurious_fault(void) 2211 { 2212 /* Fault while not rebooting. We want the trace. 
*/ 2213 BUG(); 2214 } 2215 EXPORT_SYMBOL_GPL(kvm_spurious_fault); 2216 2217 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 2218 void *v) 2219 { 2220 /* 2221 * Some (well, at least mine) BIOSes hang on reboot if 2222 * in vmx root mode. 2223 * 2224 * And Intel TXT required VMX off for all cpu when system shutdown. 2225 */ 2226 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2227 kvm_rebooting = true; 2228 on_each_cpu(hardware_disable_nolock, NULL, 1); 2229 return NOTIFY_OK; 2230 } 2231 2232 static struct notifier_block kvm_reboot_notifier = { 2233 .notifier_call = kvm_reboot, 2234 .priority = 0, 2235 }; 2236 2237 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 2238 { 2239 int i; 2240 2241 for (i = 0; i < bus->dev_count; i++) { 2242 struct kvm_io_device *pos = bus->devs[i]; 2243 2244 kvm_iodevice_destructor(pos); 2245 } 2246 kfree(bus); 2247 } 2248 2249 /* kvm_io_bus_write - called under kvm->slots_lock */ 2250 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2251 int len, const void *val) 2252 { 2253 int i; 2254 struct kvm_io_bus *bus; 2255 2256 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2257 for (i = 0; i < bus->dev_count; i++) 2258 if (!kvm_iodevice_write(bus->devs[i], addr, len, val)) 2259 return 0; 2260 return -EOPNOTSUPP; 2261 } 2262 2263 /* kvm_io_bus_read - called under kvm->slots_lock */ 2264 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 2265 int len, void *val) 2266 { 2267 int i; 2268 struct kvm_io_bus *bus; 2269 2270 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 2271 for (i = 0; i < bus->dev_count; i++) 2272 if (!kvm_iodevice_read(bus->devs[i], addr, len, val)) 2273 return 0; 2274 return -EOPNOTSUPP; 2275 } 2276 2277 /* Caller must hold slots_lock. */ 2278 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, 2279 struct kvm_io_device *dev) 2280 { 2281 struct kvm_io_bus *new_bus, *bus; 2282 2283 bus = kvm->buses[bus_idx]; 2284 if (bus->dev_count > NR_IOBUS_DEVS-1) 2285 return -ENOSPC; 2286 2287 new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); 2288 if (!new_bus) 2289 return -ENOMEM; 2290 memcpy(new_bus, bus, sizeof(struct kvm_io_bus)); 2291 new_bus->devs[new_bus->dev_count++] = dev; 2292 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2293 synchronize_srcu_expedited(&kvm->srcu); 2294 kfree(bus); 2295 2296 return 0; 2297 } 2298 2299 /* Caller must hold slots_lock. 
/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
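/*
 * Editor's note: a hedged illustration (not part of this file) of how the
 * offset trick above is fed.  Each debugfs_entries[] item supplied by arch
 * code pairs a file name with an offset into struct kvm or struct kvm_vcpu
 * plus a KVM_STAT_* kind; on x86 the table looks roughly like:
 *
 *	#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 *	#define VM_STAT(x)   offsetof(struct kvm, stat.x), KVM_STAT_VM
 *
 *	struct kvm_stats_debugfs_item debugfs_entries[] = {
 *		{ "pf_fixed",		VCPU_STAT(pf_fixed) },
 *		{ "remote_tlb_flush",	VM_STAT(remote_tlb_flush) },
 *		{ NULL }
 *	};
 *
 * vm_stat_get()/vcpu_stat_get() then sum the counter found at that offset
 * across every VM (and every vCPU) on vm_list.
 */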
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count) {
		WARN_ON(spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
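/*
 * Editor's note: a hedged usage sketch (not part of this file).  The
 * kvm_vcpu_cache created above is what arch code draws vCPUs from, so the
 * requested vcpu_size/vcpu_align describe the arch's container struct.
 * On x86/VMX the allocation is roughly:
 *
 *	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(kvm_vcpu_cache, vmx);
 */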
void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(fault_page);	/* allocated in kvm_init(), free it too */
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
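/*
 * Editor's note: a hedged sketch (not part of this file) of how an arch
 * module pairs the two entry points above.  The x86 VMX module, for
 * example, does roughly:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 *
 *	static void __exit vmx_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(vmx_init);
 *	module_exit(vmx_exit);
 */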