1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 55 #include <asm/processor.h> 56 #include <asm/ioctl.h> 57 #include <linux/uaccess.h> 58 59 #include "coalesced_mmio.h" 60 #include "async_pf.h" 61 #include "mmu_lock.h" 62 #include "vfio.h" 63 64 #define CREATE_TRACE_POINTS 65 #include <trace/events/kvm.h> 66 67 #include <linux/kvm_dirty_ring.h> 68 69 /* Worst case buffer size needed for holding an integer. */ 70 #define ITOA_MAX_LEN 12 71 72 MODULE_AUTHOR("Qumranet"); 73 MODULE_LICENSE("GPL"); 74 75 /* Architectures should define their poll value according to the halt latency */ 76 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 77 module_param(halt_poll_ns, uint, 0644); 78 EXPORT_SYMBOL_GPL(halt_poll_ns); 79 80 /* Default doubles per-vcpu halt_poll_ns. */ 81 unsigned int halt_poll_ns_grow = 2; 82 module_param(halt_poll_ns_grow, uint, 0644); 83 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 84 85 /* The start value to grow halt_poll_ns from */ 86 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 87 module_param(halt_poll_ns_grow_start, uint, 0644); 88 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 89 90 /* Default resets per-vcpu halt_poll_ns . 
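 * (a shrink value of 0 resets the per-vcpu halt_poll_ns to 0 on the next
 * shrink, while a non-zero value divides it, e.g. 2 halves it).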
*/ 91 unsigned int halt_poll_ns_shrink; 92 module_param(halt_poll_ns_shrink, uint, 0644); 93 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 94 95 /* 96 * Ordering of locks: 97 * 98 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 99 */ 100 101 DEFINE_MUTEX(kvm_lock); 102 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 103 LIST_HEAD(vm_list); 104 105 static cpumask_var_t cpus_hardware_enabled; 106 static int kvm_usage_count; 107 static atomic_t hardware_enable_failed; 108 109 static struct kmem_cache *kvm_vcpu_cache; 110 111 static __read_mostly struct preempt_ops kvm_preempt_ops; 112 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 113 114 struct dentry *kvm_debugfs_dir; 115 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 116 117 static int kvm_debugfs_num_entries; 118 static const struct file_operations stat_fops_per_vm; 119 120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 121 unsigned long arg); 122 #ifdef CONFIG_KVM_COMPAT 123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 124 unsigned long arg); 125 #define KVM_COMPAT(c) .compat_ioctl = (c) 126 #else 127 /* 128 * For architectures that don't implement a compat infrastructure, 129 * adopt a double line of defense: 130 * - Prevent a compat task from opening /dev/kvm 131 * - If the open has been done by a 64bit task, and the KVM fd 132 * passed to a compat task, let the ioctls fail. 133 */ 134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 135 unsigned long arg) { return -EINVAL; } 136 137 static int kvm_no_compat_open(struct inode *inode, struct file *file) 138 { 139 return is_compat_task() ? -ENODEV : 0; 140 } 141 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 142 .open = kvm_no_compat_open 143 #endif 144 static int hardware_enable_all(void); 145 static void hardware_disable_all(void); 146 147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 148 149 __visible bool kvm_rebooting; 150 EXPORT_SYMBOL_GPL(kvm_rebooting); 151 152 #define KVM_EVENT_CREATE_VM 0 153 #define KVM_EVENT_DESTROY_VM 1 154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 155 static unsigned long long kvm_createvm_count; 156 static unsigned long long kvm_active_vms; 157 158 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 159 unsigned long start, unsigned long end) 160 { 161 } 162 163 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) 164 { 165 /* 166 * The metadata used by is_zone_device_page() to determine whether or 167 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 168 * the device has been pinned, e.g. by get_user_pages(). WARN if the 169 * page_count() is zero to help detect bad usage of this helper. 170 */ 171 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) 172 return false; 173 174 return is_zone_device_page(pfn_to_page(pfn)); 175 } 176 177 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 178 { 179 /* 180 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 181 * perspective they are "normal" pages, albeit with slightly different 182 * usage rules. 
183 */ 184 if (pfn_valid(pfn)) 185 return PageReserved(pfn_to_page(pfn)) && 186 !is_zero_pfn(pfn) && 187 !kvm_is_zone_device_pfn(pfn); 188 189 return true; 190 } 191 192 bool kvm_is_transparent_hugepage(kvm_pfn_t pfn) 193 { 194 struct page *page = pfn_to_page(pfn); 195 196 if (!PageTransCompoundMap(page)) 197 return false; 198 199 return is_transparent_hugepage(compound_head(page)); 200 } 201 202 /* 203 * Switches to specified vcpu, until a matching vcpu_put() 204 */ 205 void vcpu_load(struct kvm_vcpu *vcpu) 206 { 207 int cpu = get_cpu(); 208 209 __this_cpu_write(kvm_running_vcpu, vcpu); 210 preempt_notifier_register(&vcpu->preempt_notifier); 211 kvm_arch_vcpu_load(vcpu, cpu); 212 put_cpu(); 213 } 214 EXPORT_SYMBOL_GPL(vcpu_load); 215 216 void vcpu_put(struct kvm_vcpu *vcpu) 217 { 218 preempt_disable(); 219 kvm_arch_vcpu_put(vcpu); 220 preempt_notifier_unregister(&vcpu->preempt_notifier); 221 __this_cpu_write(kvm_running_vcpu, NULL); 222 preempt_enable(); 223 } 224 EXPORT_SYMBOL_GPL(vcpu_put); 225 226 /* TODO: merge with kvm_arch_vcpu_should_kick */ 227 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 228 { 229 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 230 231 /* 232 * We need to wait for the VCPU to reenable interrupts and get out of 233 * READING_SHADOW_PAGE_TABLES mode. 234 */ 235 if (req & KVM_REQUEST_WAIT) 236 return mode != OUTSIDE_GUEST_MODE; 237 238 /* 239 * Need to kick a running VCPU, but otherwise there is nothing to do. 240 */ 241 return mode == IN_GUEST_MODE; 242 } 243 244 static void ack_flush(void *_completed) 245 { 246 } 247 248 static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait) 249 { 250 if (unlikely(!cpus)) 251 cpus = cpu_online_mask; 252 253 if (cpumask_empty(cpus)) 254 return false; 255 256 smp_call_function_many(cpus, ack_flush, NULL, wait); 257 return true; 258 } 259 260 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 261 struct kvm_vcpu *except, 262 unsigned long *vcpu_bitmap, cpumask_var_t tmp) 263 { 264 int i, cpu, me; 265 struct kvm_vcpu *vcpu; 266 bool called; 267 268 me = get_cpu(); 269 270 kvm_for_each_vcpu(i, vcpu, kvm) { 271 if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) || 272 vcpu == except) 273 continue; 274 275 kvm_make_request(req, vcpu); 276 cpu = vcpu->cpu; 277 278 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 279 continue; 280 281 if (tmp != NULL && cpu != -1 && cpu != me && 282 kvm_request_needs_ipi(vcpu, req)) 283 __cpumask_set_cpu(cpu, tmp); 284 } 285 286 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT)); 287 put_cpu(); 288 289 return called; 290 } 291 292 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 293 struct kvm_vcpu *except) 294 { 295 cpumask_var_t cpus; 296 bool called; 297 298 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 299 300 called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus); 301 302 free_cpumask_var(cpus); 303 return called; 304 } 305 306 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 307 { 308 return kvm_make_all_cpus_request_except(kvm, req, NULL); 309 } 310 311 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 312 void kvm_flush_remote_tlbs(struct kvm *kvm) 313 { 314 /* 315 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in 316 * kvm_make_all_cpus_request. 317 */ 318 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); 319 320 /* 321 * We want to publish modifications to the page tables before reading 322 * mode. Pairs with a memory barrier in arch-specific code. 
323 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 324 * and smp_mb in walk_shadow_page_lockless_begin/end. 325 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 326 * 327 * There is already an smp_mb__after_atomic() before 328 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 329 * barrier here. 330 */ 331 if (!kvm_arch_flush_remote_tlb(kvm) 332 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 333 ++kvm->stat.remote_tlb_flush; 334 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 335 } 336 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 337 #endif 338 339 void kvm_reload_remote_mmus(struct kvm *kvm) 340 { 341 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 342 } 343 344 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 345 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 346 gfp_t gfp_flags) 347 { 348 gfp_flags |= mc->gfp_zero; 349 350 if (mc->kmem_cache) 351 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 352 else 353 return (void *)__get_free_page(gfp_flags); 354 } 355 356 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 357 { 358 void *obj; 359 360 if (mc->nobjs >= min) 361 return 0; 362 while (mc->nobjs < ARRAY_SIZE(mc->objects)) { 363 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); 364 if (!obj) 365 return mc->nobjs >= min ? 0 : -ENOMEM; 366 mc->objects[mc->nobjs++] = obj; 367 } 368 return 0; 369 } 370 371 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 372 { 373 return mc->nobjs; 374 } 375 376 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 377 { 378 while (mc->nobjs) { 379 if (mc->kmem_cache) 380 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 381 else 382 free_page((unsigned long)mc->objects[--mc->nobjs]); 383 } 384 } 385 386 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 387 { 388 void *p; 389 390 if (WARN_ON(!mc->nobjs)) 391 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 392 else 393 p = mc->objects[--mc->nobjs]; 394 BUG_ON(!p); 395 return p; 396 } 397 #endif 398 399 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 400 { 401 mutex_init(&vcpu->mutex); 402 vcpu->cpu = -1; 403 vcpu->kvm = kvm; 404 vcpu->vcpu_id = id; 405 vcpu->pid = NULL; 406 rcuwait_init(&vcpu->wait); 407 kvm_async_pf_vcpu_init(vcpu); 408 409 vcpu->pre_pcpu = -1; 410 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 411 412 kvm_vcpu_set_in_spin_loop(vcpu, false); 413 kvm_vcpu_set_dy_eligible(vcpu, false); 414 vcpu->preempted = false; 415 vcpu->ready = false; 416 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 417 } 418 419 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 420 { 421 kvm_dirty_ring_free(&vcpu->dirty_ring); 422 kvm_arch_vcpu_destroy(vcpu); 423 424 /* 425 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 426 * the vcpu->pid pointer, and at destruction time all file descriptors 427 * are already gone. 
428 */ 429 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 430 431 free_page((unsigned long)vcpu->run); 432 kmem_cache_free(kvm_vcpu_cache, vcpu); 433 } 434 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); 435 436 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 437 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 438 { 439 return container_of(mn, struct kvm, mmu_notifier); 440 } 441 442 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 443 struct mm_struct *mm, 444 unsigned long start, unsigned long end) 445 { 446 struct kvm *kvm = mmu_notifier_to_kvm(mn); 447 int idx; 448 449 idx = srcu_read_lock(&kvm->srcu); 450 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 451 srcu_read_unlock(&kvm->srcu, idx); 452 } 453 454 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 455 struct mm_struct *mm, 456 unsigned long address, 457 pte_t pte) 458 { 459 struct kvm *kvm = mmu_notifier_to_kvm(mn); 460 int idx; 461 462 idx = srcu_read_lock(&kvm->srcu); 463 464 KVM_MMU_LOCK(kvm); 465 466 kvm->mmu_notifier_seq++; 467 468 if (kvm_set_spte_hva(kvm, address, pte)) 469 kvm_flush_remote_tlbs(kvm); 470 471 KVM_MMU_UNLOCK(kvm); 472 srcu_read_unlock(&kvm->srcu, idx); 473 } 474 475 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 476 const struct mmu_notifier_range *range) 477 { 478 struct kvm *kvm = mmu_notifier_to_kvm(mn); 479 int need_tlb_flush = 0, idx; 480 481 idx = srcu_read_lock(&kvm->srcu); 482 KVM_MMU_LOCK(kvm); 483 /* 484 * The count increase must become visible at unlock time as no 485 * spte can be established without taking the mmu_lock and 486 * count is also read inside the mmu_lock critical section. 487 */ 488 kvm->mmu_notifier_count++; 489 need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, 490 range->flags); 491 /* we've to flush the tlb before the pages can be freed */ 492 if (need_tlb_flush || kvm->tlbs_dirty) 493 kvm_flush_remote_tlbs(kvm); 494 495 KVM_MMU_UNLOCK(kvm); 496 srcu_read_unlock(&kvm->srcu, idx); 497 498 return 0; 499 } 500 501 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 502 const struct mmu_notifier_range *range) 503 { 504 struct kvm *kvm = mmu_notifier_to_kvm(mn); 505 506 KVM_MMU_LOCK(kvm); 507 /* 508 * This sequence increase will notify the kvm page fault that 509 * the page that is going to be mapped in the spte could have 510 * been freed. 511 */ 512 kvm->mmu_notifier_seq++; 513 smp_wmb(); 514 /* 515 * The above sequence increase must be visible before the 516 * below count decrease, which is ensured by the smp_wmb above 517 * in conjunction with the smp_rmb in mmu_notifier_retry(). 
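 *
 * Readers pair with this via mmu_notifier_retry(); a page fault handler
 * typically follows the pattern below (a simplified sketch of the usual
 * caller pattern):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;	(an invalidation ran in the meantime)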
518 */ 519 kvm->mmu_notifier_count--; 520 KVM_MMU_UNLOCK(kvm); 521 522 BUG_ON(kvm->mmu_notifier_count < 0); 523 } 524 525 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 526 struct mm_struct *mm, 527 unsigned long start, 528 unsigned long end) 529 { 530 struct kvm *kvm = mmu_notifier_to_kvm(mn); 531 int young, idx; 532 533 idx = srcu_read_lock(&kvm->srcu); 534 KVM_MMU_LOCK(kvm); 535 536 young = kvm_age_hva(kvm, start, end); 537 if (young) 538 kvm_flush_remote_tlbs(kvm); 539 540 KVM_MMU_UNLOCK(kvm); 541 srcu_read_unlock(&kvm->srcu, idx); 542 543 return young; 544 } 545 546 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 547 struct mm_struct *mm, 548 unsigned long start, 549 unsigned long end) 550 { 551 struct kvm *kvm = mmu_notifier_to_kvm(mn); 552 int young, idx; 553 554 idx = srcu_read_lock(&kvm->srcu); 555 KVM_MMU_LOCK(kvm); 556 /* 557 * Even though we do not flush TLB, this will still adversely 558 * affect performance on pre-Haswell Intel EPT, where there is 559 * no EPT Access Bit to clear so that we have to tear down EPT 560 * tables instead. If we find this unacceptable, we can always 561 * add a parameter to kvm_age_hva so that it effectively doesn't 562 * do anything on clear_young. 563 * 564 * Also note that currently we never issue secondary TLB flushes 565 * from clear_young, leaving this job up to the regular system 566 * cadence. If we find this inaccurate, we might come up with a 567 * more sophisticated heuristic later. 568 */ 569 young = kvm_age_hva(kvm, start, end); 570 KVM_MMU_UNLOCK(kvm); 571 srcu_read_unlock(&kvm->srcu, idx); 572 573 return young; 574 } 575 576 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 577 struct mm_struct *mm, 578 unsigned long address) 579 { 580 struct kvm *kvm = mmu_notifier_to_kvm(mn); 581 int young, idx; 582 583 idx = srcu_read_lock(&kvm->srcu); 584 KVM_MMU_LOCK(kvm); 585 young = kvm_test_age_hva(kvm, address); 586 KVM_MMU_UNLOCK(kvm); 587 srcu_read_unlock(&kvm->srcu, idx); 588 589 return young; 590 } 591 592 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 593 struct mm_struct *mm) 594 { 595 struct kvm *kvm = mmu_notifier_to_kvm(mn); 596 int idx; 597 598 idx = srcu_read_lock(&kvm->srcu); 599 kvm_arch_flush_shadow_all(kvm); 600 srcu_read_unlock(&kvm->srcu, idx); 601 } 602 603 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 604 .invalidate_range = kvm_mmu_notifier_invalidate_range, 605 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 606 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 607 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 608 .clear_young = kvm_mmu_notifier_clear_young, 609 .test_young = kvm_mmu_notifier_test_young, 610 .change_pte = kvm_mmu_notifier_change_pte, 611 .release = kvm_mmu_notifier_release, 612 }; 613 614 static int kvm_init_mmu_notifier(struct kvm *kvm) 615 { 616 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 617 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 618 } 619 620 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 621 622 static int kvm_init_mmu_notifier(struct kvm *kvm) 623 { 624 return 0; 625 } 626 627 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 628 629 static struct kvm_memslots *kvm_alloc_memslots(void) 630 { 631 int i; 632 struct kvm_memslots *slots; 633 634 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); 635 if (!slots) 636 return NULL; 637 638 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 639 slots->id_to_index[i] = 
-1; 640 641 return slots; 642 } 643 644 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 645 { 646 if (!memslot->dirty_bitmap) 647 return; 648 649 kvfree(memslot->dirty_bitmap); 650 memslot->dirty_bitmap = NULL; 651 } 652 653 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 654 { 655 kvm_destroy_dirty_bitmap(slot); 656 657 kvm_arch_free_memslot(kvm, slot); 658 659 slot->flags = 0; 660 slot->npages = 0; 661 } 662 663 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 664 { 665 struct kvm_memory_slot *memslot; 666 667 if (!slots) 668 return; 669 670 kvm_for_each_memslot(memslot, slots) 671 kvm_free_memslot(kvm, memslot); 672 673 kvfree(slots); 674 } 675 676 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 677 { 678 int i; 679 680 if (!kvm->debugfs_dentry) 681 return; 682 683 debugfs_remove_recursive(kvm->debugfs_dentry); 684 685 if (kvm->debugfs_stat_data) { 686 for (i = 0; i < kvm_debugfs_num_entries; i++) 687 kfree(kvm->debugfs_stat_data[i]); 688 kfree(kvm->debugfs_stat_data); 689 } 690 } 691 692 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 693 { 694 char dir_name[ITOA_MAX_LEN * 2]; 695 struct kvm_stat_data *stat_data; 696 struct kvm_stats_debugfs_item *p; 697 698 if (!debugfs_initialized()) 699 return 0; 700 701 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 702 kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir); 703 704 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 705 sizeof(*kvm->debugfs_stat_data), 706 GFP_KERNEL_ACCOUNT); 707 if (!kvm->debugfs_stat_data) 708 return -ENOMEM; 709 710 for (p = debugfs_entries; p->name; p++) { 711 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 712 if (!stat_data) 713 return -ENOMEM; 714 715 stat_data->kvm = kvm; 716 stat_data->dbgfs_item = p; 717 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; 718 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p), 719 kvm->debugfs_dentry, stat_data, 720 &stat_fops_per_vm); 721 } 722 return 0; 723 } 724 725 /* 726 * Called after the VM is otherwise initialized, but just before adding it to 727 * the vm_list. 728 */ 729 int __weak kvm_arch_post_init_vm(struct kvm *kvm) 730 { 731 return 0; 732 } 733 734 /* 735 * Called just after removing the VM from the vm_list, but before doing any 736 * other destruction. 737 */ 738 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) 739 { 740 } 741 742 static struct kvm *kvm_create_vm(unsigned long type) 743 { 744 struct kvm *kvm = kvm_arch_alloc_vm(); 745 int r = -ENOMEM; 746 int i; 747 748 if (!kvm) 749 return ERR_PTR(-ENOMEM); 750 751 KVM_MMU_LOCK_INIT(kvm); 752 mmgrab(current->mm); 753 kvm->mm = current->mm; 754 kvm_eventfd_init(kvm); 755 mutex_init(&kvm->lock); 756 mutex_init(&kvm->irq_lock); 757 mutex_init(&kvm->slots_lock); 758 INIT_LIST_HEAD(&kvm->devices); 759 760 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 761 762 if (init_srcu_struct(&kvm->srcu)) 763 goto out_err_no_srcu; 764 if (init_srcu_struct(&kvm->irq_srcu)) 765 goto out_err_no_irq_srcu; 766 767 refcount_set(&kvm->users_count, 1); 768 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 769 struct kvm_memslots *slots = kvm_alloc_memslots(); 770 771 if (!slots) 772 goto out_err_no_arch_destroy_vm; 773 /* Generations must be different for each address space. 
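 * (slots for address space N start at generation N and are later bumped
 * in steps of KVM_ADDRESS_SPACE_NUM by install_new_memslots(), so the
 * generation sequences of different address spaces never collide).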
*/ 774 slots->generation = i; 775 rcu_assign_pointer(kvm->memslots[i], slots); 776 } 777 778 for (i = 0; i < KVM_NR_BUSES; i++) { 779 rcu_assign_pointer(kvm->buses[i], 780 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 781 if (!kvm->buses[i]) 782 goto out_err_no_arch_destroy_vm; 783 } 784 785 kvm->max_halt_poll_ns = halt_poll_ns; 786 787 r = kvm_arch_init_vm(kvm, type); 788 if (r) 789 goto out_err_no_arch_destroy_vm; 790 791 r = hardware_enable_all(); 792 if (r) 793 goto out_err_no_disable; 794 795 #ifdef CONFIG_HAVE_KVM_IRQFD 796 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 797 #endif 798 799 r = kvm_init_mmu_notifier(kvm); 800 if (r) 801 goto out_err_no_mmu_notifier; 802 803 r = kvm_arch_post_init_vm(kvm); 804 if (r) 805 goto out_err; 806 807 mutex_lock(&kvm_lock); 808 list_add(&kvm->vm_list, &vm_list); 809 mutex_unlock(&kvm_lock); 810 811 preempt_notifier_inc(); 812 813 return kvm; 814 815 out_err: 816 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 817 if (kvm->mmu_notifier.ops) 818 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 819 #endif 820 out_err_no_mmu_notifier: 821 hardware_disable_all(); 822 out_err_no_disable: 823 kvm_arch_destroy_vm(kvm); 824 out_err_no_arch_destroy_vm: 825 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 826 for (i = 0; i < KVM_NR_BUSES; i++) 827 kfree(kvm_get_bus(kvm, i)); 828 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 829 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 830 cleanup_srcu_struct(&kvm->irq_srcu); 831 out_err_no_irq_srcu: 832 cleanup_srcu_struct(&kvm->srcu); 833 out_err_no_srcu: 834 kvm_arch_free_vm(kvm); 835 mmdrop(current->mm); 836 return ERR_PTR(r); 837 } 838 839 static void kvm_destroy_devices(struct kvm *kvm) 840 { 841 struct kvm_device *dev, *tmp; 842 843 /* 844 * We do not need to take the kvm->lock here, because nobody else 845 * has a reference to the struct kvm at this point and therefore 846 * cannot access the devices list anyhow. 
847 */ 848 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 849 list_del(&dev->vm_node); 850 dev->ops->destroy(dev); 851 } 852 } 853 854 static void kvm_destroy_vm(struct kvm *kvm) 855 { 856 int i; 857 struct mm_struct *mm = kvm->mm; 858 859 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 860 kvm_destroy_vm_debugfs(kvm); 861 kvm_arch_sync_events(kvm); 862 mutex_lock(&kvm_lock); 863 list_del(&kvm->vm_list); 864 mutex_unlock(&kvm_lock); 865 kvm_arch_pre_destroy_vm(kvm); 866 867 kvm_free_irq_routing(kvm); 868 for (i = 0; i < KVM_NR_BUSES; i++) { 869 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 870 871 if (bus) 872 kvm_io_bus_destroy(bus); 873 kvm->buses[i] = NULL; 874 } 875 kvm_coalesced_mmio_free(kvm); 876 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 877 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 878 #else 879 kvm_arch_flush_shadow_all(kvm); 880 #endif 881 kvm_arch_destroy_vm(kvm); 882 kvm_destroy_devices(kvm); 883 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 884 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 885 cleanup_srcu_struct(&kvm->irq_srcu); 886 cleanup_srcu_struct(&kvm->srcu); 887 kvm_arch_free_vm(kvm); 888 preempt_notifier_dec(); 889 hardware_disable_all(); 890 mmdrop(mm); 891 } 892 893 void kvm_get_kvm(struct kvm *kvm) 894 { 895 refcount_inc(&kvm->users_count); 896 } 897 EXPORT_SYMBOL_GPL(kvm_get_kvm); 898 899 void kvm_put_kvm(struct kvm *kvm) 900 { 901 if (refcount_dec_and_test(&kvm->users_count)) 902 kvm_destroy_vm(kvm); 903 } 904 EXPORT_SYMBOL_GPL(kvm_put_kvm); 905 906 /* 907 * Used to put a reference that was taken on behalf of an object associated 908 * with a user-visible file descriptor, e.g. a vcpu or device, if installation 909 * of the new file descriptor fails and the reference cannot be transferred to 910 * its final owner. In such cases, the caller is still actively using @kvm and 911 * will fail miserably if the refcount unexpectedly hits zero. 912 */ 913 void kvm_put_kvm_no_destroy(struct kvm *kvm) 914 { 915 WARN_ON(refcount_dec_and_test(&kvm->users_count)); 916 } 917 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); 918 919 static int kvm_vm_release(struct inode *inode, struct file *filp) 920 { 921 struct kvm *kvm = filp->private_data; 922 923 kvm_irqfd_release(kvm); 924 925 kvm_put_kvm(kvm); 926 return 0; 927 } 928 929 /* 930 * Allocation size is twice as large as the actual dirty bitmap size. 931 * See kvm_vm_ioctl_get_dirty_log() why this is needed. 932 */ 933 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) 934 { 935 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 936 937 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); 938 if (!memslot->dirty_bitmap) 939 return -ENOMEM; 940 941 return 0; 942 } 943 944 /* 945 * Delete a memslot by decrementing the number of used slots and shifting all 946 * other entries in the array forward one spot. 
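 *
 * For example (GFN values purely illustrative), deleting B from
 *
 *	used_slots == 3:  [ A(gfn 0x300), B(gfn 0x200), C(gfn 0x100) ]
 *
 * shifts C forward one spot, drops B from the used range and sets
 * id_to_index[B] to -1:
 *
 *	used_slots == 2:  [ A(gfn 0x300), C(gfn 0x100), ... ]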
947 */ 948 static inline void kvm_memslot_delete(struct kvm_memslots *slots, 949 struct kvm_memory_slot *memslot) 950 { 951 struct kvm_memory_slot *mslots = slots->memslots; 952 int i; 953 954 if (WARN_ON(slots->id_to_index[memslot->id] == -1)) 955 return; 956 957 slots->used_slots--; 958 959 if (atomic_read(&slots->lru_slot) >= slots->used_slots) 960 atomic_set(&slots->lru_slot, 0); 961 962 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { 963 mslots[i] = mslots[i + 1]; 964 slots->id_to_index[mslots[i].id] = i; 965 } 966 mslots[i] = *memslot; 967 slots->id_to_index[memslot->id] = -1; 968 } 969 970 /* 971 * "Insert" a new memslot by incrementing the number of used slots. Returns 972 * the new slot's initial index into the memslots array. 973 */ 974 static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) 975 { 976 return slots->used_slots++; 977 } 978 979 /* 980 * Move a changed memslot backwards in the array by shifting existing slots 981 * with a higher GFN toward the front of the array. Note, the changed memslot 982 * itself is not preserved in the array, i.e. not swapped at this time, only 983 * its new index into the array is tracked. Returns the changed memslot's 984 * current index into the memslots array. 985 */ 986 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, 987 struct kvm_memory_slot *memslot) 988 { 989 struct kvm_memory_slot *mslots = slots->memslots; 990 int i; 991 992 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || 993 WARN_ON_ONCE(!slots->used_slots)) 994 return -1; 995 996 /* 997 * Move the target memslot backward in the array by shifting existing 998 * memslots with a higher GFN (than the target memslot) towards the 999 * front of the array. 1000 */ 1001 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { 1002 if (memslot->base_gfn > mslots[i + 1].base_gfn) 1003 break; 1004 1005 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); 1006 1007 /* Shift the next memslot forward one and update its index. */ 1008 mslots[i] = mslots[i + 1]; 1009 slots->id_to_index[mslots[i].id] = i; 1010 } 1011 return i; 1012 } 1013 1014 /* 1015 * Move a changed memslot forwards in the array by shifting existing slots with 1016 * a lower GFN toward the back of the array. Note, the changed memslot itself 1017 * is not preserved in the array, i.e. not swapped at this time, only its new 1018 * index into the array is tracked. Returns the changed memslot's final index 1019 * into the memslots array. 1020 */ 1021 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, 1022 struct kvm_memory_slot *memslot, 1023 int start) 1024 { 1025 struct kvm_memory_slot *mslots = slots->memslots; 1026 int i; 1027 1028 for (i = start; i > 0; i--) { 1029 if (memslot->base_gfn < mslots[i - 1].base_gfn) 1030 break; 1031 1032 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); 1033 1034 /* Shift the next memslot back one and update its index. */ 1035 mslots[i] = mslots[i - 1]; 1036 slots->id_to_index[mslots[i].id] = i; 1037 } 1038 return i; 1039 } 1040 1041 /* 1042 * Re-sort memslots based on their GFN to account for an added, deleted, or 1043 * moved memslot. Sorting memslots by GFN allows using a binary search during 1044 * memslot lookup. 1045 * 1046 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry 1047 * at memslots[0] has the highest GFN. 
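 *
 * search_memslots() depends on this ordering for its binary search;
 * roughly (a simplified sketch of the lookup helper in kvm_host.h):
 *
 *	while (start < end) {
 *		slot = start + (end - start) / 2;
 *		if (gfn >= memslots[slot].base_gfn)
 *			end = slot;
 *		else
 *			start = slot + 1;
 *	}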
 *
 * The sorting algorithm takes advantage of having initially sorted memslots
 * and knowing the position of the changed memslot. Sorting is also optimized
 * by not swapping the updated memslot and instead only shifting other memslots
 * and tracking the new index for the updated memslot. Only once its final
 * index is known is the updated memslot copied into its position in the array.
 *
 *  - When deleting a memslot, the deleted memslot simply needs to be moved to
 *    the end of the array.
 *
 *  - When creating a memslot, the algorithm "inserts" the new memslot at the
 *    end of the array and then shifts it forward to its correct location.
 *
 *  - When moving a memslot, the algorithm first moves the updated memslot
 *    backward to handle the scenario where the memslot's GFN was changed to a
 *    lower value. update_memslots() then falls through and runs the same flow
 *    as creating a memslot to move the memslot forward to handle the scenario
 *    where its GFN was changed to a higher value.
 *
 * Note, slots are sorted from highest->lowest instead of lowest->highest for
 * historical reasons. Originally, invalid memslots were denoted by having
 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
 * to the end of the array. The current algorithm uses dedicated logic to
 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
 *
 * The other historical motivation for highest->lowest was to improve the
 * performance of memslot lookup. KVM originally used a linear search starting
 * at memslots[0]. On x86, the largest memslot usually has one of the highest,
 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
 * single memslot above the 4gb boundary. As the largest memslot is also the
 * most likely to be referenced, sorting it to the front of the array was
 * advantageous. The current binary search starts from the middle of the array
 * and uses an LRU pointer to improve performance for all memslots and GFNs.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *memslot,
			    enum kvm_mr_change change)
{
	int i;

	if (change == KVM_MR_DELETE) {
		kvm_memslot_delete(slots, memslot);
	} else {
		if (change == KVM_MR_CREATE)
			i = kvm_memslot_insert_back(slots);
		else
			i = kvm_memslot_move_backward(slots, memslot);
		i = kvm_memslot_move_forward(slots, memslot, i);

		/*
		 * Copy the memslot to its new position in memslots and update
		 * its index accordingly.
1100 */ 1101 slots->memslots[i] = *memslot; 1102 slots->id_to_index[memslot->id] = i; 1103 } 1104 } 1105 1106 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 1107 { 1108 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 1109 1110 #ifdef __KVM_HAVE_READONLY_MEM 1111 valid_flags |= KVM_MEM_READONLY; 1112 #endif 1113 1114 if (mem->flags & ~valid_flags) 1115 return -EINVAL; 1116 1117 return 0; 1118 } 1119 1120 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 1121 int as_id, struct kvm_memslots *slots) 1122 { 1123 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 1124 u64 gen = old_memslots->generation; 1125 1126 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); 1127 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1128 1129 rcu_assign_pointer(kvm->memslots[as_id], slots); 1130 synchronize_srcu_expedited(&kvm->srcu); 1131 1132 /* 1133 * Increment the new memslot generation a second time, dropping the 1134 * update in-progress flag and incrementing the generation based on 1135 * the number of address spaces. This provides a unique and easily 1136 * identifiable generation number while the memslots are in flux. 1137 */ 1138 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1139 1140 /* 1141 * Generations must be unique even across address spaces. We do not need 1142 * a global counter for that, instead the generation space is evenly split 1143 * across address spaces. For example, with two address spaces, address 1144 * space 0 will use generations 0, 2, 4, ... while address space 1 will 1145 * use generations 1, 3, 5, ... 1146 */ 1147 gen += KVM_ADDRESS_SPACE_NUM; 1148 1149 kvm_arch_memslots_updated(kvm, gen); 1150 1151 slots->generation = gen; 1152 1153 return old_memslots; 1154 } 1155 1156 /* 1157 * Note, at a minimum, the current number of used slots must be allocated, even 1158 * when deleting a memslot, as we need a complete duplicate of the memslots for 1159 * use when invalidating a memslot prior to deleting/moving the memslot. 1160 */ 1161 static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, 1162 enum kvm_mr_change change) 1163 { 1164 struct kvm_memslots *slots; 1165 size_t old_size, new_size; 1166 1167 old_size = sizeof(struct kvm_memslots) + 1168 (sizeof(struct kvm_memory_slot) * old->used_slots); 1169 1170 if (change == KVM_MR_CREATE) 1171 new_size = old_size + sizeof(struct kvm_memory_slot); 1172 else 1173 new_size = old_size; 1174 1175 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); 1176 if (likely(slots)) 1177 memcpy(slots, old, old_size); 1178 1179 return slots; 1180 } 1181 1182 static int kvm_set_memslot(struct kvm *kvm, 1183 const struct kvm_userspace_memory_region *mem, 1184 struct kvm_memory_slot *old, 1185 struct kvm_memory_slot *new, int as_id, 1186 enum kvm_mr_change change) 1187 { 1188 struct kvm_memory_slot *slot; 1189 struct kvm_memslots *slots; 1190 int r; 1191 1192 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); 1193 if (!slots) 1194 return -ENOMEM; 1195 1196 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1197 /* 1198 * Note, the INVALID flag needs to be in the appropriate entry 1199 * in the freshly allocated memslots, not in @old or @new. 1200 */ 1201 slot = id_to_memslot(slots, old->id); 1202 slot->flags |= KVM_MEMSLOT_INVALID; 1203 1204 /* 1205 * We can re-use the old memslots, the only difference from the 1206 * newly installed memslots is the invalid flag, which will get 1207 * dropped by update_memslots anyway. 
We'll also revert to the 1208 * old memslots if preparing the new memory region fails. 1209 */ 1210 slots = install_new_memslots(kvm, as_id, slots); 1211 1212 /* From this point no new shadow pages pointing to a deleted, 1213 * or moved, memslot will be created. 1214 * 1215 * validation of sp->gfn happens in: 1216 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1217 * - kvm_is_visible_gfn (mmu_check_root) 1218 */ 1219 kvm_arch_flush_shadow_memslot(kvm, slot); 1220 } 1221 1222 r = kvm_arch_prepare_memory_region(kvm, new, mem, change); 1223 if (r) 1224 goto out_slots; 1225 1226 update_memslots(slots, new, change); 1227 slots = install_new_memslots(kvm, as_id, slots); 1228 1229 kvm_arch_commit_memory_region(kvm, mem, old, new, change); 1230 1231 kvfree(slots); 1232 return 0; 1233 1234 out_slots: 1235 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1236 slots = install_new_memslots(kvm, as_id, slots); 1237 kvfree(slots); 1238 return r; 1239 } 1240 1241 static int kvm_delete_memslot(struct kvm *kvm, 1242 const struct kvm_userspace_memory_region *mem, 1243 struct kvm_memory_slot *old, int as_id) 1244 { 1245 struct kvm_memory_slot new; 1246 int r; 1247 1248 if (!old->npages) 1249 return -EINVAL; 1250 1251 memset(&new, 0, sizeof(new)); 1252 new.id = old->id; 1253 /* 1254 * This is only for debugging purpose; it should never be referenced 1255 * for a removed memslot. 1256 */ 1257 new.as_id = as_id; 1258 1259 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); 1260 if (r) 1261 return r; 1262 1263 kvm_free_memslot(kvm, old); 1264 return 0; 1265 } 1266 1267 /* 1268 * Allocate some memory and give it an address in the guest physical address 1269 * space. 1270 * 1271 * Discontiguous memory is allowed, mostly for framebuffers. 1272 * 1273 * Must be called holding kvm->slots_lock for write. 1274 */ 1275 int __kvm_set_memory_region(struct kvm *kvm, 1276 const struct kvm_userspace_memory_region *mem) 1277 { 1278 struct kvm_memory_slot old, new; 1279 struct kvm_memory_slot *tmp; 1280 enum kvm_mr_change change; 1281 int as_id, id; 1282 int r; 1283 1284 r = check_memory_region_flags(mem); 1285 if (r) 1286 return r; 1287 1288 as_id = mem->slot >> 16; 1289 id = (u16)mem->slot; 1290 1291 /* General sanity checks */ 1292 if (mem->memory_size & (PAGE_SIZE - 1)) 1293 return -EINVAL; 1294 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1295 return -EINVAL; 1296 /* We can read the guest memory with __xxx_user() later on. */ 1297 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1298 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1299 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1300 mem->memory_size)) 1301 return -EINVAL; 1302 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1303 return -EINVAL; 1304 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1305 return -EINVAL; 1306 1307 /* 1308 * Make a full copy of the old memslot, the pointer will become stale 1309 * when the memslots are re-sorted by update_memslots(), and the old 1310 * memslot needs to be referenced after calling update_memslots(), e.g. 1311 * to free its resources and for arch specific behavior. 
1312 */ 1313 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); 1314 if (tmp) { 1315 old = *tmp; 1316 tmp = NULL; 1317 } else { 1318 memset(&old, 0, sizeof(old)); 1319 old.id = id; 1320 } 1321 1322 if (!mem->memory_size) 1323 return kvm_delete_memslot(kvm, mem, &old, as_id); 1324 1325 new.as_id = as_id; 1326 new.id = id; 1327 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 1328 new.npages = mem->memory_size >> PAGE_SHIFT; 1329 new.flags = mem->flags; 1330 new.userspace_addr = mem->userspace_addr; 1331 1332 if (new.npages > KVM_MEM_MAX_NR_PAGES) 1333 return -EINVAL; 1334 1335 if (!old.npages) { 1336 change = KVM_MR_CREATE; 1337 new.dirty_bitmap = NULL; 1338 memset(&new.arch, 0, sizeof(new.arch)); 1339 } else { /* Modify an existing slot. */ 1340 if ((new.userspace_addr != old.userspace_addr) || 1341 (new.npages != old.npages) || 1342 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 1343 return -EINVAL; 1344 1345 if (new.base_gfn != old.base_gfn) 1346 change = KVM_MR_MOVE; 1347 else if (new.flags != old.flags) 1348 change = KVM_MR_FLAGS_ONLY; 1349 else /* Nothing to change. */ 1350 return 0; 1351 1352 /* Copy dirty_bitmap and arch from the current memslot. */ 1353 new.dirty_bitmap = old.dirty_bitmap; 1354 memcpy(&new.arch, &old.arch, sizeof(new.arch)); 1355 } 1356 1357 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1358 /* Check for overlaps */ 1359 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { 1360 if (tmp->id == id) 1361 continue; 1362 if (!((new.base_gfn + new.npages <= tmp->base_gfn) || 1363 (new.base_gfn >= tmp->base_gfn + tmp->npages))) 1364 return -EEXIST; 1365 } 1366 } 1367 1368 /* Allocate/free page dirty bitmap as needed */ 1369 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 1370 new.dirty_bitmap = NULL; 1371 else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { 1372 r = kvm_alloc_dirty_bitmap(&new); 1373 if (r) 1374 return r; 1375 1376 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1377 bitmap_set(new.dirty_bitmap, 0, new.npages); 1378 } 1379 1380 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); 1381 if (r) 1382 goto out_bitmap; 1383 1384 if (old.dirty_bitmap && !new.dirty_bitmap) 1385 kvm_destroy_dirty_bitmap(&old); 1386 return 0; 1387 1388 out_bitmap: 1389 if (new.dirty_bitmap && !old.dirty_bitmap) 1390 kvm_destroy_dirty_bitmap(&new); 1391 return r; 1392 } 1393 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1394 1395 int kvm_set_memory_region(struct kvm *kvm, 1396 const struct kvm_userspace_memory_region *mem) 1397 { 1398 int r; 1399 1400 mutex_lock(&kvm->slots_lock); 1401 r = __kvm_set_memory_region(kvm, mem); 1402 mutex_unlock(&kvm->slots_lock); 1403 return r; 1404 } 1405 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1406 1407 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1408 struct kvm_userspace_memory_region *mem) 1409 { 1410 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1411 return -EINVAL; 1412 1413 return kvm_set_memory_region(kvm, mem); 1414 } 1415 1416 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1417 /** 1418 * kvm_get_dirty_log - get a snapshot of dirty pages 1419 * @kvm: pointer to kvm instance 1420 * @log: slot id and address to which we copy the log 1421 * @is_dirty: set to '1' if any dirty pages were found 1422 * @memslot: set to the associated memslot, always valid on success 1423 */ 1424 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 1425 int *is_dirty, struct kvm_memory_slot **memslot) 1426 { 1427 struct kvm_memslots *slots; 1428 int i, as_id, id; 1429 unsigned long n; 1430 unsigned long any = 0; 
1431 1432 /* Dirty ring tracking is exclusive to dirty log tracking */ 1433 if (kvm->dirty_ring_size) 1434 return -ENXIO; 1435 1436 *memslot = NULL; 1437 *is_dirty = 0; 1438 1439 as_id = log->slot >> 16; 1440 id = (u16)log->slot; 1441 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1442 return -EINVAL; 1443 1444 slots = __kvm_memslots(kvm, as_id); 1445 *memslot = id_to_memslot(slots, id); 1446 if (!(*memslot) || !(*memslot)->dirty_bitmap) 1447 return -ENOENT; 1448 1449 kvm_arch_sync_dirty_log(kvm, *memslot); 1450 1451 n = kvm_dirty_bitmap_bytes(*memslot); 1452 1453 for (i = 0; !any && i < n/sizeof(long); ++i) 1454 any = (*memslot)->dirty_bitmap[i]; 1455 1456 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 1457 return -EFAULT; 1458 1459 if (any) 1460 *is_dirty = 1; 1461 return 0; 1462 } 1463 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1464 1465 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 1466 /** 1467 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 1468 * and reenable dirty page tracking for the corresponding pages. 1469 * @kvm: pointer to kvm instance 1470 * @log: slot id and address to which we copy the log 1471 * 1472 * We need to keep it in mind that VCPU threads can write to the bitmap 1473 * concurrently. So, to avoid losing track of dirty pages we keep the 1474 * following order: 1475 * 1476 * 1. Take a snapshot of the bit and clear it if needed. 1477 * 2. Write protect the corresponding page. 1478 * 3. Copy the snapshot to the userspace. 1479 * 4. Upon return caller flushes TLB's if needed. 1480 * 1481 * Between 2 and 4, the guest may write to the page using the remaining TLB 1482 * entry. This is not a problem because the page is reported dirty using 1483 * the snapshot taken before and step 4 ensures that writes done after 1484 * exiting to userspace will be logged for the next call. 1485 * 1486 */ 1487 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 1488 { 1489 struct kvm_memslots *slots; 1490 struct kvm_memory_slot *memslot; 1491 int i, as_id, id; 1492 unsigned long n; 1493 unsigned long *dirty_bitmap; 1494 unsigned long *dirty_bitmap_buffer; 1495 bool flush; 1496 1497 /* Dirty ring tracking is exclusive to dirty log tracking */ 1498 if (kvm->dirty_ring_size) 1499 return -ENXIO; 1500 1501 as_id = log->slot >> 16; 1502 id = (u16)log->slot; 1503 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1504 return -EINVAL; 1505 1506 slots = __kvm_memslots(kvm, as_id); 1507 memslot = id_to_memslot(slots, id); 1508 if (!memslot || !memslot->dirty_bitmap) 1509 return -ENOENT; 1510 1511 dirty_bitmap = memslot->dirty_bitmap; 1512 1513 kvm_arch_sync_dirty_log(kvm, memslot); 1514 1515 n = kvm_dirty_bitmap_bytes(memslot); 1516 flush = false; 1517 if (kvm->manual_dirty_log_protect) { 1518 /* 1519 * Unlike kvm_get_dirty_log, we always return false in *flush, 1520 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 1521 * is some code duplication between this function and 1522 * kvm_get_dirty_log, but hopefully all architecture 1523 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log 1524 * can be eliminated. 
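 *
 * (Manual protection is opted into by userspace through
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2; in that mode the bits are only
 * cleared, and write protection re-armed, by KVM_CLEAR_DIRTY_LOG.)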
1525 */ 1526 dirty_bitmap_buffer = dirty_bitmap; 1527 } else { 1528 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1529 memset(dirty_bitmap_buffer, 0, n); 1530 1531 KVM_MMU_LOCK(kvm); 1532 for (i = 0; i < n / sizeof(long); i++) { 1533 unsigned long mask; 1534 gfn_t offset; 1535 1536 if (!dirty_bitmap[i]) 1537 continue; 1538 1539 flush = true; 1540 mask = xchg(&dirty_bitmap[i], 0); 1541 dirty_bitmap_buffer[i] = mask; 1542 1543 offset = i * BITS_PER_LONG; 1544 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1545 offset, mask); 1546 } 1547 KVM_MMU_UNLOCK(kvm); 1548 } 1549 1550 if (flush) 1551 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 1552 1553 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 1554 return -EFAULT; 1555 return 0; 1556 } 1557 1558 1559 /** 1560 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 1561 * @kvm: kvm instance 1562 * @log: slot id and address to which we copy the log 1563 * 1564 * Steps 1-4 below provide general overview of dirty page logging. See 1565 * kvm_get_dirty_log_protect() function description for additional details. 1566 * 1567 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 1568 * always flush the TLB (step 4) even if previous step failed and the dirty 1569 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 1570 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 1571 * writes will be marked dirty for next log read. 1572 * 1573 * 1. Take a snapshot of the bit and clear it if needed. 1574 * 2. Write protect the corresponding page. 1575 * 3. Copy the snapshot to the userspace. 1576 * 4. Flush TLB's if needed. 1577 */ 1578 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 1579 struct kvm_dirty_log *log) 1580 { 1581 int r; 1582 1583 mutex_lock(&kvm->slots_lock); 1584 1585 r = kvm_get_dirty_log_protect(kvm, log); 1586 1587 mutex_unlock(&kvm->slots_lock); 1588 return r; 1589 } 1590 1591 /** 1592 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 1593 * and reenable dirty page tracking for the corresponding pages. 
1594 * @kvm: pointer to kvm instance 1595 * @log: slot id and address from which to fetch the bitmap of dirty pages 1596 */ 1597 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 1598 struct kvm_clear_dirty_log *log) 1599 { 1600 struct kvm_memslots *slots; 1601 struct kvm_memory_slot *memslot; 1602 int as_id, id; 1603 gfn_t offset; 1604 unsigned long i, n; 1605 unsigned long *dirty_bitmap; 1606 unsigned long *dirty_bitmap_buffer; 1607 bool flush; 1608 1609 /* Dirty ring tracking is exclusive to dirty log tracking */ 1610 if (kvm->dirty_ring_size) 1611 return -ENXIO; 1612 1613 as_id = log->slot >> 16; 1614 id = (u16)log->slot; 1615 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1616 return -EINVAL; 1617 1618 if (log->first_page & 63) 1619 return -EINVAL; 1620 1621 slots = __kvm_memslots(kvm, as_id); 1622 memslot = id_to_memslot(slots, id); 1623 if (!memslot || !memslot->dirty_bitmap) 1624 return -ENOENT; 1625 1626 dirty_bitmap = memslot->dirty_bitmap; 1627 1628 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 1629 1630 if (log->first_page > memslot->npages || 1631 log->num_pages > memslot->npages - log->first_page || 1632 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 1633 return -EINVAL; 1634 1635 kvm_arch_sync_dirty_log(kvm, memslot); 1636 1637 flush = false; 1638 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1639 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 1640 return -EFAULT; 1641 1642 KVM_MMU_LOCK(kvm); 1643 for (offset = log->first_page, i = offset / BITS_PER_LONG, 1644 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 1645 i++, offset += BITS_PER_LONG) { 1646 unsigned long mask = *dirty_bitmap_buffer++; 1647 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 1648 if (!mask) 1649 continue; 1650 1651 mask &= atomic_long_fetch_andnot(mask, p); 1652 1653 /* 1654 * mask contains the bits that really have been cleared. This 1655 * never includes any bits beyond the length of the memslot (if 1656 * the length is not aligned to 64 pages), therefore it is not 1657 * a problem if userspace sets them in log->dirty_bitmap. 
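 *
 * For example (illustrative values), if userspace asks to clear bits
 * 0x0f but only 0x05 were still set in dirty_bitmap, mask ends up as
 * 0x05 and only those two pages have dirty tracking re-armed.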
1658 */ 1659 if (mask) { 1660 flush = true; 1661 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1662 offset, mask); 1663 } 1664 } 1665 KVM_MMU_UNLOCK(kvm); 1666 1667 if (flush) 1668 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 1669 1670 return 0; 1671 } 1672 1673 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 1674 struct kvm_clear_dirty_log *log) 1675 { 1676 int r; 1677 1678 mutex_lock(&kvm->slots_lock); 1679 1680 r = kvm_clear_dirty_log_protect(kvm, log); 1681 1682 mutex_unlock(&kvm->slots_lock); 1683 return r; 1684 } 1685 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 1686 1687 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1688 { 1689 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 1690 } 1691 EXPORT_SYMBOL_GPL(gfn_to_memslot); 1692 1693 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 1694 { 1695 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); 1696 } 1697 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); 1698 1699 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1700 { 1701 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 1702 1703 return kvm_is_visible_memslot(memslot); 1704 } 1705 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1706 1707 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 1708 { 1709 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1710 1711 return kvm_is_visible_memslot(memslot); 1712 } 1713 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 1714 1715 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 1716 { 1717 struct vm_area_struct *vma; 1718 unsigned long addr, size; 1719 1720 size = PAGE_SIZE; 1721 1722 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 1723 if (kvm_is_error_hva(addr)) 1724 return PAGE_SIZE; 1725 1726 mmap_read_lock(current->mm); 1727 vma = find_vma(current->mm, addr); 1728 if (!vma) 1729 goto out; 1730 1731 size = vma_kernel_pagesize(vma); 1732 1733 out: 1734 mmap_read_unlock(current->mm); 1735 1736 return size; 1737 } 1738 1739 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 1740 { 1741 return slot->flags & KVM_MEM_READONLY; 1742 } 1743 1744 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1745 gfn_t *nr_pages, bool write) 1746 { 1747 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 1748 return KVM_HVA_ERR_BAD; 1749 1750 if (memslot_is_readonly(slot) && write) 1751 return KVM_HVA_ERR_RO_BAD; 1752 1753 if (nr_pages) 1754 *nr_pages = slot->npages - (gfn - slot->base_gfn); 1755 1756 return __gfn_to_hva_memslot(slot, gfn); 1757 } 1758 1759 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 1760 gfn_t *nr_pages) 1761 { 1762 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1763 } 1764 1765 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1766 gfn_t gfn) 1767 { 1768 return gfn_to_hva_many(slot, gfn, NULL); 1769 } 1770 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1771 1772 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1773 { 1774 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1775 } 1776 EXPORT_SYMBOL_GPL(gfn_to_hva); 1777 1778 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1779 { 1780 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1781 } 1782 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1783 1784 /* 1785 * Return the hva of a @gfn and the R/W attribute if possible. 
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The fast path to get the writable pfn, which will be stored in @pfn;
 * true indicates success, otherwise false is returned. It's also the
 * only part that can run in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows mapping a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address;
 * 1 indicates success, -errno is returned if an error is detected.
1857 */ 1858 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 1859 bool *writable, kvm_pfn_t *pfn) 1860 { 1861 unsigned int flags = FOLL_HWPOISON; 1862 struct page *page; 1863 int npages = 0; 1864 1865 might_sleep(); 1866 1867 if (writable) 1868 *writable = write_fault; 1869 1870 if (write_fault) 1871 flags |= FOLL_WRITE; 1872 if (async) 1873 flags |= FOLL_NOWAIT; 1874 1875 npages = get_user_pages_unlocked(addr, 1, &page, flags); 1876 if (npages != 1) 1877 return npages; 1878 1879 /* map read fault as writable if possible */ 1880 if (unlikely(!write_fault) && writable) { 1881 struct page *wpage; 1882 1883 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 1884 *writable = true; 1885 put_page(page); 1886 page = wpage; 1887 } 1888 } 1889 *pfn = page_to_pfn(page); 1890 return npages; 1891 } 1892 1893 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 1894 { 1895 if (unlikely(!(vma->vm_flags & VM_READ))) 1896 return false; 1897 1898 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 1899 return false; 1900 1901 return true; 1902 } 1903 1904 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 1905 unsigned long addr, bool *async, 1906 bool write_fault, bool *writable, 1907 kvm_pfn_t *p_pfn) 1908 { 1909 kvm_pfn_t pfn; 1910 pte_t *ptep; 1911 spinlock_t *ptl; 1912 int r; 1913 1914 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 1915 if (r) { 1916 /* 1917 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 1918 * not call the fault handler, so do it here. 1919 */ 1920 bool unlocked = false; 1921 r = fixup_user_fault(current->mm, addr, 1922 (write_fault ? FAULT_FLAG_WRITE : 0), 1923 &unlocked); 1924 if (unlocked) 1925 return -EAGAIN; 1926 if (r) 1927 return r; 1928 1929 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 1930 if (r) 1931 return r; 1932 } 1933 1934 if (write_fault && !pte_write(*ptep)) { 1935 pfn = KVM_PFN_ERR_RO_FAULT; 1936 goto out; 1937 } 1938 1939 if (writable) 1940 *writable = pte_write(*ptep); 1941 pfn = pte_pfn(*ptep); 1942 1943 /* 1944 * Get a reference here because callers of *hva_to_pfn* and 1945 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 1946 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 1947 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will 1948 * simply do nothing for reserved pfns. 1949 * 1950 * Whoever called remap_pfn_range is also going to call e.g. 1951 * unmap_mapping_range before the underlying pages are freed, 1952 * causing a call to our MMU notifier. 1953 */ 1954 kvm_get_pfn(pfn); 1955 1956 out: 1957 pte_unmap_unlock(ptep, ptl); 1958 *p_pfn = pfn; 1959 return 0; 1960 } 1961 1962 /* 1963 * Pin guest page in memory and return its pfn. 1964 * @addr: host virtual address which maps memory to the guest 1965 * @atomic: whether this function can sleep 1966 * @async: whether this function need to wait IO complete if the 1967 * host page is not in the memory 1968 * @write_fault: whether we should get a writable host page 1969 * @writable: whether it allows to map a writable host page for !@write_fault 1970 * 1971 * The function will map a writable host page for these two cases: 1972 * 1): @write_fault = true 1973 * 2): @write_fault = false && @writable, @writable will tell the caller 1974 * whether the mapping is writable. 
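 *
 * Note that @atomic == true means the caller cannot sleep, so only the
 * fast path (hva_to_pfn_fast) is tried and KVM_PFN_ERR_FAULT is returned
 * if that fails.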
1975 */ 1976 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 1977 bool write_fault, bool *writable) 1978 { 1979 struct vm_area_struct *vma; 1980 kvm_pfn_t pfn = 0; 1981 int npages, r; 1982 1983 /* we can do it either atomically or asynchronously, not both */ 1984 BUG_ON(atomic && async); 1985 1986 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 1987 return pfn; 1988 1989 if (atomic) 1990 return KVM_PFN_ERR_FAULT; 1991 1992 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 1993 if (npages == 1) 1994 return pfn; 1995 1996 mmap_read_lock(current->mm); 1997 if (npages == -EHWPOISON || 1998 (!async && check_user_page_hwpoison(addr))) { 1999 pfn = KVM_PFN_ERR_HWPOISON; 2000 goto exit; 2001 } 2002 2003 retry: 2004 vma = find_vma_intersection(current->mm, addr, addr + 1); 2005 2006 if (vma == NULL) 2007 pfn = KVM_PFN_ERR_FAULT; 2008 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2009 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); 2010 if (r == -EAGAIN) 2011 goto retry; 2012 if (r < 0) 2013 pfn = KVM_PFN_ERR_FAULT; 2014 } else { 2015 if (async && vma_is_valid(vma, write_fault)) 2016 *async = true; 2017 pfn = KVM_PFN_ERR_FAULT; 2018 } 2019 exit: 2020 mmap_read_unlock(current->mm); 2021 return pfn; 2022 } 2023 2024 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, 2025 bool atomic, bool *async, bool write_fault, 2026 bool *writable) 2027 { 2028 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2029 2030 if (addr == KVM_HVA_ERR_RO_BAD) { 2031 if (writable) 2032 *writable = false; 2033 return KVM_PFN_ERR_RO_FAULT; 2034 } 2035 2036 if (kvm_is_error_hva(addr)) { 2037 if (writable) 2038 *writable = false; 2039 return KVM_PFN_NOSLOT; 2040 } 2041 2042 /* Do not map writable pfn in the readonly memslot. 
*/ 2043 if (writable && memslot_is_readonly(slot)) { 2044 *writable = false; 2045 writable = NULL; 2046 } 2047 2048 return hva_to_pfn(addr, atomic, async, write_fault, 2049 writable); 2050 } 2051 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2052 2053 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2054 bool *writable) 2055 { 2056 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2057 write_fault, writable); 2058 } 2059 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2060 2061 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 2062 { 2063 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); 2064 } 2065 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2066 2067 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 2068 { 2069 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); 2070 } 2071 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2072 2073 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2074 { 2075 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2076 } 2077 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2078 2079 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2080 { 2081 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2082 } 2083 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2084 2085 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2086 { 2087 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2088 } 2089 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2090 2091 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2092 struct page **pages, int nr_pages) 2093 { 2094 unsigned long addr; 2095 gfn_t entry = 0; 2096 2097 addr = gfn_to_hva_many(slot, gfn, &entry); 2098 if (kvm_is_error_hva(addr)) 2099 return -1; 2100 2101 if (entry < nr_pages) 2102 return 0; 2103 2104 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2105 } 2106 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2107 2108 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2109 { 2110 if (is_error_noslot_pfn(pfn)) 2111 return KVM_ERR_PTR_BAD_PAGE; 2112 2113 if (kvm_is_reserved_pfn(pfn)) { 2114 WARN_ON(1); 2115 return KVM_ERR_PTR_BAD_PAGE; 2116 } 2117 2118 return pfn_to_page(pfn); 2119 } 2120 2121 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2122 { 2123 kvm_pfn_t pfn; 2124 2125 pfn = gfn_to_pfn(kvm, gfn); 2126 2127 return kvm_pfn_to_page(pfn); 2128 } 2129 EXPORT_SYMBOL_GPL(gfn_to_page); 2130 2131 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) 2132 { 2133 if (pfn == 0) 2134 return; 2135 2136 if (cache) 2137 cache->pfn = cache->gfn = 0; 2138 2139 if (dirty) 2140 kvm_release_pfn_dirty(pfn); 2141 else 2142 kvm_release_pfn_clean(pfn); 2143 } 2144 2145 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, 2146 struct gfn_to_pfn_cache *cache, u64 gen) 2147 { 2148 kvm_release_pfn(cache->pfn, cache->dirty, cache); 2149 2150 cache->pfn = gfn_to_pfn_memslot(slot, gfn); 2151 cache->gfn = gfn; 2152 cache->dirty = false; 2153 cache->generation = gen; 2154 } 2155 2156 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, 2157 struct kvm_host_map *map, 2158 struct gfn_to_pfn_cache *cache, 2159 bool atomic) 2160 { 2161 kvm_pfn_t pfn; 2162 void *hva = NULL; 2163 struct page *page = KVM_UNMAPPED_PAGE; 2164 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); 2165 u64 gen = slots->generation; 2166 2167 if (!map) 2168 return -EINVAL; 2169 2170 if (cache) { 2171 if (!cache->pfn || cache->gfn 
!= gfn || 2172 cache->generation != gen) { 2173 if (atomic) 2174 return -EAGAIN; 2175 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); 2176 } 2177 pfn = cache->pfn; 2178 } else { 2179 if (atomic) 2180 return -EAGAIN; 2181 pfn = gfn_to_pfn_memslot(slot, gfn); 2182 } 2183 if (is_error_noslot_pfn(pfn)) 2184 return -EINVAL; 2185 2186 if (pfn_valid(pfn)) { 2187 page = pfn_to_page(pfn); 2188 if (atomic) 2189 hva = kmap_atomic(page); 2190 else 2191 hva = kmap(page); 2192 #ifdef CONFIG_HAS_IOMEM 2193 } else if (!atomic) { 2194 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2195 } else { 2196 return -EINVAL; 2197 #endif 2198 } 2199 2200 if (!hva) 2201 return -EFAULT; 2202 2203 map->page = page; 2204 map->hva = hva; 2205 map->pfn = pfn; 2206 map->gfn = gfn; 2207 2208 return 0; 2209 } 2210 2211 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, 2212 struct gfn_to_pfn_cache *cache, bool atomic) 2213 { 2214 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, 2215 cache, atomic); 2216 } 2217 EXPORT_SYMBOL_GPL(kvm_map_gfn); 2218 2219 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2220 { 2221 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, 2222 NULL, false); 2223 } 2224 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2225 2226 static void __kvm_unmap_gfn(struct kvm *kvm, 2227 struct kvm_memory_slot *memslot, 2228 struct kvm_host_map *map, 2229 struct gfn_to_pfn_cache *cache, 2230 bool dirty, bool atomic) 2231 { 2232 if (!map) 2233 return; 2234 2235 if (!map->hva) 2236 return; 2237 2238 if (map->page != KVM_UNMAPPED_PAGE) { 2239 if (atomic) 2240 kunmap_atomic(map->hva); 2241 else 2242 kunmap(map->page); 2243 } 2244 #ifdef CONFIG_HAS_IOMEM 2245 else if (!atomic) 2246 memunmap(map->hva); 2247 else 2248 WARN_ONCE(1, "Unexpected unmapping in atomic context"); 2249 #endif 2250 2251 if (dirty) 2252 mark_page_dirty_in_slot(kvm, memslot, map->gfn); 2253 2254 if (cache) 2255 cache->dirty |= dirty; 2256 else 2257 kvm_release_pfn(map->pfn, dirty, NULL); 2258 2259 map->hva = NULL; 2260 map->page = NULL; 2261 } 2262 2263 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 2264 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) 2265 { 2266 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, 2267 cache, dirty, atomic); 2268 return 0; 2269 } 2270 EXPORT_SYMBOL_GPL(kvm_unmap_gfn); 2271 2272 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2273 { 2274 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), 2275 map, NULL, dirty, false); 2276 } 2277 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2278 2279 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2280 { 2281 kvm_pfn_t pfn; 2282 2283 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2284 2285 return kvm_pfn_to_page(pfn); 2286 } 2287 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2288 2289 void kvm_release_page_clean(struct page *page) 2290 { 2291 WARN_ON(is_error_page(page)); 2292 2293 kvm_release_pfn_clean(page_to_pfn(page)); 2294 } 2295 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2296 2297 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2298 { 2299 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2300 put_page(pfn_to_page(pfn)); 2301 } 2302 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2303 2304 void kvm_release_page_dirty(struct page *page) 2305 { 2306 WARN_ON(is_error_page(page)); 2307 2308 kvm_release_pfn_dirty(page_to_pfn(page)); 2309 } 2310 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2311 2312 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 
2313 { 2314 kvm_set_pfn_dirty(pfn); 2315 kvm_release_pfn_clean(pfn); 2316 } 2317 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2318 2319 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2320 { 2321 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2322 SetPageDirty(pfn_to_page(pfn)); 2323 } 2324 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2325 2326 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2327 { 2328 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2329 mark_page_accessed(pfn_to_page(pfn)); 2330 } 2331 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2332 2333 void kvm_get_pfn(kvm_pfn_t pfn) 2334 { 2335 if (!kvm_is_reserved_pfn(pfn)) 2336 get_page(pfn_to_page(pfn)); 2337 } 2338 EXPORT_SYMBOL_GPL(kvm_get_pfn); 2339 2340 static int next_segment(unsigned long len, int offset) 2341 { 2342 if (len > PAGE_SIZE - offset) 2343 return PAGE_SIZE - offset; 2344 else 2345 return len; 2346 } 2347 2348 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2349 void *data, int offset, int len) 2350 { 2351 int r; 2352 unsigned long addr; 2353 2354 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2355 if (kvm_is_error_hva(addr)) 2356 return -EFAULT; 2357 r = __copy_from_user(data, (void __user *)addr + offset, len); 2358 if (r) 2359 return -EFAULT; 2360 return 0; 2361 } 2362 2363 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2364 int len) 2365 { 2366 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2367 2368 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2369 } 2370 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2371 2372 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2373 int offset, int len) 2374 { 2375 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2376 2377 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2378 } 2379 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2380 2381 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2382 { 2383 gfn_t gfn = gpa >> PAGE_SHIFT; 2384 int seg; 2385 int offset = offset_in_page(gpa); 2386 int ret; 2387 2388 while ((seg = next_segment(len, offset)) != 0) { 2389 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2390 if (ret < 0) 2391 return ret; 2392 offset = 0; 2393 len -= seg; 2394 data += seg; 2395 ++gfn; 2396 } 2397 return 0; 2398 } 2399 EXPORT_SYMBOL_GPL(kvm_read_guest); 2400 2401 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2402 { 2403 gfn_t gfn = gpa >> PAGE_SHIFT; 2404 int seg; 2405 int offset = offset_in_page(gpa); 2406 int ret; 2407 2408 while ((seg = next_segment(len, offset)) != 0) { 2409 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2410 if (ret < 0) 2411 return ret; 2412 offset = 0; 2413 len -= seg; 2414 data += seg; 2415 ++gfn; 2416 } 2417 return 0; 2418 } 2419 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2420 2421 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2422 void *data, int offset, unsigned long len) 2423 { 2424 int r; 2425 unsigned long addr; 2426 2427 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2428 if (kvm_is_error_hva(addr)) 2429 return -EFAULT; 2430 pagefault_disable(); 2431 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2432 pagefault_enable(); 2433 if (r) 2434 return -EFAULT; 2435 return 0; 2436 } 2437 2438 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2439 void *data, unsigned long len) 2440 { 2441 gfn_t gfn = gpa >> PAGE_SHIFT; 2442 struct kvm_memory_slot 
*slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2443 int offset = offset_in_page(gpa); 2444 2445 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2446 } 2447 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2448 2449 static int __kvm_write_guest_page(struct kvm *kvm, 2450 struct kvm_memory_slot *memslot, gfn_t gfn, 2451 const void *data, int offset, int len) 2452 { 2453 int r; 2454 unsigned long addr; 2455 2456 addr = gfn_to_hva_memslot(memslot, gfn); 2457 if (kvm_is_error_hva(addr)) 2458 return -EFAULT; 2459 r = __copy_to_user((void __user *)addr + offset, data, len); 2460 if (r) 2461 return -EFAULT; 2462 mark_page_dirty_in_slot(kvm, memslot, gfn); 2463 return 0; 2464 } 2465 2466 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2467 const void *data, int offset, int len) 2468 { 2469 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2470 2471 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2472 } 2473 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2474 2475 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2476 const void *data, int offset, int len) 2477 { 2478 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2479 2480 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2481 } 2482 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2483 2484 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2485 unsigned long len) 2486 { 2487 gfn_t gfn = gpa >> PAGE_SHIFT; 2488 int seg; 2489 int offset = offset_in_page(gpa); 2490 int ret; 2491 2492 while ((seg = next_segment(len, offset)) != 0) { 2493 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2494 if (ret < 0) 2495 return ret; 2496 offset = 0; 2497 len -= seg; 2498 data += seg; 2499 ++gfn; 2500 } 2501 return 0; 2502 } 2503 EXPORT_SYMBOL_GPL(kvm_write_guest); 2504 2505 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2506 unsigned long len) 2507 { 2508 gfn_t gfn = gpa >> PAGE_SHIFT; 2509 int seg; 2510 int offset = offset_in_page(gpa); 2511 int ret; 2512 2513 while ((seg = next_segment(len, offset)) != 0) { 2514 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 2515 if (ret < 0) 2516 return ret; 2517 offset = 0; 2518 len -= seg; 2519 data += seg; 2520 ++gfn; 2521 } 2522 return 0; 2523 } 2524 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 2525 2526 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 2527 struct gfn_to_hva_cache *ghc, 2528 gpa_t gpa, unsigned long len) 2529 { 2530 int offset = offset_in_page(gpa); 2531 gfn_t start_gfn = gpa >> PAGE_SHIFT; 2532 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 2533 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 2534 gfn_t nr_pages_avail; 2535 2536 /* Update ghc->generation before performing any error checks. */ 2537 ghc->generation = slots->generation; 2538 2539 if (start_gfn > end_gfn) { 2540 ghc->hva = KVM_HVA_ERR_BAD; 2541 return -EINVAL; 2542 } 2543 2544 /* 2545 * If the requested region crosses two memslots, we still 2546 * verify that the entire region is valid here. 2547 */ 2548 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 2549 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 2550 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 2551 &nr_pages_avail); 2552 if (kvm_is_error_hva(ghc->hva)) 2553 return -EFAULT; 2554 } 2555 2556 /* Use the slow path for cross page reads and writes. 
*/ 2557 if (nr_pages_needed == 1) 2558 ghc->hva += offset; 2559 else 2560 ghc->memslot = NULL; 2561 2562 ghc->gpa = gpa; 2563 ghc->len = len; 2564 return 0; 2565 } 2566 2567 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2568 gpa_t gpa, unsigned long len) 2569 { 2570 struct kvm_memslots *slots = kvm_memslots(kvm); 2571 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 2572 } 2573 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 2574 2575 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2576 void *data, unsigned int offset, 2577 unsigned long len) 2578 { 2579 struct kvm_memslots *slots = kvm_memslots(kvm); 2580 int r; 2581 gpa_t gpa = ghc->gpa + offset; 2582 2583 BUG_ON(len + offset > ghc->len); 2584 2585 if (slots->generation != ghc->generation) { 2586 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 2587 return -EFAULT; 2588 } 2589 2590 if (kvm_is_error_hva(ghc->hva)) 2591 return -EFAULT; 2592 2593 if (unlikely(!ghc->memslot)) 2594 return kvm_write_guest(kvm, gpa, data, len); 2595 2596 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 2597 if (r) 2598 return -EFAULT; 2599 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 2600 2601 return 0; 2602 } 2603 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 2604 2605 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2606 void *data, unsigned long len) 2607 { 2608 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 2609 } 2610 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 2611 2612 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2613 void *data, unsigned int offset, 2614 unsigned long len) 2615 { 2616 struct kvm_memslots *slots = kvm_memslots(kvm); 2617 int r; 2618 gpa_t gpa = ghc->gpa + offset; 2619 2620 BUG_ON(len + offset > ghc->len); 2621 2622 if (slots->generation != ghc->generation) { 2623 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 2624 return -EFAULT; 2625 } 2626 2627 if (kvm_is_error_hva(ghc->hva)) 2628 return -EFAULT; 2629 2630 if (unlikely(!ghc->memslot)) 2631 return kvm_read_guest(kvm, gpa, data, len); 2632 2633 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 2634 if (r) 2635 return -EFAULT; 2636 2637 return 0; 2638 } 2639 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 2640 2641 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2642 void *data, unsigned long len) 2643 { 2644 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 2645 } 2646 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 2647 2648 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 2649 { 2650 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 2651 gfn_t gfn = gpa >> PAGE_SHIFT; 2652 int seg; 2653 int offset = offset_in_page(gpa); 2654 int ret; 2655 2656 while ((seg = next_segment(len, offset)) != 0) { 2657 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); 2658 if (ret < 0) 2659 return ret; 2660 offset = 0; 2661 len -= seg; 2662 ++gfn; 2663 } 2664 return 0; 2665 } 2666 EXPORT_SYMBOL_GPL(kvm_clear_guest); 2667
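/*
 * Illustrative sketch only, not referenced anywhere in this file: one way a
 * caller could use the gfn_to_hva_cache helpers above to avoid redoing the
 * memslot lookup for a guest address it writes repeatedly. The function name
 * and the guest physical address are made up for the example.
 */
static void __maybe_unused example_cached_guest_write(struct kvm *kvm)
{
	struct gfn_to_hva_cache ghc;
	u64 val = 0;

	/* Resolve the gpa -> hva translation once and cache it. */
	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, 0x1000, sizeof(val)))
		return;

	/*
	 * Later writes reuse the cached hva; the helper re-initializes the
	 * cache if the memslot generation changed and falls back to
	 * kvm_write_guest() for regions that cross a page boundary.
	 */
	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
}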
2668 void mark_page_dirty_in_slot(struct kvm *kvm, 2669 struct kvm_memory_slot *memslot, 2670 gfn_t gfn) 2671 { 2672 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 2673 unsigned long rel_gfn = gfn - memslot->base_gfn; 2674 u32 slot = (memslot->as_id << 16) | memslot->id; 2675 2676 if (kvm->dirty_ring_size) 2677 kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), 2678 slot, rel_gfn); 2679 else 2680 set_bit_le(rel_gfn, memslot->dirty_bitmap); 2681 } 2682 } 2683 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 2684 2685 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 2686 { 2687 struct kvm_memory_slot *memslot; 2688 2689 memslot = gfn_to_memslot(kvm, gfn); 2690 mark_page_dirty_in_slot(kvm, memslot, gfn); 2691 } 2692 EXPORT_SYMBOL_GPL(mark_page_dirty); 2693 2694 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 2695 { 2696 struct kvm_memory_slot *memslot; 2697 2698 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2699 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 2700 } 2701 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 2702 2703 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 2704 { 2705 if (!vcpu->sigset_active) 2706 return; 2707 2708 /* 2709 * This does a lockless modification of ->real_blocked, which is fine 2710 * because only current can change ->real_blocked and all readers of 2711 * ->real_blocked don't care as long as ->real_blocked is always a subset 2712 * of ->blocked. 2713 */ 2714 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked); 2715 } 2716 2717 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 2718 { 2719 if (!vcpu->sigset_active) 2720 return; 2721 2722 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL); 2723 sigemptyset(&current->real_blocked); 2724 } 2725 2726 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 2727 { 2728 unsigned int old, val, grow, grow_start; 2729 2730 old = val = vcpu->halt_poll_ns; 2731 grow_start = READ_ONCE(halt_poll_ns_grow_start); 2732 grow = READ_ONCE(halt_poll_ns_grow); 2733 if (!grow) 2734 goto out; 2735 2736 val *= grow; 2737 if (val < grow_start) 2738 val = grow_start; 2739 2740 if (val > vcpu->kvm->max_halt_poll_ns) 2741 val = vcpu->kvm->max_halt_poll_ns; 2742 2743 vcpu->halt_poll_ns = val; 2744 out: 2745 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 2746 } 2747 2748 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 2749 { 2750 unsigned int old, val, shrink; 2751 2752 old = val = vcpu->halt_poll_ns; 2753 shrink = READ_ONCE(halt_poll_ns_shrink); 2754 if (shrink == 0) 2755 val = 0; 2756 else 2757 val /= shrink; 2758 2759 vcpu->halt_poll_ns = val; 2760 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 2761 } 2762 2763 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 2764 { 2765 int ret = -EINTR; 2766 int idx = srcu_read_lock(&vcpu->kvm->srcu); 2767 2768 if (kvm_arch_vcpu_runnable(vcpu)) { 2769 kvm_make_request(KVM_REQ_UNHALT, vcpu); 2770 goto out; 2771 } 2772 if (kvm_cpu_has_pending_timer(vcpu)) 2773 goto out; 2774 if (signal_pending(current)) 2775 goto out; 2776 2777 ret = 0; 2778 out: 2779 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2780 return ret; 2781 } 2782 2783 static inline void 2784 update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) 2785 { 2786 if (waited) 2787 vcpu->stat.halt_poll_fail_ns += poll_ns; 2788 else 2789 vcpu->stat.halt_poll_success_ns += poll_ns; 2790 } 2791 2792 /* 2793 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 2794 */ 2795 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 2796 { 2797 ktime_t start, cur, poll_end; 2798 bool waited = false; 2799 u64 block_ns; 2800 2801 kvm_arch_vcpu_blocking(vcpu); 2802 2803 start = cur = poll_end = ktime_get(); 2804 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { 2805 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2806 2807 ++vcpu->stat.halt_attempted_poll; 2808 do { 2809 /* 2810 * This sets KVM_REQ_UNHALT if an interrupt 2811 * arrives.
2812 */ 2813 if (kvm_vcpu_check_block(vcpu) < 0) { 2814 ++vcpu->stat.halt_successful_poll; 2815 if (!vcpu_valid_wakeup(vcpu)) 2816 ++vcpu->stat.halt_poll_invalid; 2817 goto out; 2818 } 2819 poll_end = cur = ktime_get(); 2820 } while (single_task_running() && ktime_before(cur, stop)); 2821 } 2822 2823 prepare_to_rcuwait(&vcpu->wait); 2824 for (;;) { 2825 set_current_state(TASK_INTERRUPTIBLE); 2826 2827 if (kvm_vcpu_check_block(vcpu) < 0) 2828 break; 2829 2830 waited = true; 2831 schedule(); 2832 } 2833 finish_rcuwait(&vcpu->wait); 2834 cur = ktime_get(); 2835 out: 2836 kvm_arch_vcpu_unblocking(vcpu); 2837 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2838 2839 update_halt_poll_stats( 2840 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); 2841 2842 if (!kvm_arch_no_poll(vcpu)) { 2843 if (!vcpu_valid_wakeup(vcpu)) { 2844 shrink_halt_poll_ns(vcpu); 2845 } else if (vcpu->kvm->max_halt_poll_ns) { 2846 if (block_ns <= vcpu->halt_poll_ns) 2847 ; 2848 /* we had a long block, shrink polling */ 2849 else if (vcpu->halt_poll_ns && 2850 block_ns > vcpu->kvm->max_halt_poll_ns) 2851 shrink_halt_poll_ns(vcpu); 2852 /* we had a short halt and our poll time is too small */ 2853 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 2854 block_ns < vcpu->kvm->max_halt_poll_ns) 2855 grow_halt_poll_ns(vcpu); 2856 } else { 2857 vcpu->halt_poll_ns = 0; 2858 } 2859 } 2860 2861 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 2862 kvm_arch_vcpu_block_finish(vcpu); 2863 } 2864 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2865 2866 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 2867 { 2868 struct rcuwait *waitp; 2869 2870 waitp = kvm_arch_vcpu_get_wait(vcpu); 2871 if (rcuwait_wake_up(waitp)) { 2872 WRITE_ONCE(vcpu->ready, true); 2873 ++vcpu->stat.halt_wakeup; 2874 return true; 2875 } 2876 2877 return false; 2878 } 2879 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 2880 2881 #ifndef CONFIG_S390 2882 /* 2883 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 2884 */ 2885 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2886 { 2887 int me; 2888 int cpu = vcpu->cpu; 2889 2890 if (kvm_vcpu_wake_up(vcpu)) 2891 return; 2892 2893 me = get_cpu(); 2894 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2895 if (kvm_arch_vcpu_should_kick(vcpu)) 2896 smp_send_reschedule(cpu); 2897 put_cpu(); 2898 } 2899 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2900 #endif /* !CONFIG_S390 */ 2901 2902 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2903 { 2904 struct pid *pid; 2905 struct task_struct *task = NULL; 2906 int ret = 0; 2907 2908 rcu_read_lock(); 2909 pid = rcu_dereference(target->pid); 2910 if (pid) 2911 task = get_pid_task(pid, PIDTYPE_PID); 2912 rcu_read_unlock(); 2913 if (!task) 2914 return ret; 2915 ret = yield_to(task, 1); 2916 put_task_struct(task); 2917 2918 return ret; 2919 } 2920 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2921 2922 /* 2923 * Helper that checks whether a VCPU is eligible for directed yield. 2924 * Most eligible candidate to yield is decided by following heuristics: 2925 * 2926 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 2927 * (preempted lock holder), indicated by @in_spin_loop. 2928 * Set at the beginning and cleared at the end of interception/PLE handler. 2929 * 2930 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 2931 * chance last time (mostly it has become eligible now since we have probably 2932 * yielded to lockholder in last iteration. 
This is done by toggling 2933 * @dy_eligible each time a VCPU checked for eligibility.) 2934 * 2935 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 2936 * to preempted lock-holder could result in wrong VCPU selection and CPU 2937 * burning. Giving priority for a potential lock-holder increases lock 2938 * progress. 2939 * 2940 * Since algorithm is based on heuristics, accessing another VCPU data without 2941 * locking does not harm. It may result in trying to yield to same VCPU, fail 2942 * and continue with next VCPU and so on. 2943 */ 2944 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2945 { 2946 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2947 bool eligible; 2948 2949 eligible = !vcpu->spin_loop.in_spin_loop || 2950 vcpu->spin_loop.dy_eligible; 2951 2952 if (vcpu->spin_loop.in_spin_loop) 2953 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2954 2955 return eligible; 2956 #else 2957 return true; 2958 #endif 2959 } 2960 2961 /* 2962 * Unlike kvm_arch_vcpu_runnable, this function is called outside 2963 * a vcpu_load/vcpu_put pair. However, for most architectures 2964 * kvm_arch_vcpu_runnable does not require vcpu_load. 2965 */ 2966 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 2967 { 2968 return kvm_arch_vcpu_runnable(vcpu); 2969 } 2970 2971 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 2972 { 2973 if (kvm_arch_dy_runnable(vcpu)) 2974 return true; 2975 2976 #ifdef CONFIG_KVM_ASYNC_PF 2977 if (!list_empty_careful(&vcpu->async_pf.done)) 2978 return true; 2979 #endif 2980 2981 return false; 2982 } 2983 2984 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 2985 { 2986 struct kvm *kvm = me->kvm; 2987 struct kvm_vcpu *vcpu; 2988 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2989 int yielded = 0; 2990 int try = 3; 2991 int pass; 2992 int i; 2993 2994 kvm_vcpu_set_in_spin_loop(me, true); 2995 /* 2996 * We boost the priority of a VCPU that is runnable but not 2997 * currently running, because it got preempted by something 2998 * else and called schedule in __vcpu_run. Hopefully that 2999 * VCPU is holding the lock that we need and will release it. 3000 * We approximate round-robin by starting at the last boosted VCPU. 
3001 */ 3002 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3003 kvm_for_each_vcpu(i, vcpu, kvm) { 3004 if (!pass && i <= last_boosted_vcpu) { 3005 i = last_boosted_vcpu; 3006 continue; 3007 } else if (pass && i > last_boosted_vcpu) 3008 break; 3009 if (!READ_ONCE(vcpu->ready)) 3010 continue; 3011 if (vcpu == me) 3012 continue; 3013 if (rcuwait_active(&vcpu->wait) && 3014 !vcpu_dy_runnable(vcpu)) 3015 continue; 3016 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3017 !kvm_arch_vcpu_in_kernel(vcpu)) 3018 continue; 3019 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3020 continue; 3021 3022 yielded = kvm_vcpu_yield_to(vcpu); 3023 if (yielded > 0) { 3024 kvm->last_boosted_vcpu = i; 3025 break; 3026 } else if (yielded < 0) { 3027 try--; 3028 if (!try) 3029 break; 3030 } 3031 } 3032 } 3033 kvm_vcpu_set_in_spin_loop(me, false); 3034 3035 /* Ensure vcpu is not eligible during next spinloop */ 3036 kvm_vcpu_set_dy_eligible(me, false); 3037 } 3038 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3039 3040 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3041 { 3042 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3043 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3044 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3045 kvm->dirty_ring_size / PAGE_SIZE); 3046 #else 3047 return false; 3048 #endif 3049 } 3050 3051 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3052 { 3053 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3054 struct page *page; 3055 3056 if (vmf->pgoff == 0) 3057 page = virt_to_page(vcpu->run); 3058 #ifdef CONFIG_X86 3059 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3060 page = virt_to_page(vcpu->arch.pio_data); 3061 #endif 3062 #ifdef CONFIG_KVM_MMIO 3063 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3064 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3065 #endif 3066 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3067 page = kvm_dirty_ring_get_page( 3068 &vcpu->dirty_ring, 3069 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3070 else 3071 return kvm_arch_vcpu_fault(vcpu, vmf); 3072 get_page(page); 3073 vmf->page = page; 3074 return 0; 3075 } 3076 3077 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3078 .fault = kvm_vcpu_fault, 3079 }; 3080 3081 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3082 { 3083 struct kvm_vcpu *vcpu = file->private_data; 3084 unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 3085 3086 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3087 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3088 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3089 return -EINVAL; 3090 3091 vma->vm_ops = &kvm_vcpu_vm_ops; 3092 return 0; 3093 } 3094 3095 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3096 { 3097 struct kvm_vcpu *vcpu = filp->private_data; 3098 3099 kvm_put_kvm(vcpu->kvm); 3100 return 0; 3101 } 3102 3103 static struct file_operations kvm_vcpu_fops = { 3104 .release = kvm_vcpu_release, 3105 .unlocked_ioctl = kvm_vcpu_ioctl, 3106 .mmap = kvm_vcpu_mmap, 3107 .llseek = noop_llseek, 3108 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3109 }; 3110 3111 /* 3112 * Allocates an inode for the vcpu. 
3113 */ 3114 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3115 { 3116 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3117 3118 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3119 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3120 } 3121 3122 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3123 { 3124 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3125 struct dentry *debugfs_dentry; 3126 char dir_name[ITOA_MAX_LEN * 2]; 3127 3128 if (!debugfs_initialized()) 3129 return; 3130 3131 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3132 debugfs_dentry = debugfs_create_dir(dir_name, 3133 vcpu->kvm->debugfs_dentry); 3134 3135 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3136 #endif 3137 } 3138 3139 /* 3140 * Creates some virtual cpus. Good luck creating more than one. 3141 */ 3142 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3143 { 3144 int r; 3145 struct kvm_vcpu *vcpu; 3146 struct page *page; 3147 3148 if (id >= KVM_MAX_VCPU_ID) 3149 return -EINVAL; 3150 3151 mutex_lock(&kvm->lock); 3152 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3153 mutex_unlock(&kvm->lock); 3154 return -EINVAL; 3155 } 3156 3157 kvm->created_vcpus++; 3158 mutex_unlock(&kvm->lock); 3159 3160 r = kvm_arch_vcpu_precreate(kvm, id); 3161 if (r) 3162 goto vcpu_decrement; 3163 3164 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 3165 if (!vcpu) { 3166 r = -ENOMEM; 3167 goto vcpu_decrement; 3168 } 3169 3170 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3171 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3172 if (!page) { 3173 r = -ENOMEM; 3174 goto vcpu_free; 3175 } 3176 vcpu->run = page_address(page); 3177 3178 kvm_vcpu_init(vcpu, kvm, id); 3179 3180 r = kvm_arch_vcpu_create(vcpu); 3181 if (r) 3182 goto vcpu_free_run_page; 3183 3184 if (kvm->dirty_ring_size) { 3185 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3186 id, kvm->dirty_ring_size); 3187 if (r) 3188 goto arch_vcpu_destroy; 3189 } 3190 3191 mutex_lock(&kvm->lock); 3192 if (kvm_get_vcpu_by_id(kvm, id)) { 3193 r = -EEXIST; 3194 goto unlock_vcpu_destroy; 3195 } 3196 3197 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3198 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); 3199 3200 /* Now it's all set up, let userspace reach it */ 3201 kvm_get_kvm(kvm); 3202 r = create_vcpu_fd(vcpu); 3203 if (r < 0) { 3204 kvm_put_kvm_no_destroy(kvm); 3205 goto unlock_vcpu_destroy; 3206 } 3207 3208 kvm->vcpus[vcpu->vcpu_idx] = vcpu; 3209 3210 /* 3211 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 3212 * before kvm->online_vcpu's incremented value. 
3213 */ 3214 smp_wmb(); 3215 atomic_inc(&kvm->online_vcpus); 3216 3217 mutex_unlock(&kvm->lock); 3218 kvm_arch_vcpu_postcreate(vcpu); 3219 kvm_create_vcpu_debugfs(vcpu); 3220 return r; 3221 3222 unlock_vcpu_destroy: 3223 mutex_unlock(&kvm->lock); 3224 kvm_dirty_ring_free(&vcpu->dirty_ring); 3225 arch_vcpu_destroy: 3226 kvm_arch_vcpu_destroy(vcpu); 3227 vcpu_free_run_page: 3228 free_page((unsigned long)vcpu->run); 3229 vcpu_free: 3230 kmem_cache_free(kvm_vcpu_cache, vcpu); 3231 vcpu_decrement: 3232 mutex_lock(&kvm->lock); 3233 kvm->created_vcpus--; 3234 mutex_unlock(&kvm->lock); 3235 return r; 3236 } 3237 3238 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3239 { 3240 if (sigset) { 3241 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3242 vcpu->sigset_active = 1; 3243 vcpu->sigset = *sigset; 3244 } else 3245 vcpu->sigset_active = 0; 3246 return 0; 3247 } 3248 3249 static long kvm_vcpu_ioctl(struct file *filp, 3250 unsigned int ioctl, unsigned long arg) 3251 { 3252 struct kvm_vcpu *vcpu = filp->private_data; 3253 void __user *argp = (void __user *)arg; 3254 int r; 3255 struct kvm_fpu *fpu = NULL; 3256 struct kvm_sregs *kvm_sregs = NULL; 3257 3258 if (vcpu->kvm->mm != current->mm) 3259 return -EIO; 3260 3261 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3262 return -EINVAL; 3263 3264 /* 3265 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3266 * execution; mutex_lock() would break them. 3267 */ 3268 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3269 if (r != -ENOIOCTLCMD) 3270 return r; 3271 3272 if (mutex_lock_killable(&vcpu->mutex)) 3273 return -EINTR; 3274 switch (ioctl) { 3275 case KVM_RUN: { 3276 struct pid *oldpid; 3277 r = -EINVAL; 3278 if (arg) 3279 goto out; 3280 oldpid = rcu_access_pointer(vcpu->pid); 3281 if (unlikely(oldpid != task_pid(current))) { 3282 /* The thread running this VCPU changed. 
*/ 3283 struct pid *newpid; 3284 3285 r = kvm_arch_vcpu_run_pid_change(vcpu); 3286 if (r) 3287 break; 3288 3289 newpid = get_task_pid(current, PIDTYPE_PID); 3290 rcu_assign_pointer(vcpu->pid, newpid); 3291 if (oldpid) 3292 synchronize_rcu(); 3293 put_pid(oldpid); 3294 } 3295 r = kvm_arch_vcpu_ioctl_run(vcpu); 3296 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3297 break; 3298 } 3299 case KVM_GET_REGS: { 3300 struct kvm_regs *kvm_regs; 3301 3302 r = -ENOMEM; 3303 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3304 if (!kvm_regs) 3305 goto out; 3306 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3307 if (r) 3308 goto out_free1; 3309 r = -EFAULT; 3310 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3311 goto out_free1; 3312 r = 0; 3313 out_free1: 3314 kfree(kvm_regs); 3315 break; 3316 } 3317 case KVM_SET_REGS: { 3318 struct kvm_regs *kvm_regs; 3319 3320 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3321 if (IS_ERR(kvm_regs)) { 3322 r = PTR_ERR(kvm_regs); 3323 goto out; 3324 } 3325 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3326 kfree(kvm_regs); 3327 break; 3328 } 3329 case KVM_GET_SREGS: { 3330 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3331 GFP_KERNEL_ACCOUNT); 3332 r = -ENOMEM; 3333 if (!kvm_sregs) 3334 goto out; 3335 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3336 if (r) 3337 goto out; 3338 r = -EFAULT; 3339 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3340 goto out; 3341 r = 0; 3342 break; 3343 } 3344 case KVM_SET_SREGS: { 3345 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3346 if (IS_ERR(kvm_sregs)) { 3347 r = PTR_ERR(kvm_sregs); 3348 kvm_sregs = NULL; 3349 goto out; 3350 } 3351 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3352 break; 3353 } 3354 case KVM_GET_MP_STATE: { 3355 struct kvm_mp_state mp_state; 3356 3357 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3358 if (r) 3359 goto out; 3360 r = -EFAULT; 3361 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3362 goto out; 3363 r = 0; 3364 break; 3365 } 3366 case KVM_SET_MP_STATE: { 3367 struct kvm_mp_state mp_state; 3368 3369 r = -EFAULT; 3370 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3371 goto out; 3372 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3373 break; 3374 } 3375 case KVM_TRANSLATE: { 3376 struct kvm_translation tr; 3377 3378 r = -EFAULT; 3379 if (copy_from_user(&tr, argp, sizeof(tr))) 3380 goto out; 3381 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 3382 if (r) 3383 goto out; 3384 r = -EFAULT; 3385 if (copy_to_user(argp, &tr, sizeof(tr))) 3386 goto out; 3387 r = 0; 3388 break; 3389 } 3390 case KVM_SET_GUEST_DEBUG: { 3391 struct kvm_guest_debug dbg; 3392 3393 r = -EFAULT; 3394 if (copy_from_user(&dbg, argp, sizeof(dbg))) 3395 goto out; 3396 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 3397 break; 3398 } 3399 case KVM_SET_SIGNAL_MASK: { 3400 struct kvm_signal_mask __user *sigmask_arg = argp; 3401 struct kvm_signal_mask kvm_sigmask; 3402 sigset_t sigset, *p; 3403 3404 p = NULL; 3405 if (argp) { 3406 r = -EFAULT; 3407 if (copy_from_user(&kvm_sigmask, argp, 3408 sizeof(kvm_sigmask))) 3409 goto out; 3410 r = -EINVAL; 3411 if (kvm_sigmask.len != sizeof(sigset)) 3412 goto out; 3413 r = -EFAULT; 3414 if (copy_from_user(&sigset, sigmask_arg->sigset, 3415 sizeof(sigset))) 3416 goto out; 3417 p = &sigset; 3418 } 3419 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 3420 break; 3421 } 3422 case KVM_GET_FPU: { 3423 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 3424 r = -ENOMEM; 3425 if (!fpu) 3426 goto out; 
3427 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 3428 if (r) 3429 goto out; 3430 r = -EFAULT; 3431 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 3432 goto out; 3433 r = 0; 3434 break; 3435 } 3436 case KVM_SET_FPU: { 3437 fpu = memdup_user(argp, sizeof(*fpu)); 3438 if (IS_ERR(fpu)) { 3439 r = PTR_ERR(fpu); 3440 fpu = NULL; 3441 goto out; 3442 } 3443 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 3444 break; 3445 } 3446 default: 3447 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 3448 } 3449 out: 3450 mutex_unlock(&vcpu->mutex); 3451 kfree(fpu); 3452 kfree(kvm_sregs); 3453 return r; 3454 } 3455 3456 #ifdef CONFIG_KVM_COMPAT 3457 static long kvm_vcpu_compat_ioctl(struct file *filp, 3458 unsigned int ioctl, unsigned long arg) 3459 { 3460 struct kvm_vcpu *vcpu = filp->private_data; 3461 void __user *argp = compat_ptr(arg); 3462 int r; 3463 3464 if (vcpu->kvm->mm != current->mm) 3465 return -EIO; 3466 3467 switch (ioctl) { 3468 case KVM_SET_SIGNAL_MASK: { 3469 struct kvm_signal_mask __user *sigmask_arg = argp; 3470 struct kvm_signal_mask kvm_sigmask; 3471 sigset_t sigset; 3472 3473 if (argp) { 3474 r = -EFAULT; 3475 if (copy_from_user(&kvm_sigmask, argp, 3476 sizeof(kvm_sigmask))) 3477 goto out; 3478 r = -EINVAL; 3479 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 3480 goto out; 3481 r = -EFAULT; 3482 if (get_compat_sigset(&sigset, 3483 (compat_sigset_t __user *)sigmask_arg->sigset)) 3484 goto out; 3485 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 3486 } else 3487 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 3488 break; 3489 } 3490 default: 3491 r = kvm_vcpu_ioctl(filp, ioctl, arg); 3492 } 3493 3494 out: 3495 return r; 3496 } 3497 #endif 3498 3499 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 3500 { 3501 struct kvm_device *dev = filp->private_data; 3502 3503 if (dev->ops->mmap) 3504 return dev->ops->mmap(dev, vma); 3505 3506 return -ENODEV; 3507 } 3508 3509 static int kvm_device_ioctl_attr(struct kvm_device *dev, 3510 int (*accessor)(struct kvm_device *dev, 3511 struct kvm_device_attr *attr), 3512 unsigned long arg) 3513 { 3514 struct kvm_device_attr attr; 3515 3516 if (!accessor) 3517 return -EPERM; 3518 3519 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 3520 return -EFAULT; 3521 3522 return accessor(dev, &attr); 3523 } 3524 3525 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 3526 unsigned long arg) 3527 { 3528 struct kvm_device *dev = filp->private_data; 3529 3530 if (dev->kvm->mm != current->mm) 3531 return -EIO; 3532 3533 switch (ioctl) { 3534 case KVM_SET_DEVICE_ATTR: 3535 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 3536 case KVM_GET_DEVICE_ATTR: 3537 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 3538 case KVM_HAS_DEVICE_ATTR: 3539 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 3540 default: 3541 if (dev->ops->ioctl) 3542 return dev->ops->ioctl(dev, ioctl, arg); 3543 3544 return -ENOTTY; 3545 } 3546 } 3547 3548 static int kvm_device_release(struct inode *inode, struct file *filp) 3549 { 3550 struct kvm_device *dev = filp->private_data; 3551 struct kvm *kvm = dev->kvm; 3552 3553 if (dev->ops->release) { 3554 mutex_lock(&kvm->lock); 3555 list_del(&dev->vm_node); 3556 dev->ops->release(dev); 3557 mutex_unlock(&kvm->lock); 3558 } 3559 3560 kvm_put_kvm(kvm); 3561 return 0; 3562 } 3563 3564 static const struct file_operations kvm_device_fops = { 3565 .unlocked_ioctl = kvm_device_ioctl, 3566 .release = kvm_device_release, 3567 KVM_COMPAT(kvm_device_ioctl), 3568 .mmap = kvm_device_mmap, 3569 
}; 3570 3571 struct kvm_device *kvm_device_from_filp(struct file *filp) 3572 { 3573 if (filp->f_op != &kvm_device_fops) 3574 return NULL; 3575 3576 return filp->private_data; 3577 } 3578 3579 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 3580 #ifdef CONFIG_KVM_MPIC 3581 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 3582 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 3583 #endif 3584 }; 3585 3586 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 3587 { 3588 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 3589 return -ENOSPC; 3590 3591 if (kvm_device_ops_table[type] != NULL) 3592 return -EEXIST; 3593 3594 kvm_device_ops_table[type] = ops; 3595 return 0; 3596 } 3597 3598 void kvm_unregister_device_ops(u32 type) 3599 { 3600 if (kvm_device_ops_table[type] != NULL) 3601 kvm_device_ops_table[type] = NULL; 3602 } 3603 3604 static int kvm_ioctl_create_device(struct kvm *kvm, 3605 struct kvm_create_device *cd) 3606 { 3607 const struct kvm_device_ops *ops = NULL; 3608 struct kvm_device *dev; 3609 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 3610 int type; 3611 int ret; 3612 3613 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 3614 return -ENODEV; 3615 3616 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 3617 ops = kvm_device_ops_table[type]; 3618 if (ops == NULL) 3619 return -ENODEV; 3620 3621 if (test) 3622 return 0; 3623 3624 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 3625 if (!dev) 3626 return -ENOMEM; 3627 3628 dev->ops = ops; 3629 dev->kvm = kvm; 3630 3631 mutex_lock(&kvm->lock); 3632 ret = ops->create(dev, type); 3633 if (ret < 0) { 3634 mutex_unlock(&kvm->lock); 3635 kfree(dev); 3636 return ret; 3637 } 3638 list_add(&dev->vm_node, &kvm->devices); 3639 mutex_unlock(&kvm->lock); 3640 3641 if (ops->init) 3642 ops->init(dev); 3643 3644 kvm_get_kvm(kvm); 3645 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3646 if (ret < 0) { 3647 kvm_put_kvm_no_destroy(kvm); 3648 mutex_lock(&kvm->lock); 3649 list_del(&dev->vm_node); 3650 mutex_unlock(&kvm->lock); 3651 ops->destroy(dev); 3652 return ret; 3653 } 3654 3655 cd->fd = ret; 3656 return 0; 3657 } 3658 3659 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 3660 { 3661 switch (arg) { 3662 case KVM_CAP_USER_MEMORY: 3663 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 3664 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 3665 case KVM_CAP_INTERNAL_ERROR_DATA: 3666 #ifdef CONFIG_HAVE_KVM_MSI 3667 case KVM_CAP_SIGNAL_MSI: 3668 #endif 3669 #ifdef CONFIG_HAVE_KVM_IRQFD 3670 case KVM_CAP_IRQFD: 3671 case KVM_CAP_IRQFD_RESAMPLE: 3672 #endif 3673 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 3674 case KVM_CAP_CHECK_EXTENSION_VM: 3675 case KVM_CAP_ENABLE_CAP_VM: 3676 case KVM_CAP_HALT_POLL: 3677 return 1; 3678 #ifdef CONFIG_KVM_MMIO 3679 case KVM_CAP_COALESCED_MMIO: 3680 return KVM_COALESCED_MMIO_PAGE_OFFSET; 3681 case KVM_CAP_COALESCED_PIO: 3682 return 1; 3683 #endif 3684 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 3685 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 3686 return KVM_DIRTY_LOG_MANUAL_CAPS; 3687 #endif 3688 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 3689 case KVM_CAP_IRQ_ROUTING: 3690 return KVM_MAX_IRQ_ROUTES; 3691 #endif 3692 #if KVM_ADDRESS_SPACE_NUM > 1 3693 case KVM_CAP_MULTI_ADDRESS_SPACE: 3694 return KVM_ADDRESS_SPACE_NUM; 3695 #endif 3696 case KVM_CAP_NR_MEMSLOTS: 3697 return KVM_USER_MEM_SLOTS; 3698 case KVM_CAP_DIRTY_LOG_RING: 3699 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3700 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 3701 
#else 3702 return 0; 3703 #endif 3704 default: 3705 break; 3706 } 3707 return kvm_vm_ioctl_check_extension(kvm, arg); 3708 } 3709 3710 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 3711 { 3712 int r; 3713 3714 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 3715 return -EINVAL; 3716 3717 /* the size should be power of 2 */ 3718 if (!size || (size & (size - 1))) 3719 return -EINVAL; 3720 3721 /* Should be bigger to keep the reserved entries, or a page */ 3722 if (size < kvm_dirty_ring_get_rsvd_entries() * 3723 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 3724 return -EINVAL; 3725 3726 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 3727 sizeof(struct kvm_dirty_gfn)) 3728 return -E2BIG; 3729 3730 /* We only allow it to set once */ 3731 if (kvm->dirty_ring_size) 3732 return -EINVAL; 3733 3734 mutex_lock(&kvm->lock); 3735 3736 if (kvm->created_vcpus) { 3737 /* We don't allow to change this value after vcpu created */ 3738 r = -EINVAL; 3739 } else { 3740 kvm->dirty_ring_size = size; 3741 r = 0; 3742 } 3743 3744 mutex_unlock(&kvm->lock); 3745 return r; 3746 } 3747 3748 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 3749 { 3750 int i; 3751 struct kvm_vcpu *vcpu; 3752 int cleared = 0; 3753 3754 if (!kvm->dirty_ring_size) 3755 return -EINVAL; 3756 3757 mutex_lock(&kvm->slots_lock); 3758 3759 kvm_for_each_vcpu(i, vcpu, kvm) 3760 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 3761 3762 mutex_unlock(&kvm->slots_lock); 3763 3764 if (cleared) 3765 kvm_flush_remote_tlbs(kvm); 3766 3767 return cleared; 3768 } 3769 3770 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 3771 struct kvm_enable_cap *cap) 3772 { 3773 return -EINVAL; 3774 } 3775 3776 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 3777 struct kvm_enable_cap *cap) 3778 { 3779 switch (cap->cap) { 3780 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 3781 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 3782 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 3783 3784 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 3785 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 3786 3787 if (cap->flags || (cap->args[0] & ~allowed_options)) 3788 return -EINVAL; 3789 kvm->manual_dirty_log_protect = cap->args[0]; 3790 return 0; 3791 } 3792 #endif 3793 case KVM_CAP_HALT_POLL: { 3794 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 3795 return -EINVAL; 3796 3797 kvm->max_halt_poll_ns = cap->args[0]; 3798 return 0; 3799 } 3800 case KVM_CAP_DIRTY_LOG_RING: 3801 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 3802 default: 3803 return kvm_vm_ioctl_enable_cap(kvm, cap); 3804 } 3805 } 3806 3807 static long kvm_vm_ioctl(struct file *filp, 3808 unsigned int ioctl, unsigned long arg) 3809 { 3810 struct kvm *kvm = filp->private_data; 3811 void __user *argp = (void __user *)arg; 3812 int r; 3813 3814 if (kvm->mm != current->mm) 3815 return -EIO; 3816 switch (ioctl) { 3817 case KVM_CREATE_VCPU: 3818 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 3819 break; 3820 case KVM_ENABLE_CAP: { 3821 struct kvm_enable_cap cap; 3822 3823 r = -EFAULT; 3824 if (copy_from_user(&cap, argp, sizeof(cap))) 3825 goto out; 3826 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 3827 break; 3828 } 3829 case KVM_SET_USER_MEMORY_REGION: { 3830 struct kvm_userspace_memory_region kvm_userspace_mem; 3831 3832 r = -EFAULT; 3833 if (copy_from_user(&kvm_userspace_mem, argp, 3834 sizeof(kvm_userspace_mem))) 3835 goto out; 3836 3837 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 3838 break; 3839 } 3840 
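	/*
	 * Illustrative userspace sketch (not kernel code) of the ioctl handled
	 * above; the fd and the values are made up for the example:
	 *
	 *	struct kvm_userspace_memory_region region = {
	 *		.slot            = 0,
	 *		.flags           = 0,
	 *		.guest_phys_addr = 0x0,
	 *		.memory_size     = 0x200000,
	 *		.userspace_addr  = (__u64)(unsigned long)host_mem,
	 *	};
	 *
	 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	 */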
case KVM_GET_DIRTY_LOG: { 3841 struct kvm_dirty_log log; 3842 3843 r = -EFAULT; 3844 if (copy_from_user(&log, argp, sizeof(log))) 3845 goto out; 3846 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 3847 break; 3848 } 3849 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 3850 case KVM_CLEAR_DIRTY_LOG: { 3851 struct kvm_clear_dirty_log log; 3852 3853 r = -EFAULT; 3854 if (copy_from_user(&log, argp, sizeof(log))) 3855 goto out; 3856 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 3857 break; 3858 } 3859 #endif 3860 #ifdef CONFIG_KVM_MMIO 3861 case KVM_REGISTER_COALESCED_MMIO: { 3862 struct kvm_coalesced_mmio_zone zone; 3863 3864 r = -EFAULT; 3865 if (copy_from_user(&zone, argp, sizeof(zone))) 3866 goto out; 3867 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 3868 break; 3869 } 3870 case KVM_UNREGISTER_COALESCED_MMIO: { 3871 struct kvm_coalesced_mmio_zone zone; 3872 3873 r = -EFAULT; 3874 if (copy_from_user(&zone, argp, sizeof(zone))) 3875 goto out; 3876 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 3877 break; 3878 } 3879 #endif 3880 case KVM_IRQFD: { 3881 struct kvm_irqfd data; 3882 3883 r = -EFAULT; 3884 if (copy_from_user(&data, argp, sizeof(data))) 3885 goto out; 3886 r = kvm_irqfd(kvm, &data); 3887 break; 3888 } 3889 case KVM_IOEVENTFD: { 3890 struct kvm_ioeventfd data; 3891 3892 r = -EFAULT; 3893 if (copy_from_user(&data, argp, sizeof(data))) 3894 goto out; 3895 r = kvm_ioeventfd(kvm, &data); 3896 break; 3897 } 3898 #ifdef CONFIG_HAVE_KVM_MSI 3899 case KVM_SIGNAL_MSI: { 3900 struct kvm_msi msi; 3901 3902 r = -EFAULT; 3903 if (copy_from_user(&msi, argp, sizeof(msi))) 3904 goto out; 3905 r = kvm_send_userspace_msi(kvm, &msi); 3906 break; 3907 } 3908 #endif 3909 #ifdef __KVM_HAVE_IRQ_LINE 3910 case KVM_IRQ_LINE_STATUS: 3911 case KVM_IRQ_LINE: { 3912 struct kvm_irq_level irq_event; 3913 3914 r = -EFAULT; 3915 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 3916 goto out; 3917 3918 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 3919 ioctl == KVM_IRQ_LINE_STATUS); 3920 if (r) 3921 goto out; 3922 3923 r = -EFAULT; 3924 if (ioctl == KVM_IRQ_LINE_STATUS) { 3925 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 3926 goto out; 3927 } 3928 3929 r = 0; 3930 break; 3931 } 3932 #endif 3933 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 3934 case KVM_SET_GSI_ROUTING: { 3935 struct kvm_irq_routing routing; 3936 struct kvm_irq_routing __user *urouting; 3937 struct kvm_irq_routing_entry *entries = NULL; 3938 3939 r = -EFAULT; 3940 if (copy_from_user(&routing, argp, sizeof(routing))) 3941 goto out; 3942 r = -EINVAL; 3943 if (!kvm_arch_can_set_irq_routing(kvm)) 3944 goto out; 3945 if (routing.nr > KVM_MAX_IRQ_ROUTES) 3946 goto out; 3947 if (routing.flags) 3948 goto out; 3949 if (routing.nr) { 3950 urouting = argp; 3951 entries = vmemdup_user(urouting->entries, 3952 array_size(sizeof(*entries), 3953 routing.nr)); 3954 if (IS_ERR(entries)) { 3955 r = PTR_ERR(entries); 3956 goto out; 3957 } 3958 } 3959 r = kvm_set_irq_routing(kvm, entries, routing.nr, 3960 routing.flags); 3961 kvfree(entries); 3962 break; 3963 } 3964 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 3965 case KVM_CREATE_DEVICE: { 3966 struct kvm_create_device cd; 3967 3968 r = -EFAULT; 3969 if (copy_from_user(&cd, argp, sizeof(cd))) 3970 goto out; 3971 3972 r = kvm_ioctl_create_device(kvm, &cd); 3973 if (r) 3974 goto out; 3975 3976 r = -EFAULT; 3977 if (copy_to_user(argp, &cd, sizeof(cd))) 3978 goto out; 3979 3980 r = 0; 3981 break; 3982 } 3983 case KVM_CHECK_EXTENSION: 3984 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 3985 break; 3986 
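	/*
	 * Illustrative userspace sketch (not kernel code) of the dirty-ring
	 * flow that ends in the KVM_RESET_DIRTY_RINGS ioctl handled below;
	 * the fds, the ring size and page_size are made up for the example:
	 *
	 *	struct kvm_enable_cap cap = {
	 *		.cap = KVM_CAP_DIRTY_LOG_RING,
	 *		.args[0] = 65536,	// bytes: power of two, at least a
	 *					// page, big enough for the
	 *					// reserved entries
	 *	};
	 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	 *
	 *	// Each vCPU's ring is mmap()ed from its vCPU fd, starting at
	 *	// KVM_DIRTY_LOG_PAGE_OFFSET pages into the vCPU mapping.
	 *	void *ring = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
	 *			  MAP_SHARED, vcpu_fd,
	 *			  KVM_DIRTY_LOG_PAGE_OFFSET * page_size);
	 *
	 *	// After harvesting dirty GFNs from the ring:
	 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
	 */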
case KVM_RESET_DIRTY_RINGS: 3987 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 3988 break; 3989 default: 3990 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 3991 } 3992 out: 3993 return r; 3994 } 3995 3996 #ifdef CONFIG_KVM_COMPAT 3997 struct compat_kvm_dirty_log { 3998 __u32 slot; 3999 __u32 padding1; 4000 union { 4001 compat_uptr_t dirty_bitmap; /* one bit per page */ 4002 __u64 padding2; 4003 }; 4004 }; 4005 4006 static long kvm_vm_compat_ioctl(struct file *filp, 4007 unsigned int ioctl, unsigned long arg) 4008 { 4009 struct kvm *kvm = filp->private_data; 4010 int r; 4011 4012 if (kvm->mm != current->mm) 4013 return -EIO; 4014 switch (ioctl) { 4015 case KVM_GET_DIRTY_LOG: { 4016 struct compat_kvm_dirty_log compat_log; 4017 struct kvm_dirty_log log; 4018 4019 if (copy_from_user(&compat_log, (void __user *)arg, 4020 sizeof(compat_log))) 4021 return -EFAULT; 4022 log.slot = compat_log.slot; 4023 log.padding1 = compat_log.padding1; 4024 log.padding2 = compat_log.padding2; 4025 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4026 4027 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4028 break; 4029 } 4030 default: 4031 r = kvm_vm_ioctl(filp, ioctl, arg); 4032 } 4033 return r; 4034 } 4035 #endif 4036 4037 static struct file_operations kvm_vm_fops = { 4038 .release = kvm_vm_release, 4039 .unlocked_ioctl = kvm_vm_ioctl, 4040 .llseek = noop_llseek, 4041 KVM_COMPAT(kvm_vm_compat_ioctl), 4042 }; 4043 4044 static int kvm_dev_ioctl_create_vm(unsigned long type) 4045 { 4046 int r; 4047 struct kvm *kvm; 4048 struct file *file; 4049 4050 kvm = kvm_create_vm(type); 4051 if (IS_ERR(kvm)) 4052 return PTR_ERR(kvm); 4053 #ifdef CONFIG_KVM_MMIO 4054 r = kvm_coalesced_mmio_init(kvm); 4055 if (r < 0) 4056 goto put_kvm; 4057 #endif 4058 r = get_unused_fd_flags(O_CLOEXEC); 4059 if (r < 0) 4060 goto put_kvm; 4061 4062 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4063 if (IS_ERR(file)) { 4064 put_unused_fd(r); 4065 r = PTR_ERR(file); 4066 goto put_kvm; 4067 } 4068 4069 /* 4070 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4071 * already set, with ->release() being kvm_vm_release(). In error 4072 * cases it will be called by the final fput(file) and will take 4073 * care of doing kvm_put_kvm(kvm). 
4074 */ 4075 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4076 put_unused_fd(r); 4077 fput(file); 4078 return -ENOMEM; 4079 } 4080 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4081 4082 fd_install(r, file); 4083 return r; 4084 4085 put_kvm: 4086 kvm_put_kvm(kvm); 4087 return r; 4088 } 4089 4090 static long kvm_dev_ioctl(struct file *filp, 4091 unsigned int ioctl, unsigned long arg) 4092 { 4093 long r = -EINVAL; 4094 4095 switch (ioctl) { 4096 case KVM_GET_API_VERSION: 4097 if (arg) 4098 goto out; 4099 r = KVM_API_VERSION; 4100 break; 4101 case KVM_CREATE_VM: 4102 r = kvm_dev_ioctl_create_vm(arg); 4103 break; 4104 case KVM_CHECK_EXTENSION: 4105 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4106 break; 4107 case KVM_GET_VCPU_MMAP_SIZE: 4108 if (arg) 4109 goto out; 4110 r = PAGE_SIZE; /* struct kvm_run */ 4111 #ifdef CONFIG_X86 4112 r += PAGE_SIZE; /* pio data page */ 4113 #endif 4114 #ifdef CONFIG_KVM_MMIO 4115 r += PAGE_SIZE; /* coalesced mmio ring page */ 4116 #endif 4117 break; 4118 case KVM_TRACE_ENABLE: 4119 case KVM_TRACE_PAUSE: 4120 case KVM_TRACE_DISABLE: 4121 r = -EOPNOTSUPP; 4122 break; 4123 default: 4124 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4125 } 4126 out: 4127 return r; 4128 } 4129 4130 static struct file_operations kvm_chardev_ops = { 4131 .unlocked_ioctl = kvm_dev_ioctl, 4132 .llseek = noop_llseek, 4133 KVM_COMPAT(kvm_dev_ioctl), 4134 }; 4135 4136 static struct miscdevice kvm_dev = { 4137 KVM_MINOR, 4138 "kvm", 4139 &kvm_chardev_ops, 4140 }; 4141 4142 static void hardware_enable_nolock(void *junk) 4143 { 4144 int cpu = raw_smp_processor_id(); 4145 int r; 4146 4147 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4148 return; 4149 4150 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4151 4152 r = kvm_arch_hardware_enable(); 4153 4154 if (r) { 4155 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4156 atomic_inc(&hardware_enable_failed); 4157 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4158 } 4159 } 4160 4161 static int kvm_starting_cpu(unsigned int cpu) 4162 { 4163 raw_spin_lock(&kvm_count_lock); 4164 if (kvm_usage_count) 4165 hardware_enable_nolock(NULL); 4166 raw_spin_unlock(&kvm_count_lock); 4167 return 0; 4168 } 4169 4170 static void hardware_disable_nolock(void *junk) 4171 { 4172 int cpu = raw_smp_processor_id(); 4173 4174 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4175 return; 4176 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4177 kvm_arch_hardware_disable(); 4178 } 4179 4180 static int kvm_dying_cpu(unsigned int cpu) 4181 { 4182 raw_spin_lock(&kvm_count_lock); 4183 if (kvm_usage_count) 4184 hardware_disable_nolock(NULL); 4185 raw_spin_unlock(&kvm_count_lock); 4186 return 0; 4187 } 4188 4189 static void hardware_disable_all_nolock(void) 4190 { 4191 BUG_ON(!kvm_usage_count); 4192 4193 kvm_usage_count--; 4194 if (!kvm_usage_count) 4195 on_each_cpu(hardware_disable_nolock, NULL, 1); 4196 } 4197 4198 static void hardware_disable_all(void) 4199 { 4200 raw_spin_lock(&kvm_count_lock); 4201 hardware_disable_all_nolock(); 4202 raw_spin_unlock(&kvm_count_lock); 4203 } 4204 4205 static int hardware_enable_all(void) 4206 { 4207 int r = 0; 4208 4209 raw_spin_lock(&kvm_count_lock); 4210 4211 kvm_usage_count++; 4212 if (kvm_usage_count == 1) { 4213 atomic_set(&hardware_enable_failed, 0); 4214 on_each_cpu(hardware_enable_nolock, NULL, 1); 4215 4216 if (atomic_read(&hardware_enable_failed)) { 4217 hardware_disable_all_nolock(); 4218 r = -EBUSY; 4219 } 4220 } 4221 4222 raw_spin_unlock(&kvm_count_lock); 4223 4224 return r; 4225 } 4226 4227 static 
int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4228 void *v) 4229 { 4230 /* 4231 * Some BIOSes hang on reboot if the CPU is still in 4232 * VMX root mode. 4233 * 4234 * Intel TXT also requires VMX to be off on all CPUs when the system shuts down. 4235 */ 4236 pr_info("kvm: exiting hardware virtualization\n"); 4237 kvm_rebooting = true; 4238 on_each_cpu(hardware_disable_nolock, NULL, 1); 4239 return NOTIFY_OK; 4240 } 4241 4242 static struct notifier_block kvm_reboot_notifier = { 4243 .notifier_call = kvm_reboot, 4244 .priority = 0, 4245 }; 4246 4247 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4248 { 4249 int i; 4250 4251 for (i = 0; i < bus->dev_count; i++) { 4252 struct kvm_io_device *pos = bus->range[i].dev; 4253 4254 kvm_iodevice_destructor(pos); 4255 } 4256 kfree(bus); 4257 } 4258 4259 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4260 const struct kvm_io_range *r2) 4261 { 4262 gpa_t addr1 = r1->addr; 4263 gpa_t addr2 = r2->addr; 4264 4265 if (addr1 < addr2) 4266 return -1; 4267 4268 /* If r2->len == 0, match the exact address. If r2->len != 0, 4269 * accept any overlapping write. Any order is acceptable for 4270 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4271 * we process all of them. 4272 */ 4273 if (r2->len) { 4274 addr1 += r1->len; 4275 addr2 += r2->len; 4276 } 4277 4278 if (addr1 > addr2) 4279 return 1; 4280 4281 return 0; 4282 } 4283 4284 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4285 { 4286 return kvm_io_bus_cmp(p1, p2); 4287 } 4288 4289 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4290 gpa_t addr, int len) 4291 { 4292 struct kvm_io_range *range, key; 4293 int off; 4294 4295 key = (struct kvm_io_range) { 4296 .addr = addr, 4297 .len = len, 4298 }; 4299 4300 range = bsearch(&key, bus->range, bus->dev_count, 4301 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 4302 if (range == NULL) 4303 return -ENOENT; 4304 4305 off = range - bus->range; 4306 4307 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 4308 off--; 4309 4310 return off; 4311 } 4312 4313 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4314 struct kvm_io_range *range, const void *val) 4315 { 4316 int idx; 4317 4318 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4319 if (idx < 0) 4320 return -EOPNOTSUPP; 4321 4322 while (idx < bus->dev_count && 4323 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4324 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 4325 range->len, val)) 4326 return idx; 4327 idx++; 4328 } 4329 4330 return -EOPNOTSUPP; 4331 } 4332 4333 /* kvm_io_bus_write - called under kvm->slots_lock */ 4334 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4335 int len, const void *val) 4336 { 4337 struct kvm_io_bus *bus; 4338 struct kvm_io_range range; 4339 int r; 4340 4341 range = (struct kvm_io_range) { 4342 .addr = addr, 4343 .len = len, 4344 }; 4345 4346 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4347 if (!bus) 4348 return -ENOMEM; 4349 r = __kvm_io_bus_write(vcpu, bus, &range, val); 4350 return r < 0 ?
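/* a non-negative value is the index of the device that handled the write; collapse it to plain success (0) for the caller */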
r : 0; 4351 } 4352 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 4353 4354 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 4355 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 4356 gpa_t addr, int len, const void *val, long cookie) 4357 { 4358 struct kvm_io_bus *bus; 4359 struct kvm_io_range range; 4360 4361 range = (struct kvm_io_range) { 4362 .addr = addr, 4363 .len = len, 4364 }; 4365 4366 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4367 if (!bus) 4368 return -ENOMEM; 4369 4370 /* First try the device referenced by cookie. */ 4371 if ((cookie >= 0) && (cookie < bus->dev_count) && 4372 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 4373 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 4374 val)) 4375 return cookie; 4376 4377 /* 4378 * cookie contained garbage; fall back to search and return the 4379 * correct cookie value. 4380 */ 4381 return __kvm_io_bus_write(vcpu, bus, &range, val); 4382 } 4383 4384 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4385 struct kvm_io_range *range, void *val) 4386 { 4387 int idx; 4388 4389 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4390 if (idx < 0) 4391 return -EOPNOTSUPP; 4392 4393 while (idx < bus->dev_count && 4394 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4395 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 4396 range->len, val)) 4397 return idx; 4398 idx++; 4399 } 4400 4401 return -EOPNOTSUPP; 4402 } 4403 4404 /* kvm_io_bus_read - called under kvm->slots_lock */ 4405 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4406 int len, void *val) 4407 { 4408 struct kvm_io_bus *bus; 4409 struct kvm_io_range range; 4410 int r; 4411 4412 range = (struct kvm_io_range) { 4413 .addr = addr, 4414 .len = len, 4415 }; 4416 4417 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4418 if (!bus) 4419 return -ENOMEM; 4420 r = __kvm_io_bus_read(vcpu, bus, &range, val); 4421 return r < 0 ? r : 0; 4422 } 4423 4424 /* Caller must hold slots_lock. */ 4425 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 4426 int len, struct kvm_io_device *dev) 4427 { 4428 int i; 4429 struct kvm_io_bus *new_bus, *bus; 4430 struct kvm_io_range range; 4431 4432 bus = kvm_get_bus(kvm, bus_idx); 4433 if (!bus) 4434 return -ENOMEM; 4435 4436 /* exclude ioeventfd which is limited by maximum fd */ 4437 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 4438 return -ENOSPC; 4439 4440 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 4441 GFP_KERNEL_ACCOUNT); 4442 if (!new_bus) 4443 return -ENOMEM; 4444 4445 range = (struct kvm_io_range) { 4446 .addr = addr, 4447 .len = len, 4448 .dev = dev, 4449 }; 4450 4451 for (i = 0; i < bus->dev_count; i++) 4452 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 4453 break; 4454 4455 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 4456 new_bus->dev_count++; 4457 new_bus->range[i] = range; 4458 memcpy(new_bus->range + i + 1, bus->range + i, 4459 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 4460 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4461 synchronize_srcu_expedited(&kvm->srcu); 4462 kfree(bus); 4463 4464 return 0; 4465 } 4466 4467 /* Caller must hold slots_lock. 
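 *
 * Like kvm_io_bus_register_dev() above, unregistering replaces the bus with a
 * freshly allocated copy (shrunk by one entry), publishes it with
 * rcu_assign_pointer() and waits for readers via synchronize_srcu_expedited()
 * before freeing the old array, so SRCU readers never observe a partially
 * updated bus.  If the smaller copy cannot be allocated, the bus pointer is
 * set to NULL and the remaining devices are destroyed.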
*/ 4468 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4469 struct kvm_io_device *dev) 4470 { 4471 int i, j; 4472 struct kvm_io_bus *new_bus, *bus; 4473 4474 bus = kvm_get_bus(kvm, bus_idx); 4475 if (!bus) 4476 return; 4477 4478 for (i = 0; i < bus->dev_count; i++) 4479 if (bus->range[i].dev == dev) { 4480 break; 4481 } 4482 4483 if (i == bus->dev_count) 4484 return; 4485 4486 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 4487 GFP_KERNEL_ACCOUNT); 4488 if (new_bus) { 4489 memcpy(new_bus, bus, struct_size(bus, range, i)); 4490 new_bus->dev_count--; 4491 memcpy(new_bus->range + i, bus->range + i + 1, 4492 flex_array_size(new_bus, range, new_bus->dev_count - i)); 4493 } else { 4494 pr_err("kvm: failed to shrink bus, removing it completely\n"); 4495 for (j = 0; j < bus->dev_count; j++) { 4496 if (j == i) 4497 continue; 4498 kvm_iodevice_destructor(bus->range[j].dev); 4499 } 4500 } 4501 4502 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4503 synchronize_srcu_expedited(&kvm->srcu); 4504 kfree(bus); 4505 return; 4506 } 4507 4508 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4509 gpa_t addr) 4510 { 4511 struct kvm_io_bus *bus; 4512 int dev_idx, srcu_idx; 4513 struct kvm_io_device *iodev = NULL; 4514 4515 srcu_idx = srcu_read_lock(&kvm->srcu); 4516 4517 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 4518 if (!bus) 4519 goto out_unlock; 4520 4521 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 4522 if (dev_idx < 0) 4523 goto out_unlock; 4524 4525 iodev = bus->range[dev_idx].dev; 4526 4527 out_unlock: 4528 srcu_read_unlock(&kvm->srcu, srcu_idx); 4529 4530 return iodev; 4531 } 4532 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 4533 4534 static int kvm_debugfs_open(struct inode *inode, struct file *file, 4535 int (*get)(void *, u64 *), int (*set)(void *, u64), 4536 const char *fmt) 4537 { 4538 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4539 inode->i_private; 4540 4541 /* The debugfs files are a reference to the kvm struct which 4542 * is still valid when kvm_destroy_vm is called. 4543 * To avoid the race between open and the removal of the debugfs 4544 * directory we test against the users count. 4545 */ 4546 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) 4547 return -ENOENT; 4548 4549 if (simple_attr_open(inode, file, get, 4550 KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222 4551 ? 
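/* the stat's debugfs mode has a write bit set, so hook up the clear handler */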
set : NULL, 4552 fmt)) { 4553 kvm_put_kvm(stat_data->kvm); 4554 return -ENOMEM; 4555 } 4556 4557 return 0; 4558 } 4559 4560 static int kvm_debugfs_release(struct inode *inode, struct file *file) 4561 { 4562 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4563 inode->i_private; 4564 4565 simple_attr_release(inode, file); 4566 kvm_put_kvm(stat_data->kvm); 4567 4568 return 0; 4569 } 4570 4571 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 4572 { 4573 *val = *(ulong *)((void *)kvm + offset); 4574 4575 return 0; 4576 } 4577 4578 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 4579 { 4580 *(ulong *)((void *)kvm + offset) = 0; 4581 4582 return 0; 4583 } 4584 4585 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 4586 { 4587 int i; 4588 struct kvm_vcpu *vcpu; 4589 4590 *val = 0; 4591 4592 kvm_for_each_vcpu(i, vcpu, kvm) 4593 *val += *(u64 *)((void *)vcpu + offset); 4594 4595 return 0; 4596 } 4597 4598 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 4599 { 4600 int i; 4601 struct kvm_vcpu *vcpu; 4602 4603 kvm_for_each_vcpu(i, vcpu, kvm) 4604 *(u64 *)((void *)vcpu + offset) = 0; 4605 4606 return 0; 4607 } 4608 4609 static int kvm_stat_data_get(void *data, u64 *val) 4610 { 4611 int r = -EFAULT; 4612 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 4613 4614 switch (stat_data->dbgfs_item->kind) { 4615 case KVM_STAT_VM: 4616 r = kvm_get_stat_per_vm(stat_data->kvm, 4617 stat_data->dbgfs_item->offset, val); 4618 break; 4619 case KVM_STAT_VCPU: 4620 r = kvm_get_stat_per_vcpu(stat_data->kvm, 4621 stat_data->dbgfs_item->offset, val); 4622 break; 4623 } 4624 4625 return r; 4626 } 4627 4628 static int kvm_stat_data_clear(void *data, u64 val) 4629 { 4630 int r = -EFAULT; 4631 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 4632 4633 if (val) 4634 return -EINVAL; 4635 4636 switch (stat_data->dbgfs_item->kind) { 4637 case KVM_STAT_VM: 4638 r = kvm_clear_stat_per_vm(stat_data->kvm, 4639 stat_data->dbgfs_item->offset); 4640 break; 4641 case KVM_STAT_VCPU: 4642 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 4643 stat_data->dbgfs_item->offset); 4644 break; 4645 } 4646 4647 return r; 4648 } 4649 4650 static int kvm_stat_data_open(struct inode *inode, struct file *file) 4651 { 4652 __simple_attr_check_format("%llu\n", 0ull); 4653 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 4654 kvm_stat_data_clear, "%llu\n"); 4655 } 4656 4657 static const struct file_operations stat_fops_per_vm = { 4658 .owner = THIS_MODULE, 4659 .open = kvm_stat_data_open, 4660 .release = kvm_debugfs_release, 4661 .read = simple_attr_read, 4662 .write = simple_attr_write, 4663 .llseek = no_llseek, 4664 }; 4665 4666 static int vm_stat_get(void *_offset, u64 *val) 4667 { 4668 unsigned offset = (long)_offset; 4669 struct kvm *kvm; 4670 u64 tmp_val; 4671 4672 *val = 0; 4673 mutex_lock(&kvm_lock); 4674 list_for_each_entry(kvm, &vm_list, vm_list) { 4675 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 4676 *val += tmp_val; 4677 } 4678 mutex_unlock(&kvm_lock); 4679 return 0; 4680 } 4681 4682 static int vm_stat_clear(void *_offset, u64 val) 4683 { 4684 unsigned offset = (long)_offset; 4685 struct kvm *kvm; 4686 4687 if (val) 4688 return -EINVAL; 4689 4690 mutex_lock(&kvm_lock); 4691 list_for_each_entry(kvm, &vm_list, vm_list) { 4692 kvm_clear_stat_per_vm(kvm, offset); 4693 } 4694 mutex_unlock(&kvm_lock); 4695 4696 return 0; 4697 } 4698 4699 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 4700 4701 
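/*
 * Note on the global statistics: kvm_init_debug() below creates one file per
 * debugfs_entries[] item under the "kvm" debugfs directory (typically
 * /sys/kernel/debug/kvm/ when debugfs is mounted in its usual place), backed
 * by vm_stat_fops or vcpu_stat_fops depending on the stat's kind.  Reading
 * such a file sums the statistic over every VM on vm_list (and, for vCPU
 * stats, over every vCPU of each VM); writing 0 to a writable stat clears it
 * for all VMs.  The per-VM copies of the same stats live in each VM's own
 * debugfs directory and are served by stat_fops_per_vm above.
 */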
static int vcpu_stat_get(void *_offset, u64 *val) 4702 { 4703 unsigned offset = (long)_offset; 4704 struct kvm *kvm; 4705 u64 tmp_val; 4706 4707 *val = 0; 4708 mutex_lock(&kvm_lock); 4709 list_for_each_entry(kvm, &vm_list, vm_list) { 4710 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 4711 *val += tmp_val; 4712 } 4713 mutex_unlock(&kvm_lock); 4714 return 0; 4715 } 4716 4717 static int vcpu_stat_clear(void *_offset, u64 val) 4718 { 4719 unsigned offset = (long)_offset; 4720 struct kvm *kvm; 4721 4722 if (val) 4723 return -EINVAL; 4724 4725 mutex_lock(&kvm_lock); 4726 list_for_each_entry(kvm, &vm_list, vm_list) { 4727 kvm_clear_stat_per_vcpu(kvm, offset); 4728 } 4729 mutex_unlock(&kvm_lock); 4730 4731 return 0; 4732 } 4733 4734 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 4735 "%llu\n"); 4736 4737 static const struct file_operations *stat_fops[] = { 4738 [KVM_STAT_VCPU] = &vcpu_stat_fops, 4739 [KVM_STAT_VM] = &vm_stat_fops, 4740 }; 4741 4742 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 4743 { 4744 struct kobj_uevent_env *env; 4745 unsigned long long created, active; 4746 4747 if (!kvm_dev.this_device || !kvm) 4748 return; 4749 4750 mutex_lock(&kvm_lock); 4751 if (type == KVM_EVENT_CREATE_VM) { 4752 kvm_createvm_count++; 4753 kvm_active_vms++; 4754 } else if (type == KVM_EVENT_DESTROY_VM) { 4755 kvm_active_vms--; 4756 } 4757 created = kvm_createvm_count; 4758 active = kvm_active_vms; 4759 mutex_unlock(&kvm_lock); 4760 4761 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 4762 if (!env) 4763 return; 4764 4765 add_uevent_var(env, "CREATED=%llu", created); 4766 add_uevent_var(env, "COUNT=%llu", active); 4767 4768 if (type == KVM_EVENT_CREATE_VM) { 4769 add_uevent_var(env, "EVENT=create"); 4770 kvm->userspace_pid = task_pid_nr(current); 4771 } else if (type == KVM_EVENT_DESTROY_VM) { 4772 add_uevent_var(env, "EVENT=destroy"); 4773 } 4774 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 4775 4776 if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { 4777 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 4778 4779 if (p) { 4780 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 4781 if (!IS_ERR(tmp)) 4782 add_uevent_var(env, "STATS_PATH=%s", tmp); 4783 kfree(p); 4784 } 4785 } 4786 /* no need for checks, since we are adding at most only 5 keys */ 4787 env->envp[env->envp_idx++] = NULL; 4788 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 4789 kfree(env); 4790 } 4791 4792 static void kvm_init_debug(void) 4793 { 4794 struct kvm_stats_debugfs_item *p; 4795 4796 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 4797 4798 kvm_debugfs_num_entries = 0; 4799 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 4800 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p), 4801 kvm_debugfs_dir, (void *)(long)p->offset, 4802 stat_fops[p->kind]); 4803 } 4804 } 4805 4806 static int kvm_suspend(void) 4807 { 4808 if (kvm_usage_count) 4809 hardware_disable_nolock(NULL); 4810 return 0; 4811 } 4812 4813 static void kvm_resume(void) 4814 { 4815 if (kvm_usage_count) { 4816 #ifdef CONFIG_LOCKDEP 4817 WARN_ON(lockdep_is_held(&kvm_count_lock)); 4818 #endif 4819 hardware_enable_nolock(NULL); 4820 } 4821 } 4822 4823 static struct syscore_ops kvm_syscore_ops = { 4824 .suspend = kvm_suspend, 4825 .resume = kvm_resume, 4826 }; 4827 4828 static inline 4829 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 4830 { 4831 return container_of(pn, struct kvm_vcpu, preempt_notifier); 4832 } 4833 4834 static void 
kvm_sched_in(struct preempt_notifier *pn, int cpu) 4835 { 4836 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 4837 4838 WRITE_ONCE(vcpu->preempted, false); 4839 WRITE_ONCE(vcpu->ready, false); 4840 4841 __this_cpu_write(kvm_running_vcpu, vcpu); 4842 kvm_arch_sched_in(vcpu, cpu); 4843 kvm_arch_vcpu_load(vcpu, cpu); 4844 } 4845 4846 static void kvm_sched_out(struct preempt_notifier *pn, 4847 struct task_struct *next) 4848 { 4849 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 4850 4851 if (current->state == TASK_RUNNING) { 4852 WRITE_ONCE(vcpu->preempted, true); 4853 WRITE_ONCE(vcpu->ready, true); 4854 } 4855 kvm_arch_vcpu_put(vcpu); 4856 __this_cpu_write(kvm_running_vcpu, NULL); 4857 } 4858 4859 /** 4860 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 4861 * 4862 * We can disable preemption locally around accessing the per-CPU variable, 4863 * and use the resolved vcpu pointer after enabling preemption again, 4864 * because even if the current thread is migrated to another CPU, reading 4865 * the per-CPU value later will give us the same value as we update the 4866 * per-CPU variable in the preempt notifier handlers. 4867 */ 4868 struct kvm_vcpu *kvm_get_running_vcpu(void) 4869 { 4870 struct kvm_vcpu *vcpu; 4871 4872 preempt_disable(); 4873 vcpu = __this_cpu_read(kvm_running_vcpu); 4874 preempt_enable(); 4875 4876 return vcpu; 4877 } 4878 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 4879 4880 /** 4881 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 4882 */ 4883 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 4884 { 4885 return &kvm_running_vcpu; 4886 } 4887 4888 struct kvm_cpu_compat_check { 4889 void *opaque; 4890 int *ret; 4891 }; 4892 4893 static void check_processor_compat(void *data) 4894 { 4895 struct kvm_cpu_compat_check *c = data; 4896 4897 *c->ret = kvm_arch_check_processor_compat(c->opaque); 4898 } 4899 4900 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 4901 struct module *module) 4902 { 4903 struct kvm_cpu_compat_check c; 4904 int r; 4905 int cpu; 4906 4907 r = kvm_arch_init(opaque); 4908 if (r) 4909 goto out_fail; 4910 4911 /* 4912 * kvm_arch_init makes sure there's at most one caller 4913 * for architectures that support multiple implementations, 4914 * like intel and amd on x86. 4915 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 4916 * conflicts in case kvm is already setup for another implementation. 4917 */ 4918 r = kvm_irqfd_init(); 4919 if (r) 4920 goto out_irqfd; 4921 4922 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 4923 r = -ENOMEM; 4924 goto out_free_0; 4925 } 4926 4927 r = kvm_arch_hardware_setup(opaque); 4928 if (r < 0) 4929 goto out_free_1; 4930 4931 c.ret = &r; 4932 c.opaque = opaque; 4933 for_each_online_cpu(cpu) { 4934 smp_call_function_single(cpu, check_processor_compat, &c, 1); 4935 if (r < 0) 4936 goto out_free_2; 4937 } 4938 4939 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 4940 kvm_starting_cpu, kvm_dying_cpu); 4941 if (r) 4942 goto out_free_2; 4943 register_reboot_notifier(&kvm_reboot_notifier); 4944 4945 /* A kmem cache lets us meet the alignment requirements of fx_save. 
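 * Only the arch-specific portion of the vcpu (the 'arch' member) is
 * whitelisted for usercopy via kmem_cache_create_usercopy() below, so with
 * hardened usercopy enabled, copies to or from user space that stray outside
 * that region of struct kvm_vcpu are rejected.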
*/ 4946 if (!vcpu_align) 4947 vcpu_align = __alignof__(struct kvm_vcpu); 4948 kvm_vcpu_cache = 4949 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 4950 SLAB_ACCOUNT, 4951 offsetof(struct kvm_vcpu, arch), 4952 sizeof_field(struct kvm_vcpu, arch), 4953 NULL); 4954 if (!kvm_vcpu_cache) { 4955 r = -ENOMEM; 4956 goto out_free_3; 4957 } 4958 4959 r = kvm_async_pf_init(); 4960 if (r) 4961 goto out_free; 4962 4963 kvm_chardev_ops.owner = module; 4964 kvm_vm_fops.owner = module; 4965 kvm_vcpu_fops.owner = module; 4966 4967 r = misc_register(&kvm_dev); 4968 if (r) { 4969 pr_err("kvm: misc device register failed\n"); 4970 goto out_unreg; 4971 } 4972 4973 register_syscore_ops(&kvm_syscore_ops); 4974 4975 kvm_preempt_ops.sched_in = kvm_sched_in; 4976 kvm_preempt_ops.sched_out = kvm_sched_out; 4977 4978 kvm_init_debug(); 4979 4980 r = kvm_vfio_ops_init(); 4981 WARN_ON(r); 4982 4983 return 0; 4984 4985 out_unreg: 4986 kvm_async_pf_deinit(); 4987 out_free: 4988 kmem_cache_destroy(kvm_vcpu_cache); 4989 out_free_3: 4990 unregister_reboot_notifier(&kvm_reboot_notifier); 4991 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 4992 out_free_2: 4993 kvm_arch_hardware_unsetup(); 4994 out_free_1: 4995 free_cpumask_var(cpus_hardware_enabled); 4996 out_free_0: 4997 kvm_irqfd_exit(); 4998 out_irqfd: 4999 kvm_arch_exit(); 5000 out_fail: 5001 return r; 5002 } 5003 EXPORT_SYMBOL_GPL(kvm_init); 5004 5005 void kvm_exit(void) 5006 { 5007 debugfs_remove_recursive(kvm_debugfs_dir); 5008 misc_deregister(&kvm_dev); 5009 kmem_cache_destroy(kvm_vcpu_cache); 5010 kvm_async_pf_deinit(); 5011 unregister_syscore_ops(&kvm_syscore_ops); 5012 unregister_reboot_notifier(&kvm_reboot_notifier); 5013 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5014 on_each_cpu(hardware_disable_nolock, NULL, 1); 5015 kvm_arch_hardware_unsetup(); 5016 kvm_arch_exit(); 5017 kvm_irqfd_exit(); 5018 free_cpumask_var(cpus_hardware_enabled); 5019 kvm_vfio_ops_exit(); 5020 } 5021 EXPORT_SYMBOL_GPL(kvm_exit); 5022 5023 struct kvm_vm_worker_thread_context { 5024 struct kvm *kvm; 5025 struct task_struct *parent; 5026 struct completion init_done; 5027 kvm_vm_thread_fn_t thread_fn; 5028 uintptr_t data; 5029 int err; 5030 }; 5031 5032 static int kvm_vm_worker_thread(void *context) 5033 { 5034 /* 5035 * The init_context is allocated on the stack of the parent thread, so 5036 * we have to locally copy anything that is needed beyond initialization 5037 */ 5038 struct kvm_vm_worker_thread_context *init_context = context; 5039 struct kvm *kvm = init_context->kvm; 5040 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5041 uintptr_t data = init_context->data; 5042 int err; 5043 5044 err = kthread_park(current); 5045 /* kthread_park(current) is never supposed to return an error */ 5046 WARN_ON(err != 0); 5047 if (err) 5048 goto init_complete; 5049 5050 err = cgroup_attach_task_all(init_context->parent, current); 5051 if (err) { 5052 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5053 __func__, err); 5054 goto init_complete; 5055 } 5056 5057 set_user_nice(current, task_nice(init_context->parent)); 5058 5059 init_complete: 5060 init_context->err = err; 5061 complete(&init_context->init_done); 5062 init_context = NULL; 5063 5064 if (err) 5065 return err; 5066 5067 /* Wait to be woken up by the spawner before proceeding. 
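 * The thread marked itself for parking with kthread_park(current) during
 * init; kthread_parkme() below is what actually parks it.  The caller of
 * kvm_vm_create_worker_thread() is expected to kthread_unpark() the returned
 * task when thread_fn should start running, or kthread_stop() it to bail out
 * without ever running thread_fn.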
*/ 5068 kthread_parkme(); 5069 5070 if (!kthread_should_stop()) 5071 err = thread_fn(kvm, data); 5072 5073 return err; 5074 } 5075 5076 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5077 uintptr_t data, const char *name, 5078 struct task_struct **thread_ptr) 5079 { 5080 struct kvm_vm_worker_thread_context init_context = {}; 5081 struct task_struct *thread; 5082 5083 *thread_ptr = NULL; 5084 init_context.kvm = kvm; 5085 init_context.parent = current; 5086 init_context.thread_fn = thread_fn; 5087 init_context.data = data; 5088 init_completion(&init_context.init_done); 5089 5090 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5091 "%s-%d", name, task_pid_nr(current)); 5092 if (IS_ERR(thread)) 5093 return PTR_ERR(thread); 5094 5095 /* kthread_run is never supposed to return NULL */ 5096 WARN_ON(thread == NULL); 5097 5098 wait_for_completion(&init_context.init_done); 5099 5100 if (!init_context.err) 5101 *thread_ptr = thread; 5102 5103 return init_context.err; 5104 } 5105
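/*
 * Illustrative only: an architecture module wires this file in by calling
 * kvm_init() from its module_init hook and kvm_exit() from module_exit.
 * The sketch below is a simplified, hypothetical arch module (names such as
 * my_arch_init_ops and struct my_arch_vcpu are placeholders, not part of
 * this file); real implementations pass their own init ops as the opaque
 * pointer and their own vcpu container type for the cache sizing.
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		return kvm_init(&my_arch_init_ops, sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(my_arch_kvm_init);
 *	module_exit(my_arch_kvm_exit);
 */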