// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
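
/*
 * Rough sketch of how these knobs interact; the authoritative logic lives
 * in grow_halt_poll_ns() and shrink_halt_poll_ns() further down in this
 * file.  A wakeup that arrives just after an unsuccessful poll grows the
 * per-vCPU window multiplicatively from the start value, roughly:
 *
 *	val = max(val * halt_poll_ns_grow, halt_poll_ns_grow_start);
 *
 * while a too-long poll shrinks it; halt_poll_ns_shrink == 0 resets the
 * window to zero instead of dividing:
 *
 *	val = halt_poll_ns_shrink ? val / halt_poll_ns_shrink : 0;
 */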

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static struct file_operations kvm_chardev_ops;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_page(struct page *page)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (WARN_ON_ONCE(!page_count(page)))
		return false;

	return is_zone_device_page(page);
}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete, it has been compiled purely through people wanting to
 * back guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		return page;

	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
	if (is_zero_pfn(pfn))
		return page;

	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (kvm_is_zone_device_page(page))
		return page;

	return NULL;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
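
/*
 * Illustrative sketch only: the canonical pairing of the two helpers
 * above.  Anything that needs the vCPU's architectural state loaded on
 * the current physical CPU brackets the work like so:
 *
 *	vcpu_load(vcpu);
 *	...operate on the loaded vCPU...
 *	vcpu_put(vcpu);
 *
 * The pairing keeps kvm_running_vcpu and the preempt notifier in sync,
 * which kvm_get_running_vcpu() relies on.
 */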

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized.  See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
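
/*
 * Illustrative sketch only: the producer/consumer shape of the request
 * API.  A producer raises a request on every vCPU and kicks them out of
 * guest mode:
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * and each vCPU consumes the request from its run loop before
 * re-entering the guest:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		...service the flush...
 *
 * KVM_REQUEST_WAIT upgrades the kick to a synchronous IPI that is
 * waited on until the targeted CPUs have acknowledged it.
 */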

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}

	kvfree(mc->objects);

	mc->objects = NULL;
	mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
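
/*
 * Illustrative sketch only: the intended split for the cache helpers
 * above.  Topping up may sleep and so must happen before mmu_lock is
 * taken; allocation is then guaranteed to succeed while the lock is held:
 *
 *	r = kvm_mmu_topup_memory_cache(mc, min);  // may sleep
 *	if (r)
 *		return r;
 *	...
 *	KVM_MMU_LOCK(kvm);
 *	obj = kvm_mmu_memory_cache_alloc(mc);     // must not fail
 *	KVM_MMU_UNLOCK(kvm);
 *
 * The GFP_ATOMIC fallback in kvm_mmu_memory_cache_alloc() exists only to
 * paper over an undersized top-up, hence the WARN_ON there.
 */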
494 */ 495 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 496 497 free_page((unsigned long)vcpu->run); 498 kmem_cache_free(kvm_vcpu_cache, vcpu); 499 } 500 501 void kvm_destroy_vcpus(struct kvm *kvm) 502 { 503 unsigned long i; 504 struct kvm_vcpu *vcpu; 505 506 kvm_for_each_vcpu(i, vcpu, kvm) { 507 kvm_vcpu_destroy(vcpu); 508 xa_erase(&kvm->vcpu_array, i); 509 } 510 511 atomic_set(&kvm->online_vcpus, 0); 512 } 513 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 514 515 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 516 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 517 { 518 return container_of(mn, struct kvm, mmu_notifier); 519 } 520 521 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 522 struct mm_struct *mm, 523 unsigned long start, unsigned long end) 524 { 525 struct kvm *kvm = mmu_notifier_to_kvm(mn); 526 int idx; 527 528 idx = srcu_read_lock(&kvm->srcu); 529 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 530 srcu_read_unlock(&kvm->srcu, idx); 531 } 532 533 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 534 535 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 536 unsigned long end); 537 538 typedef void (*on_unlock_fn_t)(struct kvm *kvm); 539 540 struct kvm_hva_range { 541 unsigned long start; 542 unsigned long end; 543 pte_t pte; 544 hva_handler_t handler; 545 on_lock_fn_t on_lock; 546 on_unlock_fn_t on_unlock; 547 bool flush_on_ret; 548 bool may_block; 549 }; 550 551 /* 552 * Use a dedicated stub instead of NULL to indicate that there is no callback 553 * function/handler. The compiler technically can't guarantee that a real 554 * function will have a non-zero address, and so it will generate code to 555 * check for !NULL, whereas comparing against a stub will be elided at compile 556 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 557 */ 558 static void kvm_null_fn(void) 559 { 560 561 } 562 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 563 564 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 565 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 566 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 567 node; \ 568 node = interval_tree_iter_next(node, start, last)) \ 569 570 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 571 const struct kvm_hva_range *range) 572 { 573 bool ret = false, locked = false; 574 struct kvm_gfn_range gfn_range; 575 struct kvm_memory_slot *slot; 576 struct kvm_memslots *slots; 577 int i, idx; 578 579 if (WARN_ON_ONCE(range->end <= range->start)) 580 return 0; 581 582 /* A null handler is allowed if and only if on_lock() is provided. 

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return 0;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);

	if (locked) {
		KVM_MMU_UNLOCK(kvm);
		if (!IS_KVM_NULL_FN(range->on_unlock))
			range->on_unlock(kvm);
	}

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_invalidate_in_progress is zero, then no in-progress
	 * invalidations, including this one, found a relevant memslot at
	 * start(); rechecking memslots here is unnecessary.  Note, a false
	 * positive (count elevated by a different invalidation) is sub-optimal
	 * but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_invalidate_in_progress++;
	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.on_unlock	= kvm_arch_guest_memory_reclaimed,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
	 * each cache's lock.  There are relatively few caches in existence at
	 * any given time, and the caches themselves can check for hva overlap,
	 * i.e. don't need to rely on memslot overlap checks for performance.
	 * Because this runs without holding mmu_lock, the pfn caches must use
	 * mn_active_invalidate_count (see above) instead of
	 * mmu_invalidate_in_progress.
	 */
	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
					  hva_range.may_block);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
	 */
	kvm->mmu_invalidate_in_progress--;
}
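
/*
 * Rough sketch of the consumer side of the begin/end protocol above, as
 * used by the page fault paths; the authoritative helpers are
 * mmu_invalidate_retry() and friends in include/linux/kvm_host.h:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	...translate hva to pfn, may sleep...
 *	KVM_MMU_LOCK(kvm);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto retry;	// raced with an invalidation
 *	...install the mapping...
 *
 * The smp_rmb() pairs with the smp_wmb() in kvm_mmu_invalidate_end().
 */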

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_mmu_invalidate_end,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

	BUG_ON(kvm->mmu_invalidate_in_progress < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear, so we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else  /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets,
	 * arbitrarily free using index '1' so the second invocation of this
	 * function isn't operating over a structure with dangling pointers
	 * (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (IS_ERR(kvm->debugfs_dentry))
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret = -ENOMEM;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		goto out_err;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	ret = kvm_arch_create_vm_debugfs(kvm);
	if (ret)
		goto out_err;

	return 0;
out_err:
	kvm_destroy_vm_debugfs(kvm);
	return ret;
}
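
/*
 * The resulting layout, assuming debugfs is mounted in the usual spot,
 * is one directory per VM named "<pid>-<vm fd>", e.g.
 *
 *	/sys/kernel/debug/kvm/1234-11/
 *
 * containing one file per VM-wide and per-vCPU stat descriptor.
 */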

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-vm debugfs directory is created.  When called,
 * kvm->debugfs_dentry should be setup already, so we can create arch-specific
 * debugfs entries under it.  Cleanup is automatically done in
 * kvm_destroy_vm_debugfs() recursively, so a per-arch destroy interface is
 * not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r = -ENOMEM;
	int i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	/* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
	__module_get(kvm_chardev_ops.owner);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);
	kvm->max_vcpus = KVM_MAX_VCPUS;

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	/*
	 * Force subsequent debugfs file creations to fail if the VM directory
	 * is not created (by kvm_create_vm_debugfs()).
	 */
	kvm->debugfs_dentry = ERR_PTR(-ENOENT);

	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
		 task_pid_nr(current));

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space. */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto out_no_coalesced_mmio;

	r = kvm_create_vm_debugfs(kvm, fdname);
	if (r)
		goto out_err_no_debugfs;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err:
	kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
	kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	module_put(kvm_chardev_ops.owner);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in install_new_memslots as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
	module_put(kvm_chardev_ops.owner);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the vm is not being destroyed; this is a safe version of
 * kvm_get_kvm().  Return true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}
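
/*
 * Memslots are double-buffered: each address space has two complete sets
 * of lookup structures, with the rb-tree/hash/interval-tree nodes
 * embedded pairwise in every kvm_memory_slot (hence the two-element node
 * arrays and slots->node_idx).  Modifications are staged on the inactive
 * set and published with a single rcu_assign_pointer() in
 * kvm_swap_active_memslots(), so SRCU readers never observe a
 * half-updated set.
 */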

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{
	if (WARN_ON_ONCE(!a && !b))
		return 0;

	if (!a)
		return b->as_id;
	if (!b)
		return a->as_id;

	WARN_ON_ONCE(a->as_id != b->as_id);
	return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{
	struct rb_root *gfn_tree = &slots->gfn_tree;
	struct rb_node **node, *parent;
	int idx = slots->node_idx;

	parent = NULL;
	for (node = &gfn_tree->rb_node; *node; ) {
		struct kvm_memory_slot *tmp;

		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
		parent = *node;
		if (slot->base_gfn < tmp->base_gfn)
			node = &(*node)->rb_left;
		else if (slot->base_gfn > tmp->base_gfn)
			node = &(*node)->rb_right;
		else
			BUG();
	}

	rb_link_node(&slot->gfn_node[idx], parent, node);
	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{
	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int idx = slots->node_idx;

	WARN_ON_ONCE(old->base_gfn != new->base_gfn);

	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
			&slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
	int idx = slots->node_idx;

	if (old) {
		hash_del(&old->id_node[idx]);
		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);

		if (!new) {
			kvm_erase_gfn_node(slots, old);
			return;
		}
	}

	/*
	 * Initialize @new's hva range.  Do this even when replacing an @old
	 * slot, kvm_copy_memslot() deliberately does not touch node data.
	 */
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	/*
	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
	 * hva_node needs to be swapped with remove+insert even though hva can't
	 * change when replacing an existing slot.
	 */
	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

	/*
	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
	 * switch the node in the gfn tree instead of removing the old and
	 * inserting the new as two separate operations.  Replacement is a
	 * single O(1) operation versus two O(log(n)) operations for
	 * remove+insert.
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot.  Must be released before synchronizing
	 * SRCU below in order to avoid deadlock with another thread
	 * acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int r;

	/*
	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
	 * will be freed on "commit".  If logging is enabled in both old and
	 * new, reuse the existing bitmap.  If logging is enabled only in the
	 * new and KVM isn't using a ring buffer, allocate and initialize a
	 * new bitmap.
	 */
	if (change != KVM_MR_DELETE) {
		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
			new->dirty_bitmap = NULL;
		else if (old && old->dirty_bitmap)
			new->dirty_bitmap = old->dirty_bitmap;
		else if (kvm_use_dirty_bitmap(kvm)) {
			r = kvm_alloc_dirty_bitmap(new);
			if (r)
				return r;

			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
				bitmap_set(new->dirty_bitmap, 0, new->npages);
		}
	}

	r = kvm_arch_prepare_memory_region(kvm, old, new, change);

	/* Free the bitmap on failure if it was allocated above. */
	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
		kvm_destroy_dirty_bitmap(new);

	return r;
}

static void kvm_commit_memory_region(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int old_flags = old ? old->flags : 0;
	int new_flags = new ? new->flags : 0;
	/*
	 * Update the total number of memslot pages before calling the arch
	 * hook so that architectures can consume the result directly.
	 */
	if (change == KVM_MR_DELETE)
		kvm->nr_memslot_pages -= old->npages;
	else if (change == KVM_MR_CREATE)
		kvm->nr_memslot_pages += new->npages;

	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
		atomic_set(&kvm->nr_memslots_dirty_logging,
			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
	}

	kvm_arch_commit_memory_region(kvm, old, new, change);

	switch (change) {
	case KVM_MR_CREATE:
		/* Nothing more to do. */
		break;
	case KVM_MR_DELETE:
		/* Free the old memslot and all its metadata. */
		kvm_free_memslot(kvm, old);
		break;
	case KVM_MR_MOVE:
	case KVM_MR_FLAGS_ONLY:
		/*
		 * Free the dirty bitmap as needed; the below check encompasses
		 * both the flags and whether a ring buffer is being used.
		 */
		if (old->dirty_bitmap && !new->dirty_bitmap)
			kvm_destroy_dirty_bitmap(old);

		/*
		 * The final quirk.  Free the detached, old slot, but only its
		 * memory, not any metadata.  Metadata, including arch specific
		 * data, may be reused by @new.
		 */
		kfree(old);
		break;
	default:
		BUG();
	}
}

/*
 * Activate @new, which must be installed in the inactive slots by the caller,
 * by swapping the active slots and then propagating @new to @old once @old is
 * unreachable and can be safely modified.
 *
 * With NULL @old this simply adds @new to @active (while swapping the sets).
 * With NULL @new this simply removes @old from @active and frees it
 * (while also swapping the sets).
 */
static void kvm_activate_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);

	kvm_swap_active_memslots(kvm, as_id);

	/* Propagate the new memslot to the now inactive memslots. */
	kvm_replace_memslot(kvm, old, new);
}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{
	dest->base_gfn = src->base_gfn;
	dest->npages = src->npages;
	dest->dirty_bitmap = src->dirty_bitmap;
	dest->arch = src->arch;
	dest->userspace_addr = src->userspace_addr;
	dest->flags = src->flags;
	dest->id = src->id;
	dest->as_id = src->as_id;
}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID.  As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

	/*
	 * Activate the slot that is now marked INVALID, but don't propagate
	 * the slot to the now inactive slots.  The slot is either going to be
	 * deleted or recreated as a new slot.
	 */
	kvm_swap_active_memslots(kvm, old->as_id);

	/*
	 * From this point no new shadow pages pointing to a deleted, or moved,
	 * memslot will be created.  Validation of sp->gfn happens in:
	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 *	- kvm_is_visible_gfn (mmu_check_root)
	 */
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);

	/* Was released by kvm_swap_active_memslots, reacquire. */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
	 * slots_arch_lock in install_new_memslots() and re-acquiring the lock
	 * above.  Writers are required to retrieve memslots *after* acquiring
	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
	 */
	old->arch = invalid_slot->arch;
}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{
	/* Add the new memslot to the inactive set and activate. */
	kvm_replace_memslot(kvm, NULL, new);
	kvm_activate_memslot(kvm, NULL, new);
}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Remove the old memslot (in the inactive memslots) by passing NULL as
	 * the "new" slot, and for the invalid version in the active slots.
	 */
	kvm_replace_memslot(kvm, old, NULL);
	kvm_activate_memslot(kvm, invalid_slot, NULL);
}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Replace the old memslot in the inactive slots, and then swap slots
	 * and replace the current INVALID with the new as well.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, invalid_slot, new);
}

static void kvm_update_flags_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
{
	/*
	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
	 * an intermediate step.  Instead, the old memslot is simply replaced
	 * with a new, updated copy in both memslot sets.
	 */
1799 */ 1800 kvm_replace_memslot(kvm, old, new); 1801 kvm_activate_memslot(kvm, old, new); 1802 } 1803 1804 static int kvm_set_memslot(struct kvm *kvm, 1805 struct kvm_memory_slot *old, 1806 struct kvm_memory_slot *new, 1807 enum kvm_mr_change change) 1808 { 1809 struct kvm_memory_slot *invalid_slot; 1810 int r; 1811 1812 /* 1813 * Released in kvm_swap_active_memslots. 1814 * 1815 * Must be held from before the current memslots are copied until 1816 * after the new memslots are installed with rcu_assign_pointer, 1817 * then released before the synchronize srcu in kvm_swap_active_memslots. 1818 * 1819 * When modifying memslots outside of the slots_lock, must be held 1820 * before reading the pointer to the current memslots until after all 1821 * changes to those memslots are complete. 1822 * 1823 * These rules ensure that installing new memslots does not lose 1824 * changes made to the previous memslots. 1825 */ 1826 mutex_lock(&kvm->slots_arch_lock); 1827 1828 /* 1829 * Invalidate the old slot if it's being deleted or moved. This is 1830 * done prior to actually deleting/moving the memslot to allow vCPUs to 1831 * continue running by ensuring there are no mappings or shadow pages 1832 * for the memslot when it is deleted/moved. Without pre-invalidation 1833 * (and without a lock), a window would exist between effecting the 1834 * delete/move and committing the changes in arch code where KVM or a 1835 * guest could access a non-existent memslot. 1836 * 1837 * Modifications are done on a temporary, unreachable slot. The old 1838 * slot needs to be preserved in case a later step fails and the 1839 * invalidation needs to be reverted. 1840 */ 1841 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1842 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1843 if (!invalid_slot) { 1844 mutex_unlock(&kvm->slots_arch_lock); 1845 return -ENOMEM; 1846 } 1847 kvm_invalidate_memslot(kvm, old, invalid_slot); 1848 } 1849 1850 r = kvm_prepare_memory_region(kvm, old, new, change); 1851 if (r) { 1852 /* 1853 * For DELETE/MOVE, revert the above INVALID change. No 1854 * modifications required since the original slot was preserved 1855 * in the inactive slots. Changing the active memslots also 1856 * release slots_arch_lock. 1857 */ 1858 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1859 kvm_activate_memslot(kvm, invalid_slot, old); 1860 kfree(invalid_slot); 1861 } else { 1862 mutex_unlock(&kvm->slots_arch_lock); 1863 } 1864 return r; 1865 } 1866 1867 /* 1868 * For DELETE and MOVE, the working slot is now active as the INVALID 1869 * version of the old slot. MOVE is particularly special as it reuses 1870 * the old slot and returns a copy of the old slot (in working_slot). 1871 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1872 * old slot is detached but otherwise preserved. 1873 */ 1874 if (change == KVM_MR_CREATE) 1875 kvm_create_memslot(kvm, new); 1876 else if (change == KVM_MR_DELETE) 1877 kvm_delete_memslot(kvm, old, invalid_slot); 1878 else if (change == KVM_MR_MOVE) 1879 kvm_move_memslot(kvm, old, new, invalid_slot); 1880 else if (change == KVM_MR_FLAGS_ONLY) 1881 kvm_update_flags_memslot(kvm, old, new); 1882 else 1883 BUG(); 1884 1885 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1886 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1887 kfree(invalid_slot); 1888 1889 /* 1890 * No need to refresh new->arch, changes after dropping slots_arch_lock 1891 * will directly hit the final, active memslot. 
Architectures are 1892 * responsible for knowing that new->arch may be stale. 1893 */ 1894 kvm_commit_memory_region(kvm, old, new, change); 1895 1896 return 0; 1897 } 1898 1899 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1900 gfn_t start, gfn_t end) 1901 { 1902 struct kvm_memslot_iter iter; 1903 1904 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1905 if (iter.slot->id != id) 1906 return true; 1907 } 1908 1909 return false; 1910 } 1911 1912 /* 1913 * Allocate some memory and give it an address in the guest physical address 1914 * space. 1915 * 1916 * Discontiguous memory is allowed, mostly for framebuffers. 1917 * 1918 * Must be called holding kvm->slots_lock for write. 1919 */ 1920 int __kvm_set_memory_region(struct kvm *kvm, 1921 const struct kvm_userspace_memory_region *mem) 1922 { 1923 struct kvm_memory_slot *old, *new; 1924 struct kvm_memslots *slots; 1925 enum kvm_mr_change change; 1926 unsigned long npages; 1927 gfn_t base_gfn; 1928 int as_id, id; 1929 int r; 1930 1931 r = check_memory_region_flags(mem); 1932 if (r) 1933 return r; 1934 1935 as_id = mem->slot >> 16; 1936 id = (u16)mem->slot; 1937 1938 /* General sanity checks */ 1939 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1940 (mem->memory_size != (unsigned long)mem->memory_size)) 1941 return -EINVAL; 1942 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1943 return -EINVAL; 1944 /* We can read the guest memory with __xxx_user() later on. */ 1945 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1946 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1947 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1948 mem->memory_size)) 1949 return -EINVAL; 1950 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1951 return -EINVAL; 1952 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1953 return -EINVAL; 1954 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1955 return -EINVAL; 1956 1957 slots = __kvm_memslots(kvm, as_id); 1958 1959 /* 1960 * Note, the old memslot (and the pointer itself!) may be invalidated 1961 * and/or destroyed by kvm_set_memslot(). 1962 */ 1963 old = id_to_memslot(slots, id); 1964 1965 if (!mem->memory_size) { 1966 if (!old || !old->npages) 1967 return -EINVAL; 1968 1969 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1970 return -EIO; 1971 1972 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1973 } 1974 1975 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1976 npages = (mem->memory_size >> PAGE_SHIFT); 1977 1978 if (!old || !old->npages) { 1979 change = KVM_MR_CREATE; 1980 1981 /* 1982 * To simplify KVM internals, the total number of pages across 1983 * all memslots must fit in an unsigned long. 1984 */ 1985 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 1986 return -EINVAL; 1987 } else { /* Modify an existing slot. */ 1988 if ((mem->userspace_addr != old->userspace_addr) || 1989 (npages != old->npages) || 1990 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 1991 return -EINVAL; 1992 1993 if (base_gfn != old->base_gfn) 1994 change = KVM_MR_MOVE; 1995 else if (mem->flags != old->flags) 1996 change = KVM_MR_FLAGS_ONLY; 1997 else /* Nothing to change. */ 1998 return 0; 1999 } 2000 2001 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 2002 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 2003 return -EEXIST; 2004 2005 /* Allocate a slot that will persist in the memslot. 
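 *
 * For illustration, the ->slot field of the ioctl argument packs the
 * address space id and the slot id, matching the decode above
 * ("as_id = mem->slot >> 16; id = (u16)mem->slot"); a userspace caller
 * might fill it in as:
 *
 *   struct kvm_userspace_memory_region region = {
 *           .slot            = (as_id << 16) | slot_id,
 *           .guest_phys_addr = gpa,
 *           .memory_size     = size,
 *           .userspace_addr  = (__u64)hva,
 *   };
 *   ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);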
*/ 2006 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 2007 if (!new) 2008 return -ENOMEM; 2009 2010 new->as_id = as_id; 2011 new->id = id; 2012 new->base_gfn = base_gfn; 2013 new->npages = npages; 2014 new->flags = mem->flags; 2015 new->userspace_addr = mem->userspace_addr; 2016 2017 r = kvm_set_memslot(kvm, old, new, change); 2018 if (r) 2019 kfree(new); 2020 return r; 2021 } 2022 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 2023 2024 int kvm_set_memory_region(struct kvm *kvm, 2025 const struct kvm_userspace_memory_region *mem) 2026 { 2027 int r; 2028 2029 mutex_lock(&kvm->slots_lock); 2030 r = __kvm_set_memory_region(kvm, mem); 2031 mutex_unlock(&kvm->slots_lock); 2032 return r; 2033 } 2034 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 2035 2036 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 2037 struct kvm_userspace_memory_region *mem) 2038 { 2039 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 2040 return -EINVAL; 2041 2042 return kvm_set_memory_region(kvm, mem); 2043 } 2044 2045 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 2046 /** 2047 * kvm_get_dirty_log - get a snapshot of dirty pages 2048 * @kvm: pointer to kvm instance 2049 * @log: slot id and address to which we copy the log 2050 * @is_dirty: set to '1' if any dirty pages were found 2051 * @memslot: set to the associated memslot, always valid on success 2052 */ 2053 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 2054 int *is_dirty, struct kvm_memory_slot **memslot) 2055 { 2056 struct kvm_memslots *slots; 2057 int i, as_id, id; 2058 unsigned long n; 2059 unsigned long any = 0; 2060 2061 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2062 if (!kvm_use_dirty_bitmap(kvm)) 2063 return -ENXIO; 2064 2065 *memslot = NULL; 2066 *is_dirty = 0; 2067 2068 as_id = log->slot >> 16; 2069 id = (u16)log->slot; 2070 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2071 return -EINVAL; 2072 2073 slots = __kvm_memslots(kvm, as_id); 2074 *memslot = id_to_memslot(slots, id); 2075 if (!(*memslot) || !(*memslot)->dirty_bitmap) 2076 return -ENOENT; 2077 2078 kvm_arch_sync_dirty_log(kvm, *memslot); 2079 2080 n = kvm_dirty_bitmap_bytes(*memslot); 2081 2082 for (i = 0; !any && i < n/sizeof(long); ++i) 2083 any = (*memslot)->dirty_bitmap[i]; 2084 2085 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 2086 return -EFAULT; 2087 2088 if (any) 2089 *is_dirty = 1; 2090 return 0; 2091 } 2092 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 2093 2094 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2095 /** 2096 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 2097 * and reenable dirty page tracking for the corresponding pages. 2098 * @kvm: pointer to kvm instance 2099 * @log: slot id and address to which we copy the log 2100 * 2101 * We need to keep it in mind that VCPU threads can write to the bitmap 2102 * concurrently. So, to avoid losing track of dirty pages we keep the 2103 * following order: 2104 * 2105 * 1. Take a snapshot of the bit and clear it if needed. 2106 * 2. Write protect the corresponding page. 2107 * 3. Copy the snapshot to the userspace. 2108 * 4. Upon return caller flushes TLB's if needed. 2109 * 2110 * Between 2 and 4, the guest may write to the page using the remaining TLB 2111 * entry. This is not a problem because the page is reported dirty using 2112 * the snapshot taken before and step 4 ensures that writes done after 2113 * exiting to userspace will be logged for the next call. 
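 *
 * Concretely, the snapshot in step 1 is taken with an atomic exchange on
 * each bitmap word (see the loop below), e.g.:
 *
 *   mask = xchg(&dirty_bitmap[i], 0);
 *   dirty_bitmap_buffer[i] = mask;
 *
 * so a bit set by a racing vCPU is observed either by this snapshot or by
 * the next one, never lost.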
2114 * 2115 */ 2116 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 2117 { 2118 struct kvm_memslots *slots; 2119 struct kvm_memory_slot *memslot; 2120 int i, as_id, id; 2121 unsigned long n; 2122 unsigned long *dirty_bitmap; 2123 unsigned long *dirty_bitmap_buffer; 2124 bool flush; 2125 2126 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2127 if (!kvm_use_dirty_bitmap(kvm)) 2128 return -ENXIO; 2129 2130 as_id = log->slot >> 16; 2131 id = (u16)log->slot; 2132 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2133 return -EINVAL; 2134 2135 slots = __kvm_memslots(kvm, as_id); 2136 memslot = id_to_memslot(slots, id); 2137 if (!memslot || !memslot->dirty_bitmap) 2138 return -ENOENT; 2139 2140 dirty_bitmap = memslot->dirty_bitmap; 2141 2142 kvm_arch_sync_dirty_log(kvm, memslot); 2143 2144 n = kvm_dirty_bitmap_bytes(memslot); 2145 flush = false; 2146 if (kvm->manual_dirty_log_protect) { 2147 /* 2148 * Unlike kvm_get_dirty_log, we always return false in *flush, 2149 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 2150 * is some code duplication between this function and 2151 * kvm_get_dirty_log, but hopefully once all architectures 2152 * transition to kvm_get_dirty_log_protect, kvm_get_dirty_log 2153 * can be eliminated. 2154 */ 2155 dirty_bitmap_buffer = dirty_bitmap; 2156 } else { 2157 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2158 memset(dirty_bitmap_buffer, 0, n); 2159 2160 KVM_MMU_LOCK(kvm); 2161 for (i = 0; i < n / sizeof(long); i++) { 2162 unsigned long mask; 2163 gfn_t offset; 2164 2165 if (!dirty_bitmap[i]) 2166 continue; 2167 2168 flush = true; 2169 mask = xchg(&dirty_bitmap[i], 0); 2170 dirty_bitmap_buffer[i] = mask; 2171 2172 offset = i * BITS_PER_LONG; 2173 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2174 offset, mask); 2175 } 2176 KVM_MMU_UNLOCK(kvm); 2177 } 2178 2179 if (flush) 2180 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2181 2182 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2183 return -EFAULT; 2184 return 0; 2185 } 2186 2187 2188 /** 2189 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2190 * @kvm: kvm instance 2191 * @log: slot id and address to which we copy the log 2192 * 2193 * Steps 1-4 below provide a general overview of dirty page logging. See 2194 * kvm_get_dirty_log_protect() function description for additional details. 2195 * 2196 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we 2197 * always flush the TLB (step 4) even if a previous step failed and the dirty 2198 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging API 2199 * does not preclude a subsequent dirty log read by user space. Flushing the TLB ensures 2200 * writes will be marked dirty for the next log read. 2201 * 2202 * 1. Take a snapshot of the bit and clear it if needed. 2203 * 2. Write protect the corresponding page. 2204 * 3. Copy the snapshot to the userspace. 2205 * 4. Flush TLB's if needed. 2206 */ 2207 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2208 struct kvm_dirty_log *log) 2209 { 2210 int r; 2211 2212 mutex_lock(&kvm->slots_lock); 2213 2214 r = kvm_get_dirty_log_protect(kvm, log); 2215 2216 mutex_unlock(&kvm->slots_lock); 2217 return r; 2218 } 2219 2220 /** 2221 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2222 * and reenable dirty page tracking for the corresponding pages.
2223 * @kvm: pointer to kvm instance 2224 * @log: slot id and address from which to fetch the bitmap of dirty pages 2225 */ 2226 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2227 struct kvm_clear_dirty_log *log) 2228 { 2229 struct kvm_memslots *slots; 2230 struct kvm_memory_slot *memslot; 2231 int as_id, id; 2232 gfn_t offset; 2233 unsigned long i, n; 2234 unsigned long *dirty_bitmap; 2235 unsigned long *dirty_bitmap_buffer; 2236 bool flush; 2237 2238 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2239 if (!kvm_use_dirty_bitmap(kvm)) 2240 return -ENXIO; 2241 2242 as_id = log->slot >> 16; 2243 id = (u16)log->slot; 2244 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2245 return -EINVAL; 2246 2247 if (log->first_page & 63) 2248 return -EINVAL; 2249 2250 slots = __kvm_memslots(kvm, as_id); 2251 memslot = id_to_memslot(slots, id); 2252 if (!memslot || !memslot->dirty_bitmap) 2253 return -ENOENT; 2254 2255 dirty_bitmap = memslot->dirty_bitmap; 2256 2257 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2258 2259 if (log->first_page > memslot->npages || 2260 log->num_pages > memslot->npages - log->first_page || 2261 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2262 return -EINVAL; 2263 2264 kvm_arch_sync_dirty_log(kvm, memslot); 2265 2266 flush = false; 2267 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2268 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2269 return -EFAULT; 2270 2271 KVM_MMU_LOCK(kvm); 2272 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2273 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2274 i++, offset += BITS_PER_LONG) { 2275 unsigned long mask = *dirty_bitmap_buffer++; 2276 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2277 if (!mask) 2278 continue; 2279 2280 mask &= atomic_long_fetch_andnot(mask, p); 2281 2282 /* 2283 * mask contains the bits that really have been cleared. This 2284 * never includes any bits beyond the length of the memslot (if 2285 * the length is not aligned to 64 pages), therefore it is not 2286 * a problem if userspace sets them in log->dirty_bitmap. 2287 */ 2288 if (mask) { 2289 flush = true; 2290 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2291 offset, mask); 2292 } 2293 } 2294 KVM_MMU_UNLOCK(kvm); 2295 2296 if (flush) 2297 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2298 2299 return 0; 2300 } 2301 2302 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2303 struct kvm_clear_dirty_log *log) 2304 { 2305 int r; 2306 2307 mutex_lock(&kvm->slots_lock); 2308 2309 r = kvm_clear_dirty_log_protect(kvm, log); 2310 2311 mutex_unlock(&kvm->slots_lock); 2312 return r; 2313 } 2314 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2315 2316 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2317 { 2318 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2319 } 2320 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2321 2322 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2323 { 2324 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2325 u64 gen = slots->generation; 2326 struct kvm_memory_slot *slot; 2327 2328 /* 2329 * This also protects against using a memslot from a different address space, 2330 * since different address spaces have different generation numbers. 
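 *
 * For illustration: every memslot update bumps slots->generation, and each
 * address space has its own memslots instance with its own generation, so
 * a stale last_used_slot (from an older update, or cached while running in
 * another address space) fails the check below and is dropped before it
 * can be dereferenced.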
2331 */ 2332 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2333 vcpu->last_used_slot = NULL; 2334 vcpu->last_used_slot_gen = gen; 2335 } 2336 2337 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2338 if (slot) 2339 return slot; 2340 2341 /* 2342 * Fall back to searching all memslots. We purposely use 2343 * search_memslots() instead of __gfn_to_memslot() to avoid 2344 * thrashing the VM-wide last_used_slot in kvm_memslots. 2345 */ 2346 slot = search_memslots(slots, gfn, false); 2347 if (slot) { 2348 vcpu->last_used_slot = slot; 2349 return slot; 2350 } 2351 2352 return NULL; 2353 } 2354 2355 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2356 { 2357 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2358 2359 return kvm_is_visible_memslot(memslot); 2360 } 2361 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2362 2363 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2364 { 2365 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2366 2367 return kvm_is_visible_memslot(memslot); 2368 } 2369 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2370 2371 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2372 { 2373 struct vm_area_struct *vma; 2374 unsigned long addr, size; 2375 2376 size = PAGE_SIZE; 2377 2378 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2379 if (kvm_is_error_hva(addr)) 2380 return PAGE_SIZE; 2381 2382 mmap_read_lock(current->mm); 2383 vma = find_vma(current->mm, addr); 2384 if (!vma) 2385 goto out; 2386 2387 size = vma_kernel_pagesize(vma); 2388 2389 out: 2390 mmap_read_unlock(current->mm); 2391 2392 return size; 2393 } 2394 2395 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2396 { 2397 return slot->flags & KVM_MEM_READONLY; 2398 } 2399 2400 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2401 gfn_t *nr_pages, bool write) 2402 { 2403 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2404 return KVM_HVA_ERR_BAD; 2405 2406 if (memslot_is_readonly(slot) && write) 2407 return KVM_HVA_ERR_RO_BAD; 2408 2409 if (nr_pages) 2410 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2411 2412 return __gfn_to_hva_memslot(slot, gfn); 2413 } 2414 2415 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2416 gfn_t *nr_pages) 2417 { 2418 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2419 } 2420 2421 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2422 gfn_t gfn) 2423 { 2424 return gfn_to_hva_many(slot, gfn, NULL); 2425 } 2426 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2427 2428 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2429 { 2430 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2431 } 2432 EXPORT_SYMBOL_GPL(gfn_to_hva); 2433 2434 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2435 { 2436 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2437 } 2438 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2439 2440 /* 2441 * Return the hva of a @gfn and the R/W attribute if possible. 
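 *
 * The translation itself is plain arithmetic; __gfn_to_hva_memslot()
 * effectively computes
 *
 *   hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE
 *
 * the interesting part is validating the slot and its access mode first
 * (see __gfn_to_hva_many() above).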
2442 * 2443 * @slot: the kvm_memory_slot which contains @gfn 2444 * @gfn: the gfn to be translated 2445 * @writable: used to return the read/write attribute of the @slot if the hva 2446 * is valid and @writable is not NULL 2447 */ 2448 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2449 gfn_t gfn, bool *writable) 2450 { 2451 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2452 2453 if (!kvm_is_error_hva(hva) && writable) 2454 *writable = !memslot_is_readonly(slot); 2455 2456 return hva; 2457 } 2458 2459 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2460 { 2461 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2462 2463 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2464 } 2465 2466 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2467 { 2468 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2469 2470 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2471 } 2472 2473 static inline int check_user_page_hwpoison(unsigned long addr) 2474 { 2475 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2476 2477 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2478 return rc == -EHWPOISON; 2479 } 2480 2481 /* 2482 * The fast path to get the writable pfn which will be stored in @pfn, 2483 * true indicates success, otherwise false is returned. It's also the 2484 * only path that can be taken in atomic context. 2485 */ 2486 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2487 bool *writable, kvm_pfn_t *pfn) 2488 { 2489 struct page *page[1]; 2490 2491 /* 2492 * Fast pin a writable pfn only if it is a write fault request 2493 * or the caller allows mapping a writable pfn for a read fault 2494 * request. 2495 */ 2496 if (!(write_fault || writable)) 2497 return false; 2498 2499 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2500 *pfn = page_to_pfn(page[0]); 2501 2502 if (writable) 2503 *writable = true; 2504 return true; 2505 } 2506 2507 return false; 2508 } 2509 2510 /* 2511 * The slow path to get the pfn of the specified host virtual address; 2512 * 1 indicates success, -errno is returned if an error is detected.
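 *
 * The caller's intent is translated into get_user_pages() FOLL_* flags,
 * roughly:
 *
 *   write_fault   -> FOLL_WRITE          (demand a writable mapping)
 *   async         -> FOLL_NOWAIT         (do not sleep waiting for IO)
 *   interruptible -> FOLL_INTERRUPTIBLE  (honor non-fatal signals)
 *
 * on top of FOLL_HWPOISON, so poisoned pages fail the GUP instead of
 * being mapped into the guest.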
2513 */ 2514 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2515 bool interruptible, bool *writable, kvm_pfn_t *pfn) 2516 { 2517 unsigned int flags = FOLL_HWPOISON; 2518 struct page *page; 2519 int npages; 2520 2521 might_sleep(); 2522 2523 if (writable) 2524 *writable = write_fault; 2525 2526 if (write_fault) 2527 flags |= FOLL_WRITE; 2528 if (async) 2529 flags |= FOLL_NOWAIT; 2530 if (interruptible) 2531 flags |= FOLL_INTERRUPTIBLE; 2532 2533 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2534 if (npages != 1) 2535 return npages; 2536 2537 /* map read fault as writable if possible */ 2538 if (unlikely(!write_fault) && writable) { 2539 struct page *wpage; 2540 2541 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2542 *writable = true; 2543 put_page(page); 2544 page = wpage; 2545 } 2546 } 2547 *pfn = page_to_pfn(page); 2548 return npages; 2549 } 2550 2551 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2552 { 2553 if (unlikely(!(vma->vm_flags & VM_READ))) 2554 return false; 2555 2556 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2557 return false; 2558 2559 return true; 2560 } 2561 2562 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2563 { 2564 struct page *page = kvm_pfn_to_refcounted_page(pfn); 2565 2566 if (!page) 2567 return 1; 2568 2569 return get_page_unless_zero(page); 2570 } 2571 2572 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2573 unsigned long addr, bool write_fault, 2574 bool *writable, kvm_pfn_t *p_pfn) 2575 { 2576 kvm_pfn_t pfn; 2577 pte_t *ptep; 2578 spinlock_t *ptl; 2579 int r; 2580 2581 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2582 if (r) { 2583 /* 2584 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2585 * not call the fault handler, so do it here. 2586 */ 2587 bool unlocked = false; 2588 r = fixup_user_fault(current->mm, addr, 2589 (write_fault ? FAULT_FLAG_WRITE : 0), 2590 &unlocked); 2591 if (unlocked) 2592 return -EAGAIN; 2593 if (r) 2594 return r; 2595 2596 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2597 if (r) 2598 return r; 2599 } 2600 2601 if (write_fault && !pte_write(*ptep)) { 2602 pfn = KVM_PFN_ERR_RO_FAULT; 2603 goto out; 2604 } 2605 2606 if (writable) 2607 *writable = pte_write(*ptep); 2608 pfn = pte_pfn(*ptep); 2609 2610 /* 2611 * Get a reference here because callers of *hva_to_pfn* and 2612 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2613 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2614 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2615 * simply do nothing for reserved pfns. 2616 * 2617 * Whoever called remap_pfn_range is also going to call e.g. 2618 * unmap_mapping_range before the underlying pages are freed, 2619 * causing a call to our MMU notifier. 2620 * 2621 * Certain IO or PFNMAP mappings can be backed with valid 2622 * struct pages, but be allocated without refcounting e.g., 2623 * tail pages of non-compound higher order allocations, which 2624 * would then underflow the refcount when the caller does the 2625 * required put_page. Don't allow those pages here. 2626 */ 2627 if (!kvm_try_get_pfn(pfn)) 2628 r = -EFAULT; 2629 2630 out: 2631 pte_unmap_unlock(ptep, ptl); 2632 *p_pfn = pfn; 2633 2634 return r; 2635 } 2636 2637 /* 2638 * Pin guest page in memory and return its pfn. 
2639 * @addr: host virtual address which maps memory to the guest 2640 * @atomic: whether the call is in atomic context, i.e. whether the function must not sleep 2641 * @interruptible: whether the process can be interrupted by non-fatal signals 2642 * @async: whether this function needs to wait for IO to complete if the 2643 * host page is not in memory 2644 * @write_fault: whether we should get a writable host page 2645 * @writable: whether mapping a writable host page for !@write_fault is allowed 2646 * 2647 * The function will map a writable host page for these two cases: 2648 * 1): @write_fault = true 2649 * 2): @write_fault = false && @writable; @writable will tell the caller 2650 * whether the mapping is writable. 2651 */ 2652 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible, 2653 bool *async, bool write_fault, bool *writable) 2654 { 2655 struct vm_area_struct *vma; 2656 kvm_pfn_t pfn; 2657 int npages, r; 2658 2659 /* we can do it either atomically or asynchronously, not both */ 2660 BUG_ON(atomic && async); 2661 2662 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2663 return pfn; 2664 2665 if (atomic) 2666 return KVM_PFN_ERR_FAULT; 2667 2668 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible, 2669 writable, &pfn); 2670 if (npages == 1) 2671 return pfn; 2672 if (npages == -EINTR) 2673 return KVM_PFN_ERR_SIGPENDING; 2674 2675 mmap_read_lock(current->mm); 2676 if (npages == -EHWPOISON || 2677 (!async && check_user_page_hwpoison(addr))) { 2678 pfn = KVM_PFN_ERR_HWPOISON; 2679 goto exit; 2680 } 2681 2682 retry: 2683 vma = vma_lookup(current->mm, addr); 2684 2685 if (vma == NULL) 2686 pfn = KVM_PFN_ERR_FAULT; 2687 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2688 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); 2689 if (r == -EAGAIN) 2690 goto retry; 2691 if (r < 0) 2692 pfn = KVM_PFN_ERR_FAULT; 2693 } else { 2694 if (async && vma_is_valid(vma, write_fault)) 2695 *async = true; 2696 pfn = KVM_PFN_ERR_FAULT; 2697 } 2698 exit: 2699 mmap_read_unlock(current->mm); 2700 return pfn; 2701 } 2702 2703 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 2704 bool atomic, bool interruptible, bool *async, 2705 bool write_fault, bool *writable, hva_t *hva) 2706 { 2707 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2708 2709 if (hva) 2710 *hva = addr; 2711 2712 if (addr == KVM_HVA_ERR_RO_BAD) { 2713 if (writable) 2714 *writable = false; 2715 return KVM_PFN_ERR_RO_FAULT; 2716 } 2717 2718 if (kvm_is_error_hva(addr)) { 2719 if (writable) 2720 *writable = false; 2721 return KVM_PFN_NOSLOT; 2722 } 2723 2724 /* Do not map writable pfn in the readonly memslot.
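 *
 * Clearing the caller's @writable pointer (after reporting false through
 * it) tells hva_to_pfn() not to opportunistically upgrade a read fault to
 * a writable mapping, e.g. a read fault on a read-only memslot yields a
 * pfn that is only ever mapped for read.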
*/ 2725 if (writable && memslot_is_readonly(slot)) { 2726 *writable = false; 2727 writable = NULL; 2728 } 2729 2730 return hva_to_pfn(addr, atomic, interruptible, async, write_fault, 2731 writable); 2732 } 2733 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2734 2735 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2736 bool *writable) 2737 { 2738 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false, 2739 NULL, write_fault, writable, NULL); 2740 } 2741 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2742 2743 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) 2744 { 2745 return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true, 2746 NULL, NULL); 2747 } 2748 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2749 2750 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) 2751 { 2752 return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true, 2753 NULL, NULL); 2754 } 2755 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2756 2757 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2758 { 2759 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2760 } 2761 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2762 2763 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2764 { 2765 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2766 } 2767 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2768 2769 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2770 { 2771 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2772 } 2773 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2774 2775 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2776 struct page **pages, int nr_pages) 2777 { 2778 unsigned long addr; 2779 gfn_t entry = 0; 2780 2781 addr = gfn_to_hva_many(slot, gfn, &entry); 2782 if (kvm_is_error_hva(addr)) 2783 return -1; 2784 2785 if (entry < nr_pages) 2786 return 0; 2787 2788 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2789 } 2790 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2791 2792 /* 2793 * Do not use this helper unless you are absolutely certain the gfn _must_ be 2794 * backed by 'struct page'. A valid example is if the backing memslot is 2795 * controlled by KVM. Note, if the returned page is valid, its refcount has 2796 * been elevated by gfn_to_pfn().
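 *
 * An illustrative call pattern, assuming a KVM-controlled memslot:
 *
 *   page = gfn_to_page(kvm, gfn);
 *   if (!is_error_page(page)) {
 *           ... access the page contents ...
 *           kvm_release_page_dirty(page);
 *   }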
2797 */ 2798 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2799 { 2800 struct page *page; 2801 kvm_pfn_t pfn; 2802 2803 pfn = gfn_to_pfn(kvm, gfn); 2804 2805 if (is_error_noslot_pfn(pfn)) 2806 return KVM_ERR_PTR_BAD_PAGE; 2807 2808 page = kvm_pfn_to_refcounted_page(pfn); 2809 if (!page) 2810 return KVM_ERR_PTR_BAD_PAGE; 2811 2812 return page; 2813 } 2814 EXPORT_SYMBOL_GPL(gfn_to_page); 2815 2816 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2817 { 2818 if (dirty) 2819 kvm_release_pfn_dirty(pfn); 2820 else 2821 kvm_release_pfn_clean(pfn); 2822 } 2823 2824 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2825 { 2826 kvm_pfn_t pfn; 2827 void *hva = NULL; 2828 struct page *page = KVM_UNMAPPED_PAGE; 2829 2830 if (!map) 2831 return -EINVAL; 2832 2833 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2834 if (is_error_noslot_pfn(pfn)) 2835 return -EINVAL; 2836 2837 if (pfn_valid(pfn)) { 2838 page = pfn_to_page(pfn); 2839 hva = kmap(page); 2840 #ifdef CONFIG_HAS_IOMEM 2841 } else { 2842 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2843 #endif 2844 } 2845 2846 if (!hva) 2847 return -EFAULT; 2848 2849 map->page = page; 2850 map->hva = hva; 2851 map->pfn = pfn; 2852 map->gfn = gfn; 2853 2854 return 0; 2855 } 2856 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2857 2858 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2859 { 2860 if (!map) 2861 return; 2862 2863 if (!map->hva) 2864 return; 2865 2866 if (map->page != KVM_UNMAPPED_PAGE) 2867 kunmap(map->page); 2868 #ifdef CONFIG_HAS_IOMEM 2869 else 2870 memunmap(map->hva); 2871 #endif 2872 2873 if (dirty) 2874 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2875 2876 kvm_release_pfn(map->pfn, dirty); 2877 2878 map->hva = NULL; 2879 map->page = NULL; 2880 } 2881 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2882 2883 static bool kvm_is_ad_tracked_page(struct page *page) 2884 { 2885 /* 2886 * Per page-flags.h, pages tagged PG_reserved "should in general not be 2887 * touched (e.g. set dirty) except by its owner". 2888 */ 2889 return !PageReserved(page); 2890 } 2891 2892 static void kvm_set_page_dirty(struct page *page) 2893 { 2894 if (kvm_is_ad_tracked_page(page)) 2895 SetPageDirty(page); 2896 } 2897 2898 static void kvm_set_page_accessed(struct page *page) 2899 { 2900 if (kvm_is_ad_tracked_page(page)) 2901 mark_page_accessed(page); 2902 } 2903 2904 void kvm_release_page_clean(struct page *page) 2905 { 2906 WARN_ON(is_error_page(page)); 2907 2908 kvm_set_page_accessed(page); 2909 put_page(page); 2910 } 2911 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2912 2913 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2914 { 2915 struct page *page; 2916 2917 if (is_error_noslot_pfn(pfn)) 2918 return; 2919 2920 page = kvm_pfn_to_refcounted_page(pfn); 2921 if (!page) 2922 return; 2923 2924 kvm_release_page_clean(page); 2925 } 2926 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2927 2928 void kvm_release_page_dirty(struct page *page) 2929 { 2930 WARN_ON(is_error_page(page)); 2931 2932 kvm_set_page_dirty(page); 2933 kvm_release_page_clean(page); 2934 } 2935 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2936 2937 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2938 { 2939 struct page *page; 2940 2941 if (is_error_noslot_pfn(pfn)) 2942 return; 2943 2944 page = kvm_pfn_to_refcounted_page(pfn); 2945 if (!page) 2946 return; 2947 2948 kvm_release_page_dirty(page); 2949 } 2950 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2951 2952 /* 2953 * Note, checking for an error/noslot pfn is the caller's responsibility when 2954 * directly marking a page dirty/accessed. 
Unlike the "release" helpers, the 2955 * "set" helpers are not to be used when the pfn might point at garbage. 2956 */ 2957 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2958 { 2959 if (WARN_ON(is_error_noslot_pfn(pfn))) 2960 return; 2961 2962 if (pfn_valid(pfn)) 2963 kvm_set_page_dirty(pfn_to_page(pfn)); 2964 } 2965 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2966 2967 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2968 { 2969 if (WARN_ON(is_error_noslot_pfn(pfn))) 2970 return; 2971 2972 if (pfn_valid(pfn)) 2973 kvm_set_page_accessed(pfn_to_page(pfn)); 2974 } 2975 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2976 2977 static int next_segment(unsigned long len, int offset) 2978 { 2979 if (len > PAGE_SIZE - offset) 2980 return PAGE_SIZE - offset; 2981 else 2982 return len; 2983 } 2984 2985 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2986 void *data, int offset, int len) 2987 { 2988 int r; 2989 unsigned long addr; 2990 2991 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2992 if (kvm_is_error_hva(addr)) 2993 return -EFAULT; 2994 r = __copy_from_user(data, (void __user *)addr + offset, len); 2995 if (r) 2996 return -EFAULT; 2997 return 0; 2998 } 2999 3000 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 3001 int len) 3002 { 3003 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3004 3005 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3006 } 3007 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 3008 3009 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 3010 int offset, int len) 3011 { 3012 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3013 3014 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3015 } 3016 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 3017 3018 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 3019 { 3020 gfn_t gfn = gpa >> PAGE_SHIFT; 3021 int seg; 3022 int offset = offset_in_page(gpa); 3023 int ret; 3024 3025 while ((seg = next_segment(len, offset)) != 0) { 3026 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 3027 if (ret < 0) 3028 return ret; 3029 offset = 0; 3030 len -= seg; 3031 data += seg; 3032 ++gfn; 3033 } 3034 return 0; 3035 } 3036 EXPORT_SYMBOL_GPL(kvm_read_guest); 3037 3038 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 3039 { 3040 gfn_t gfn = gpa >> PAGE_SHIFT; 3041 int seg; 3042 int offset = offset_in_page(gpa); 3043 int ret; 3044 3045 while ((seg = next_segment(len, offset)) != 0) { 3046 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 3047 if (ret < 0) 3048 return ret; 3049 offset = 0; 3050 len -= seg; 3051 data += seg; 3052 ++gfn; 3053 } 3054 return 0; 3055 } 3056 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 3057 3058 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 3059 void *data, int offset, unsigned long len) 3060 { 3061 int r; 3062 unsigned long addr; 3063 3064 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3065 if (kvm_is_error_hva(addr)) 3066 return -EFAULT; 3067 pagefault_disable(); 3068 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 3069 pagefault_enable(); 3070 if (r) 3071 return -EFAULT; 3072 return 0; 3073 } 3074 3075 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 3076 void *data, unsigned long len) 3077 { 3078 gfn_t gfn = gpa >> PAGE_SHIFT; 3079 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3080 int offset = offset_in_page(gpa); 3081 3082 return 
__kvm_read_guest_atomic(slot, gfn, data, offset, len); 3083 } 3084 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 3085 3086 static int __kvm_write_guest_page(struct kvm *kvm, 3087 struct kvm_memory_slot *memslot, gfn_t gfn, 3088 const void *data, int offset, int len) 3089 { 3090 int r; 3091 unsigned long addr; 3092 3093 addr = gfn_to_hva_memslot(memslot, gfn); 3094 if (kvm_is_error_hva(addr)) 3095 return -EFAULT; 3096 r = __copy_to_user((void __user *)addr + offset, data, len); 3097 if (r) 3098 return -EFAULT; 3099 mark_page_dirty_in_slot(kvm, memslot, gfn); 3100 return 0; 3101 } 3102 3103 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 3104 const void *data, int offset, int len) 3105 { 3106 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3107 3108 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 3109 } 3110 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 3111 3112 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 3113 const void *data, int offset, int len) 3114 { 3115 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3116 3117 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 3118 } 3119 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 3120 3121 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 3122 unsigned long len) 3123 { 3124 gfn_t gfn = gpa >> PAGE_SHIFT; 3125 int seg; 3126 int offset = offset_in_page(gpa); 3127 int ret; 3128 3129 while ((seg = next_segment(len, offset)) != 0) { 3130 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 3131 if (ret < 0) 3132 return ret; 3133 offset = 0; 3134 len -= seg; 3135 data += seg; 3136 ++gfn; 3137 } 3138 return 0; 3139 } 3140 EXPORT_SYMBOL_GPL(kvm_write_guest); 3141 3142 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 3143 unsigned long len) 3144 { 3145 gfn_t gfn = gpa >> PAGE_SHIFT; 3146 int seg; 3147 int offset = offset_in_page(gpa); 3148 int ret; 3149 3150 while ((seg = next_segment(len, offset)) != 0) { 3151 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3152 if (ret < 0) 3153 return ret; 3154 offset = 0; 3155 len -= seg; 3156 data += seg; 3157 ++gfn; 3158 } 3159 return 0; 3160 } 3161 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3162 3163 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3164 struct gfn_to_hva_cache *ghc, 3165 gpa_t gpa, unsigned long len) 3166 { 3167 int offset = offset_in_page(gpa); 3168 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3169 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3170 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3171 gfn_t nr_pages_avail; 3172 3173 /* Update ghc->generation before performing any error checks. */ 3174 ghc->generation = slots->generation; 3175 3176 if (start_gfn > end_gfn) { 3177 ghc->hva = KVM_HVA_ERR_BAD; 3178 return -EINVAL; 3179 } 3180 3181 /* 3182 * If the requested region crosses two memslots, we still 3183 * verify that the entire region is valid here. 3184 */ 3185 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3186 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3187 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3188 &nr_pages_avail); 3189 if (kvm_is_error_hva(ghc->hva)) 3190 return -EFAULT; 3191 } 3192 3193 /* Use the slow path for cross page reads and writes. 
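 *
 * "Slow path" here means ghc->memslot is left NULL, which makes the cached
 * read/write helpers below fall back to kvm_read_guest()/kvm_write_guest()
 * on every access instead of using the precomputed hva.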
*/ 3194 if (nr_pages_needed == 1) 3195 ghc->hva += offset; 3196 else 3197 ghc->memslot = NULL; 3198 3199 ghc->gpa = gpa; 3200 ghc->len = len; 3201 return 0; 3202 } 3203 3204 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3205 gpa_t gpa, unsigned long len) 3206 { 3207 struct kvm_memslots *slots = kvm_memslots(kvm); 3208 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3209 } 3210 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3211 3212 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3213 void *data, unsigned int offset, 3214 unsigned long len) 3215 { 3216 struct kvm_memslots *slots = kvm_memslots(kvm); 3217 int r; 3218 gpa_t gpa = ghc->gpa + offset; 3219 3220 if (WARN_ON_ONCE(len + offset > ghc->len)) 3221 return -EINVAL; 3222 3223 if (slots->generation != ghc->generation) { 3224 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3225 return -EFAULT; 3226 } 3227 3228 if (kvm_is_error_hva(ghc->hva)) 3229 return -EFAULT; 3230 3231 if (unlikely(!ghc->memslot)) 3232 return kvm_write_guest(kvm, gpa, data, len); 3233 3234 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3235 if (r) 3236 return -EFAULT; 3237 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3238 3239 return 0; 3240 } 3241 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3242 3243 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3244 void *data, unsigned long len) 3245 { 3246 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3247 } 3248 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3249 3250 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3251 void *data, unsigned int offset, 3252 unsigned long len) 3253 { 3254 struct kvm_memslots *slots = kvm_memslots(kvm); 3255 int r; 3256 gpa_t gpa = ghc->gpa + offset; 3257 3258 if (WARN_ON_ONCE(len + offset > ghc->len)) 3259 return -EINVAL; 3260 3261 if (slots->generation != ghc->generation) { 3262 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3263 return -EFAULT; 3264 } 3265 3266 if (kvm_is_error_hva(ghc->hva)) 3267 return -EFAULT; 3268 3269 if (unlikely(!ghc->memslot)) 3270 return kvm_read_guest(kvm, gpa, data, len); 3271 3272 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3273 if (r) 3274 return -EFAULT; 3275 3276 return 0; 3277 } 3278 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3279 3280 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3281 void *data, unsigned long len) 3282 { 3283 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3284 } 3285 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3286 3287 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3288 { 3289 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3290 gfn_t gfn = gpa >> PAGE_SHIFT; 3291 int seg; 3292 int offset = offset_in_page(gpa); 3293 int ret; 3294 3295 while ((seg = next_segment(len, offset)) != 0) { 3296 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); 3297 if (ret < 0) 3298 return ret; 3299 offset = 0; 3300 len -= seg; 3301 ++gfn; 3302 } 3303 return 0; 3304 } 3305 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3306 3307 void mark_page_dirty_in_slot(struct kvm *kvm, 3308 const struct kvm_memory_slot *memslot, 3309 gfn_t gfn) 3310 { 3311 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3312 3313 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3314 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) 3315 return; 3316 3317 WARN_ON_ONCE(!vcpu && 
!kvm_arch_allow_write_without_running_vcpu(kvm)); 3318 #endif 3319 3320 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3321 unsigned long rel_gfn = gfn - memslot->base_gfn; 3322 u32 slot = (memslot->as_id << 16) | memslot->id; 3323 3324 if (kvm->dirty_ring_size && vcpu) 3325 kvm_dirty_ring_push(vcpu, slot, rel_gfn); 3326 else if (memslot->dirty_bitmap) 3327 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3328 } 3329 } 3330 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3331 3332 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3333 { 3334 struct kvm_memory_slot *memslot; 3335 3336 memslot = gfn_to_memslot(kvm, gfn); 3337 mark_page_dirty_in_slot(kvm, memslot, gfn); 3338 } 3339 EXPORT_SYMBOL_GPL(mark_page_dirty); 3340 3341 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3342 { 3343 struct kvm_memory_slot *memslot; 3344 3345 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3346 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3347 } 3348 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3349 3350 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3351 { 3352 if (!vcpu->sigset_active) 3353 return; 3354 3355 /* 3356 * This does a lockless modification of ->real_blocked, which is fine 3357 * because only current can change ->real_blocked and all readers of 3358 * ->real_blocked don't care as long as ->real_blocked is always a subset 3359 * of ->blocked. 3360 */ 3361 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked); 3362 } 3363 3364 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 3365 { 3366 if (!vcpu->sigset_active) 3367 return; 3368 3369 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL); 3370 sigemptyset(&current->real_blocked); 3371 } 3372 3373 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 3374 { 3375 unsigned int old, val, grow, grow_start; 3376 3377 old = val = vcpu->halt_poll_ns; 3378 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3379 grow = READ_ONCE(halt_poll_ns_grow); 3380 if (!grow) 3381 goto out; 3382 3383 val *= grow; 3384 if (val < grow_start) 3385 val = grow_start; 3386 3387 vcpu->halt_poll_ns = val; 3388 out: 3389 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 3390 } 3391 3392 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3393 { 3394 unsigned int old, val, shrink, grow_start; 3395 3396 old = val = vcpu->halt_poll_ns; 3397 shrink = READ_ONCE(halt_poll_ns_shrink); 3398 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3399 if (shrink == 0) 3400 val = 0; 3401 else 3402 val /= shrink; 3403 3404 if (val < grow_start) 3405 val = 0; 3406 3407 vcpu->halt_poll_ns = val; 3408 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 3409 } 3410 3411 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 3412 { 3413 int ret = -EINTR; 3414 int idx = srcu_read_lock(&vcpu->kvm->srcu); 3415 3416 if (kvm_arch_vcpu_runnable(vcpu)) 3417 goto out; 3418 if (kvm_cpu_has_pending_timer(vcpu)) 3419 goto out; 3420 if (signal_pending(current)) 3421 goto out; 3422 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) 3423 goto out; 3424 3425 ret = 0; 3426 out: 3427 srcu_read_unlock(&vcpu->kvm->srcu, idx); 3428 return ret; 3429 } 3430 3431 /* 3432 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is 3433 * pending. This is mostly used when halting a vCPU, but may also be used 3434 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
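 *
 * The wake-side counterpart, __kvm_vcpu_wake_up(), essentially boils down
 * to a single call:
 *
 *   rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
 *
 * which pairs with the prepare_to_rcuwait()/finish_rcuwait() sequence
 * below.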
3435 */ 3436 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3437 { 3438 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3439 bool waited = false; 3440 3441 vcpu->stat.generic.blocking = 1; 3442 3443 preempt_disable(); 3444 kvm_arch_vcpu_blocking(vcpu); 3445 prepare_to_rcuwait(wait); 3446 preempt_enable(); 3447 3448 for (;;) { 3449 set_current_state(TASK_INTERRUPTIBLE); 3450 3451 if (kvm_vcpu_check_block(vcpu) < 0) 3452 break; 3453 3454 waited = true; 3455 schedule(); 3456 } 3457 3458 preempt_disable(); 3459 finish_rcuwait(wait); 3460 kvm_arch_vcpu_unblocking(vcpu); 3461 preempt_enable(); 3462 3463 vcpu->stat.generic.blocking = 0; 3464 3465 return waited; 3466 } 3467 3468 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3469 ktime_t end, bool success) 3470 { 3471 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3472 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3473 3474 ++vcpu->stat.generic.halt_attempted_poll; 3475 3476 if (success) { 3477 ++vcpu->stat.generic.halt_successful_poll; 3478 3479 if (!vcpu_valid_wakeup(vcpu)) 3480 ++vcpu->stat.generic.halt_poll_invalid; 3481 3482 stats->halt_poll_success_ns += poll_ns; 3483 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3484 } else { 3485 stats->halt_poll_fail_ns += poll_ns; 3486 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3487 } 3488 } 3489 3490 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) 3491 { 3492 struct kvm *kvm = vcpu->kvm; 3493 3494 if (kvm->override_halt_poll_ns) { 3495 /* 3496 * Ensure kvm->max_halt_poll_ns is not read before 3497 * kvm->override_halt_poll_ns. 3498 * 3499 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL. 3500 */ 3501 smp_rmb(); 3502 return READ_ONCE(kvm->max_halt_poll_ns); 3503 } 3504 3505 return READ_ONCE(halt_poll_ns); 3506 } 3507 3508 /* 3509 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt 3510 * polling is enabled, busy wait for a short time before blocking to avoid the 3511 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3512 * is halted. 3513 */ 3514 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3515 { 3516 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3517 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3518 ktime_t start, cur, poll_end; 3519 bool waited = false; 3520 bool do_halt_poll; 3521 u64 halt_ns; 3522 3523 if (vcpu->halt_poll_ns > max_halt_poll_ns) 3524 vcpu->halt_poll_ns = max_halt_poll_ns; 3525 3526 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3527 3528 start = cur = poll_end = ktime_get(); 3529 if (do_halt_poll) { 3530 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3531 3532 do { 3533 if (kvm_vcpu_check_block(vcpu) < 0) 3534 goto out; 3535 cpu_relax(); 3536 poll_end = cur = ktime_get(); 3537 } while (kvm_vcpu_can_poll(cur, stop)); 3538 } 3539 3540 waited = kvm_vcpu_block(vcpu); 3541 3542 cur = ktime_get(); 3543 if (waited) { 3544 vcpu->stat.generic.halt_wait_ns += 3545 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3546 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3547 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3548 } 3549 out: 3550 /* The total time the vCPU was "halted", including polling time. */ 3551 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3552 3553 /* 3554 * Note, halt-polling is considered successful so long as the vCPU was 3555 * never actually scheduled out, i.e. 
even if the wake event arrived 3556 * after the halt-polling loop itself, but before the full wait. 3557 */ 3558 if (do_halt_poll) 3559 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3560 3561 if (halt_poll_allowed) { 3562 /* Recompute the max halt poll time in case it changed. */ 3563 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3564 3565 if (!vcpu_valid_wakeup(vcpu)) { 3566 shrink_halt_poll_ns(vcpu); 3567 } else if (max_halt_poll_ns) { 3568 if (halt_ns <= vcpu->halt_poll_ns) 3569 ; 3570 /* we had a long block, shrink polling */ 3571 else if (vcpu->halt_poll_ns && 3572 halt_ns > max_halt_poll_ns) 3573 shrink_halt_poll_ns(vcpu); 3574 /* we had a short halt and our poll time is too small */ 3575 else if (vcpu->halt_poll_ns < max_halt_poll_ns && 3576 halt_ns < max_halt_poll_ns) 3577 grow_halt_poll_ns(vcpu); 3578 } else { 3579 vcpu->halt_poll_ns = 0; 3580 } 3581 } 3582 3583 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3584 } 3585 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3586 3587 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3588 { 3589 if (__kvm_vcpu_wake_up(vcpu)) { 3590 WRITE_ONCE(vcpu->ready, true); 3591 ++vcpu->stat.generic.halt_wakeup; 3592 return true; 3593 } 3594 3595 return false; 3596 } 3597 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3598 3599 #ifndef CONFIG_S390 3600 /* 3601 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3602 */ 3603 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3604 { 3605 int me, cpu; 3606 3607 if (kvm_vcpu_wake_up(vcpu)) 3608 return; 3609 3610 me = get_cpu(); 3611 /* 3612 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3613 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3614 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3615 * within the vCPU thread itself. 3616 */ 3617 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3618 if (vcpu->mode == IN_GUEST_MODE) 3619 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3620 goto out; 3621 } 3622 3623 /* 3624 * Note, the vCPU could get migrated to a different pCPU at any point 3625 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3626 * IPI to the previous pCPU. But, that's ok because the purpose of the 3627 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3628 * vCPU also requires it to leave IN_GUEST_MODE. 3629 */ 3630 if (kvm_arch_vcpu_should_kick(vcpu)) { 3631 cpu = READ_ONCE(vcpu->cpu); 3632 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3633 smp_send_reschedule(cpu); 3634 } 3635 out: 3636 put_cpu(); 3637 } 3638 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3639 #endif /* !CONFIG_S390 */ 3640 3641 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3642 { 3643 struct pid *pid; 3644 struct task_struct *task = NULL; 3645 int ret = 0; 3646 3647 rcu_read_lock(); 3648 pid = rcu_dereference(target->pid); 3649 if (pid) 3650 task = get_pid_task(pid, PIDTYPE_PID); 3651 rcu_read_unlock(); 3652 if (!task) 3653 return ret; 3654 ret = yield_to(task, 1); 3655 put_task_struct(task); 3656 3657 return ret; 3658 } 3659 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3660 3661 /* 3662 * Helper that checks whether a VCPU is eligible for directed yield. 3663 * The most eligible candidate to yield to is decided by the following heuristics: 3664 * 3665 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3666 * (preempted lock holder), indicated by @in_spin_loop. 3667 * Set at the beginning and cleared at the end of interception/PLE handler.
3668 * 3669 * (b) VCPU which has done a pl-exit/cpu-relax intercept but did not get a 3670 * chance last time (it has mostly become eligible now since we have probably 3671 * yielded to the lock holder in the last iteration. This is tracked by toggling 3672 * @dy_eligible each time a VCPU is checked for eligibility.) 3673 * 3674 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3675 * to the preempted lock-holder could result in wrong VCPU selection and CPU 3676 * burning. Giving priority to a potential lock-holder increases lock 3677 * progress. 3678 * 3679 * Since the algorithm is based on heuristics, accessing another VCPU's data 3680 * without locking does no harm. It may result in trying to yield to the same 3681 * VCPU, failing, and continuing with the next VCPU, and so on. 3682 */ 3683 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3684 { 3685 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3686 bool eligible; 3687 3688 eligible = !vcpu->spin_loop.in_spin_loop || 3689 vcpu->spin_loop.dy_eligible; 3690 3691 if (vcpu->spin_loop.in_spin_loop) 3692 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3693 3694 return eligible; 3695 #else 3696 return true; 3697 #endif 3698 } 3699 3700 /* 3701 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3702 * a vcpu_load/vcpu_put pair. However, for most architectures 3703 * kvm_arch_vcpu_runnable does not require vcpu_load. 3704 */ 3705 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3706 { 3707 return kvm_arch_vcpu_runnable(vcpu); 3708 } 3709 3710 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3711 { 3712 if (kvm_arch_dy_runnable(vcpu)) 3713 return true; 3714 3715 #ifdef CONFIG_KVM_ASYNC_PF 3716 if (!list_empty_careful(&vcpu->async_pf.done)) 3717 return true; 3718 #endif 3719 3720 return false; 3721 } 3722 3723 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3724 { 3725 return false; 3726 } 3727 3728 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3729 { 3730 struct kvm *kvm = me->kvm; 3731 struct kvm_vcpu *vcpu; 3732 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3733 unsigned long i; 3734 int yielded = 0; 3735 int try = 3; 3736 int pass; 3737 3738 kvm_vcpu_set_in_spin_loop(me, true); 3739 /* 3740 * We boost the priority of a VCPU that is runnable but not 3741 * currently running, because it got preempted by something 3742 * else and called schedule in __vcpu_run. Hopefully that 3743 * VCPU is holding the lock that we need and will release it. 3744 * We approximate round-robin by starting at the last boosted VCPU.
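 *
 * For example, with four vCPUs and last_boosted_vcpu == 2, the two passes
 * below visit yield candidates in the order 3, 0, 1, 2: pass 0 covers the
 * vCPUs after the last boosted one, pass 1 wraps around for the rest.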
3745 */ 3746 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3747 kvm_for_each_vcpu(i, vcpu, kvm) { 3748 if (!pass && i <= last_boosted_vcpu) { 3749 i = last_boosted_vcpu; 3750 continue; 3751 } else if (pass && i > last_boosted_vcpu) 3752 break; 3753 if (!READ_ONCE(vcpu->ready)) 3754 continue; 3755 if (vcpu == me) 3756 continue; 3757 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3758 continue; 3759 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3760 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3761 !kvm_arch_vcpu_in_kernel(vcpu)) 3762 continue; 3763 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3764 continue; 3765 3766 yielded = kvm_vcpu_yield_to(vcpu); 3767 if (yielded > 0) { 3768 kvm->last_boosted_vcpu = i; 3769 break; 3770 } else if (yielded < 0) { 3771 try--; 3772 if (!try) 3773 break; 3774 } 3775 } 3776 } 3777 kvm_vcpu_set_in_spin_loop(me, false); 3778 3779 /* Ensure vcpu is not eligible during next spinloop */ 3780 kvm_vcpu_set_dy_eligible(me, false); 3781 } 3782 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3783 3784 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3785 { 3786 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3787 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3788 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3789 kvm->dirty_ring_size / PAGE_SIZE); 3790 #else 3791 return false; 3792 #endif 3793 } 3794 3795 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3796 { 3797 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3798 struct page *page; 3799 3800 if (vmf->pgoff == 0) 3801 page = virt_to_page(vcpu->run); 3802 #ifdef CONFIG_X86 3803 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3804 page = virt_to_page(vcpu->arch.pio_data); 3805 #endif 3806 #ifdef CONFIG_KVM_MMIO 3807 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3808 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3809 #endif 3810 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3811 page = kvm_dirty_ring_get_page( 3812 &vcpu->dirty_ring, 3813 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3814 else 3815 return kvm_arch_vcpu_fault(vcpu, vmf); 3816 get_page(page); 3817 vmf->page = page; 3818 return 0; 3819 } 3820 3821 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3822 .fault = kvm_vcpu_fault, 3823 }; 3824 3825 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3826 { 3827 struct kvm_vcpu *vcpu = file->private_data; 3828 unsigned long pages = vma_pages(vma); 3829 3830 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3831 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3832 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3833 return -EINVAL; 3834 3835 vma->vm_ops = &kvm_vcpu_vm_ops; 3836 return 0; 3837 } 3838 3839 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3840 { 3841 struct kvm_vcpu *vcpu = filp->private_data; 3842 3843 kvm_put_kvm(vcpu->kvm); 3844 return 0; 3845 } 3846 3847 static const struct file_operations kvm_vcpu_fops = { 3848 .release = kvm_vcpu_release, 3849 .unlocked_ioctl = kvm_vcpu_ioctl, 3850 .mmap = kvm_vcpu_mmap, 3851 .llseek = noop_llseek, 3852 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3853 }; 3854 3855 /* 3856 * Allocates an inode for the vcpu. 
3857 */ 3858 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3859 { 3860 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3861 3862 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3863 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3864 } 3865 3866 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3867 static int vcpu_get_pid(void *data, u64 *val) 3868 { 3869 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data; 3870 *val = pid_nr(rcu_access_pointer(vcpu->pid)); 3871 return 0; 3872 } 3873 3874 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n"); 3875 3876 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3877 { 3878 struct dentry *debugfs_dentry; 3879 char dir_name[ITOA_MAX_LEN * 2]; 3880 3881 if (!debugfs_initialized()) 3882 return; 3883 3884 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3885 debugfs_dentry = debugfs_create_dir(dir_name, 3886 vcpu->kvm->debugfs_dentry); 3887 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, 3888 &vcpu_get_pid_fops); 3889 3890 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3891 } 3892 #endif 3893 3894 /* 3895 * Creates some virtual cpus. Good luck creating more than one. 3896 */ 3897 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3898 { 3899 int r; 3900 struct kvm_vcpu *vcpu; 3901 struct page *page; 3902 3903 if (id >= KVM_MAX_VCPU_IDS) 3904 return -EINVAL; 3905 3906 mutex_lock(&kvm->lock); 3907 if (kvm->created_vcpus >= kvm->max_vcpus) { 3908 mutex_unlock(&kvm->lock); 3909 return -EINVAL; 3910 } 3911 3912 r = kvm_arch_vcpu_precreate(kvm, id); 3913 if (r) { 3914 mutex_unlock(&kvm->lock); 3915 return r; 3916 } 3917 3918 kvm->created_vcpus++; 3919 mutex_unlock(&kvm->lock); 3920 3921 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3922 if (!vcpu) { 3923 r = -ENOMEM; 3924 goto vcpu_decrement; 3925 } 3926 3927 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3928 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3929 if (!page) { 3930 r = -ENOMEM; 3931 goto vcpu_free; 3932 } 3933 vcpu->run = page_address(page); 3934 3935 kvm_vcpu_init(vcpu, kvm, id); 3936 3937 r = kvm_arch_vcpu_create(vcpu); 3938 if (r) 3939 goto vcpu_free_run_page; 3940 3941 if (kvm->dirty_ring_size) { 3942 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3943 id, kvm->dirty_ring_size); 3944 if (r) 3945 goto arch_vcpu_destroy; 3946 } 3947 3948 mutex_lock(&kvm->lock); 3949 3950 #ifdef CONFIG_LOCKDEP 3951 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */ 3952 mutex_lock(&vcpu->mutex); 3953 mutex_unlock(&vcpu->mutex); 3954 #endif 3955 3956 if (kvm_get_vcpu_by_id(kvm, id)) { 3957 r = -EEXIST; 3958 goto unlock_vcpu_destroy; 3959 } 3960 3961 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3962 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); 3963 BUG_ON(r == -EBUSY); 3964 if (r) 3965 goto unlock_vcpu_destroy; 3966 3967 /* Now it's all set up, let userspace reach it */ 3968 kvm_get_kvm(kvm); 3969 r = create_vcpu_fd(vcpu); 3970 if (r < 0) { 3971 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); 3972 kvm_put_kvm_no_destroy(kvm); 3973 goto unlock_vcpu_destroy; 3974 } 3975 3976 /* 3977 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 3978 * pointer before kvm->online_vcpu's incremented value. 
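	 *
	 * A sketch of the pairing with the generic reader, kvm_get_vcpu():
	 *
	 *	creator (below)                     reader
	 *	--------------------------------    ----------------------------------
	 *	xa_insert(&kvm->vcpu_array, ...)    num = atomic_read(&kvm->online_vcpus)
	 *	smp_wmb()                           smp_rmb()
	 *	atomic_inc(&kvm->online_vcpus)      xa_load(&kvm->vcpu_array, i)
	 *
	 * A reader that observes the incremented count is thus guaranteed to
	 * also observe the vcpu pointer stored in the array.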
3979 */ 3980 smp_wmb(); 3981 atomic_inc(&kvm->online_vcpus); 3982 3983 mutex_unlock(&kvm->lock); 3984 kvm_arch_vcpu_postcreate(vcpu); 3985 kvm_create_vcpu_debugfs(vcpu); 3986 return r; 3987 3988 unlock_vcpu_destroy: 3989 mutex_unlock(&kvm->lock); 3990 kvm_dirty_ring_free(&vcpu->dirty_ring); 3991 arch_vcpu_destroy: 3992 kvm_arch_vcpu_destroy(vcpu); 3993 vcpu_free_run_page: 3994 free_page((unsigned long)vcpu->run); 3995 vcpu_free: 3996 kmem_cache_free(kvm_vcpu_cache, vcpu); 3997 vcpu_decrement: 3998 mutex_lock(&kvm->lock); 3999 kvm->created_vcpus--; 4000 mutex_unlock(&kvm->lock); 4001 return r; 4002 } 4003 4004 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 4005 { 4006 if (sigset) { 4007 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 4008 vcpu->sigset_active = 1; 4009 vcpu->sigset = *sigset; 4010 } else 4011 vcpu->sigset_active = 0; 4012 return 0; 4013 } 4014 4015 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 4016 size_t size, loff_t *offset) 4017 { 4018 struct kvm_vcpu *vcpu = file->private_data; 4019 4020 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 4021 &kvm_vcpu_stats_desc[0], &vcpu->stat, 4022 sizeof(vcpu->stat), user_buffer, size, offset); 4023 } 4024 4025 static const struct file_operations kvm_vcpu_stats_fops = { 4026 .read = kvm_vcpu_stats_read, 4027 .llseek = noop_llseek, 4028 }; 4029 4030 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 4031 { 4032 int fd; 4033 struct file *file; 4034 char name[15 + ITOA_MAX_LEN + 1]; 4035 4036 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 4037 4038 fd = get_unused_fd_flags(O_CLOEXEC); 4039 if (fd < 0) 4040 return fd; 4041 4042 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 4043 if (IS_ERR(file)) { 4044 put_unused_fd(fd); 4045 return PTR_ERR(file); 4046 } 4047 file->f_mode |= FMODE_PREAD; 4048 fd_install(fd, file); 4049 4050 return fd; 4051 } 4052 4053 static long kvm_vcpu_ioctl(struct file *filp, 4054 unsigned int ioctl, unsigned long arg) 4055 { 4056 struct kvm_vcpu *vcpu = filp->private_data; 4057 void __user *argp = (void __user *)arg; 4058 int r; 4059 struct kvm_fpu *fpu = NULL; 4060 struct kvm_sregs *kvm_sregs = NULL; 4061 4062 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4063 return -EIO; 4064 4065 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 4066 return -EINVAL; 4067 4068 /* 4069 * Some architectures have vcpu ioctls that are asynchronous to vcpu 4070 * execution; mutex_lock() would break them. 4071 */ 4072 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 4073 if (r != -ENOIOCTLCMD) 4074 return r; 4075 4076 if (mutex_lock_killable(&vcpu->mutex)) 4077 return -EINTR; 4078 switch (ioctl) { 4079 case KVM_RUN: { 4080 struct pid *oldpid; 4081 r = -EINVAL; 4082 if (arg) 4083 goto out; 4084 oldpid = rcu_access_pointer(vcpu->pid); 4085 if (unlikely(oldpid != task_pid(current))) { 4086 /* The thread running this VCPU changed. 
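		 * This happens, e.g., when the VMM hands the vCPU fd over to
		 * a new thread and issues KVM_RUN from it; update the cached
		 * pid and let the architecture react to the change.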
*/ 4087 struct pid *newpid; 4088 4089 r = kvm_arch_vcpu_run_pid_change(vcpu); 4090 if (r) 4091 break; 4092 4093 newpid = get_task_pid(current, PIDTYPE_PID); 4094 rcu_assign_pointer(vcpu->pid, newpid); 4095 if (oldpid) 4096 synchronize_rcu(); 4097 put_pid(oldpid); 4098 } 4099 r = kvm_arch_vcpu_ioctl_run(vcpu); 4100 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 4101 break; 4102 } 4103 case KVM_GET_REGS: { 4104 struct kvm_regs *kvm_regs; 4105 4106 r = -ENOMEM; 4107 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 4108 if (!kvm_regs) 4109 goto out; 4110 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 4111 if (r) 4112 goto out_free1; 4113 r = -EFAULT; 4114 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 4115 goto out_free1; 4116 r = 0; 4117 out_free1: 4118 kfree(kvm_regs); 4119 break; 4120 } 4121 case KVM_SET_REGS: { 4122 struct kvm_regs *kvm_regs; 4123 4124 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 4125 if (IS_ERR(kvm_regs)) { 4126 r = PTR_ERR(kvm_regs); 4127 goto out; 4128 } 4129 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 4130 kfree(kvm_regs); 4131 break; 4132 } 4133 case KVM_GET_SREGS: { 4134 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 4135 GFP_KERNEL_ACCOUNT); 4136 r = -ENOMEM; 4137 if (!kvm_sregs) 4138 goto out; 4139 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 4140 if (r) 4141 goto out; 4142 r = -EFAULT; 4143 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 4144 goto out; 4145 r = 0; 4146 break; 4147 } 4148 case KVM_SET_SREGS: { 4149 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 4150 if (IS_ERR(kvm_sregs)) { 4151 r = PTR_ERR(kvm_sregs); 4152 kvm_sregs = NULL; 4153 goto out; 4154 } 4155 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 4156 break; 4157 } 4158 case KVM_GET_MP_STATE: { 4159 struct kvm_mp_state mp_state; 4160 4161 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 4162 if (r) 4163 goto out; 4164 r = -EFAULT; 4165 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 4166 goto out; 4167 r = 0; 4168 break; 4169 } 4170 case KVM_SET_MP_STATE: { 4171 struct kvm_mp_state mp_state; 4172 4173 r = -EFAULT; 4174 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 4175 goto out; 4176 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 4177 break; 4178 } 4179 case KVM_TRANSLATE: { 4180 struct kvm_translation tr; 4181 4182 r = -EFAULT; 4183 if (copy_from_user(&tr, argp, sizeof(tr))) 4184 goto out; 4185 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4186 if (r) 4187 goto out; 4188 r = -EFAULT; 4189 if (copy_to_user(argp, &tr, sizeof(tr))) 4190 goto out; 4191 r = 0; 4192 break; 4193 } 4194 case KVM_SET_GUEST_DEBUG: { 4195 struct kvm_guest_debug dbg; 4196 4197 r = -EFAULT; 4198 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4199 goto out; 4200 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4201 break; 4202 } 4203 case KVM_SET_SIGNAL_MASK: { 4204 struct kvm_signal_mask __user *sigmask_arg = argp; 4205 struct kvm_signal_mask kvm_sigmask; 4206 sigset_t sigset, *p; 4207 4208 p = NULL; 4209 if (argp) { 4210 r = -EFAULT; 4211 if (copy_from_user(&kvm_sigmask, argp, 4212 sizeof(kvm_sigmask))) 4213 goto out; 4214 r = -EINVAL; 4215 if (kvm_sigmask.len != sizeof(sigset)) 4216 goto out; 4217 r = -EFAULT; 4218 if (copy_from_user(&sigset, sigmask_arg->sigset, 4219 sizeof(sigset))) 4220 goto out; 4221 p = &sigset; 4222 } 4223 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4224 break; 4225 } 4226 case KVM_GET_FPU: { 4227 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4228 r = -ENOMEM; 4229 if (!fpu) 4230 goto out; 
4231 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4232 if (r) 4233 goto out; 4234 r = -EFAULT; 4235 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4236 goto out; 4237 r = 0; 4238 break; 4239 } 4240 case KVM_SET_FPU: { 4241 fpu = memdup_user(argp, sizeof(*fpu)); 4242 if (IS_ERR(fpu)) { 4243 r = PTR_ERR(fpu); 4244 fpu = NULL; 4245 goto out; 4246 } 4247 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4248 break; 4249 } 4250 case KVM_GET_STATS_FD: { 4251 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4252 break; 4253 } 4254 default: 4255 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4256 } 4257 out: 4258 mutex_unlock(&vcpu->mutex); 4259 kfree(fpu); 4260 kfree(kvm_sregs); 4261 return r; 4262 } 4263 4264 #ifdef CONFIG_KVM_COMPAT 4265 static long kvm_vcpu_compat_ioctl(struct file *filp, 4266 unsigned int ioctl, unsigned long arg) 4267 { 4268 struct kvm_vcpu *vcpu = filp->private_data; 4269 void __user *argp = compat_ptr(arg); 4270 int r; 4271 4272 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4273 return -EIO; 4274 4275 switch (ioctl) { 4276 case KVM_SET_SIGNAL_MASK: { 4277 struct kvm_signal_mask __user *sigmask_arg = argp; 4278 struct kvm_signal_mask kvm_sigmask; 4279 sigset_t sigset; 4280 4281 if (argp) { 4282 r = -EFAULT; 4283 if (copy_from_user(&kvm_sigmask, argp, 4284 sizeof(kvm_sigmask))) 4285 goto out; 4286 r = -EINVAL; 4287 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4288 goto out; 4289 r = -EFAULT; 4290 if (get_compat_sigset(&sigset, 4291 (compat_sigset_t __user *)sigmask_arg->sigset)) 4292 goto out; 4293 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4294 } else 4295 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4296 break; 4297 } 4298 default: 4299 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4300 } 4301 4302 out: 4303 return r; 4304 } 4305 #endif 4306 4307 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4308 { 4309 struct kvm_device *dev = filp->private_data; 4310 4311 if (dev->ops->mmap) 4312 return dev->ops->mmap(dev, vma); 4313 4314 return -ENODEV; 4315 } 4316 4317 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4318 int (*accessor)(struct kvm_device *dev, 4319 struct kvm_device_attr *attr), 4320 unsigned long arg) 4321 { 4322 struct kvm_device_attr attr; 4323 4324 if (!accessor) 4325 return -EPERM; 4326 4327 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4328 return -EFAULT; 4329 4330 return accessor(dev, &attr); 4331 } 4332 4333 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4334 unsigned long arg) 4335 { 4336 struct kvm_device *dev = filp->private_data; 4337 4338 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4339 return -EIO; 4340 4341 switch (ioctl) { 4342 case KVM_SET_DEVICE_ATTR: 4343 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4344 case KVM_GET_DEVICE_ATTR: 4345 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4346 case KVM_HAS_DEVICE_ATTR: 4347 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4348 default: 4349 if (dev->ops->ioctl) 4350 return dev->ops->ioctl(dev, ioctl, arg); 4351 4352 return -ENOTTY; 4353 } 4354 } 4355 4356 static int kvm_device_release(struct inode *inode, struct file *filp) 4357 { 4358 struct kvm_device *dev = filp->private_data; 4359 struct kvm *kvm = dev->kvm; 4360 4361 if (dev->ops->release) { 4362 mutex_lock(&kvm->lock); 4363 list_del(&dev->vm_node); 4364 dev->ops->release(dev); 4365 mutex_unlock(&kvm->lock); 4366 } 4367 4368 kvm_put_kvm(kvm); 4369 return 0; 4370 } 4371 4372 static const struct file_operations kvm_device_fops = { 4373 
.unlocked_ioctl = kvm_device_ioctl, 4374 .release = kvm_device_release, 4375 KVM_COMPAT(kvm_device_ioctl), 4376 .mmap = kvm_device_mmap, 4377 }; 4378 4379 struct kvm_device *kvm_device_from_filp(struct file *filp) 4380 { 4381 if (filp->f_op != &kvm_device_fops) 4382 return NULL; 4383 4384 return filp->private_data; 4385 } 4386 4387 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4388 #ifdef CONFIG_KVM_MPIC 4389 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4390 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4391 #endif 4392 }; 4393 4394 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4395 { 4396 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4397 return -ENOSPC; 4398 4399 if (kvm_device_ops_table[type] != NULL) 4400 return -EEXIST; 4401 4402 kvm_device_ops_table[type] = ops; 4403 return 0; 4404 } 4405 4406 void kvm_unregister_device_ops(u32 type) 4407 { 4408 if (kvm_device_ops_table[type] != NULL) 4409 kvm_device_ops_table[type] = NULL; 4410 } 4411 4412 static int kvm_ioctl_create_device(struct kvm *kvm, 4413 struct kvm_create_device *cd) 4414 { 4415 const struct kvm_device_ops *ops; 4416 struct kvm_device *dev; 4417 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4418 int type; 4419 int ret; 4420 4421 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4422 return -ENODEV; 4423 4424 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4425 ops = kvm_device_ops_table[type]; 4426 if (ops == NULL) 4427 return -ENODEV; 4428 4429 if (test) 4430 return 0; 4431 4432 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4433 if (!dev) 4434 return -ENOMEM; 4435 4436 dev->ops = ops; 4437 dev->kvm = kvm; 4438 4439 mutex_lock(&kvm->lock); 4440 ret = ops->create(dev, type); 4441 if (ret < 0) { 4442 mutex_unlock(&kvm->lock); 4443 kfree(dev); 4444 return ret; 4445 } 4446 list_add(&dev->vm_node, &kvm->devices); 4447 mutex_unlock(&kvm->lock); 4448 4449 if (ops->init) 4450 ops->init(dev); 4451 4452 kvm_get_kvm(kvm); 4453 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4454 if (ret < 0) { 4455 kvm_put_kvm_no_destroy(kvm); 4456 mutex_lock(&kvm->lock); 4457 list_del(&dev->vm_node); 4458 if (ops->release) 4459 ops->release(dev); 4460 mutex_unlock(&kvm->lock); 4461 if (ops->destroy) 4462 ops->destroy(dev); 4463 return ret; 4464 } 4465 4466 cd->fd = ret; 4467 return 0; 4468 } 4469 4470 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4471 { 4472 switch (arg) { 4473 case KVM_CAP_USER_MEMORY: 4474 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4475 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4476 case KVM_CAP_INTERNAL_ERROR_DATA: 4477 #ifdef CONFIG_HAVE_KVM_MSI 4478 case KVM_CAP_SIGNAL_MSI: 4479 #endif 4480 #ifdef CONFIG_HAVE_KVM_IRQFD 4481 case KVM_CAP_IRQFD: 4482 #endif 4483 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4484 case KVM_CAP_CHECK_EXTENSION_VM: 4485 case KVM_CAP_ENABLE_CAP_VM: 4486 case KVM_CAP_HALT_POLL: 4487 return 1; 4488 #ifdef CONFIG_KVM_MMIO 4489 case KVM_CAP_COALESCED_MMIO: 4490 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4491 case KVM_CAP_COALESCED_PIO: 4492 return 1; 4493 #endif 4494 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4495 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4496 return KVM_DIRTY_LOG_MANUAL_CAPS; 4497 #endif 4498 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4499 case KVM_CAP_IRQ_ROUTING: 4500 return KVM_MAX_IRQ_ROUTES; 4501 #endif 4502 #if KVM_ADDRESS_SPACE_NUM > 1 4503 case KVM_CAP_MULTI_ADDRESS_SPACE: 4504 return KVM_ADDRESS_SPACE_NUM; 4505 #endif 4506 case KVM_CAP_NR_MEMSLOTS: 4507 return 
KVM_USER_MEM_SLOTS;
4508 	case KVM_CAP_DIRTY_LOG_RING:
4509 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4510 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4511 #else
4512 		return 0;
4513 #endif
4514 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4515 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4516 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4517 #else
4518 		return 0;
4519 #endif
4520 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4521 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4522 #endif
4523 	case KVM_CAP_BINARY_STATS_FD:
4524 	case KVM_CAP_SYSTEM_EVENT_DATA:
4525 		return 1;
4526 	default:
4527 		break;
4528 	}
4529 	return kvm_vm_ioctl_check_extension(kvm, arg);
4530 }
4531 
4532 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4533 {
4534 	int r;
4535 
4536 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4537 		return -EINVAL;
4538 
4539 	/* The size must be a power of 2 */
4540 	if (!size || (size & (size - 1)))
4541 		return -EINVAL;
4542 
4543 	/* It must be big enough to hold the reserved entries, and at least a page */
4544 	if (size < kvm_dirty_ring_get_rsvd_entries() *
4545 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4546 		return -EINVAL;
4547 
4548 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4549 	    sizeof(struct kvm_dirty_gfn))
4550 		return -E2BIG;
4551 
4552 	/* The ring size may only be set once */
4553 	if (kvm->dirty_ring_size)
4554 		return -EINVAL;
4555 
4556 	mutex_lock(&kvm->lock);
4557 
4558 	if (kvm->created_vcpus) {
4559 		/* The size cannot be changed after vCPUs have been created */
4560 		r = -EINVAL;
4561 	} else {
4562 		kvm->dirty_ring_size = size;
4563 		r = 0;
4564 	}
4565 
4566 	mutex_unlock(&kvm->lock);
4567 	return r;
4568 }
4569 
4570 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4571 {
4572 	unsigned long i;
4573 	struct kvm_vcpu *vcpu;
4574 	int cleared = 0;
4575 
4576 	if (!kvm->dirty_ring_size)
4577 		return -EINVAL;
4578 
4579 	mutex_lock(&kvm->slots_lock);
4580 
4581 	kvm_for_each_vcpu(i, vcpu, kvm)
4582 		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4583 
4584 	mutex_unlock(&kvm->slots_lock);
4585 
4586 	if (cleared)
4587 		kvm_flush_remote_tlbs(kvm);
4588 
4589 	return cleared;
4590 }
4591 
4592 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4593 						  struct kvm_enable_cap *cap)
4594 {
4595 	return -EINVAL;
4596 }
4597 
4598 static bool kvm_are_all_memslots_empty(struct kvm *kvm)
4599 {
4600 	int i;
4601 
4602 	lockdep_assert_held(&kvm->slots_lock);
4603 
4604 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
4605 		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4606 			return false;
4607 	}
4608 
4609 	return true;
4610 }
4611 
4612 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4613 					   struct kvm_enable_cap *cap)
4614 {
4615 	switch (cap->cap) {
4616 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4617 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4618 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4619 
4620 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4621 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4622 
4623 		if (cap->flags || (cap->args[0] & ~allowed_options))
4624 			return -EINVAL;
4625 		kvm->manual_dirty_log_protect = cap->args[0];
4626 		return 0;
4627 	}
4628 #endif
4629 	case KVM_CAP_HALT_POLL: {
4630 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4631 			return -EINVAL;
4632 
4633 		kvm->max_halt_poll_ns = cap->args[0];
4634 
4635 		/*
4636 		 * Ensure kvm->override_halt_poll_ns does not become visible
4637 		 * before kvm->max_halt_poll_ns.
4638 		 *
4639 		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
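		 *
		 * A sketch of the reader side (see kvm_vcpu_max_halt_poll_ns()):
		 *
		 *	if (kvm->override_halt_poll_ns) {
		 *		smp_rmb();
		 *		max = READ_ONCE(kvm->max_halt_poll_ns);
		 *	}
		 *
		 * so a vCPU that observes the override flag is guaranteed to
		 * observe the limit written above rather than a stale value.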
4640 */ 4641 smp_wmb(); 4642 kvm->override_halt_poll_ns = true; 4643 4644 return 0; 4645 } 4646 case KVM_CAP_DIRTY_LOG_RING: 4647 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4648 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) 4649 return -EINVAL; 4650 4651 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4652 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: { 4653 int r = -EINVAL; 4654 4655 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) || 4656 !kvm->dirty_ring_size || cap->flags) 4657 return r; 4658 4659 mutex_lock(&kvm->slots_lock); 4660 4661 /* 4662 * For simplicity, allow enabling ring+bitmap if and only if 4663 * there are no memslots, e.g. to ensure all memslots allocate 4664 * a bitmap after the capability is enabled. 4665 */ 4666 if (kvm_are_all_memslots_empty(kvm)) { 4667 kvm->dirty_ring_with_bitmap = true; 4668 r = 0; 4669 } 4670 4671 mutex_unlock(&kvm->slots_lock); 4672 4673 return r; 4674 } 4675 default: 4676 return kvm_vm_ioctl_enable_cap(kvm, cap); 4677 } 4678 } 4679 4680 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4681 size_t size, loff_t *offset) 4682 { 4683 struct kvm *kvm = file->private_data; 4684 4685 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4686 &kvm_vm_stats_desc[0], &kvm->stat, 4687 sizeof(kvm->stat), user_buffer, size, offset); 4688 } 4689 4690 static const struct file_operations kvm_vm_stats_fops = { 4691 .read = kvm_vm_stats_read, 4692 .llseek = noop_llseek, 4693 }; 4694 4695 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4696 { 4697 int fd; 4698 struct file *file; 4699 4700 fd = get_unused_fd_flags(O_CLOEXEC); 4701 if (fd < 0) 4702 return fd; 4703 4704 file = anon_inode_getfile("kvm-vm-stats", 4705 &kvm_vm_stats_fops, kvm, O_RDONLY); 4706 if (IS_ERR(file)) { 4707 put_unused_fd(fd); 4708 return PTR_ERR(file); 4709 } 4710 file->f_mode |= FMODE_PREAD; 4711 fd_install(fd, file); 4712 4713 return fd; 4714 } 4715 4716 static long kvm_vm_ioctl(struct file *filp, 4717 unsigned int ioctl, unsigned long arg) 4718 { 4719 struct kvm *kvm = filp->private_data; 4720 void __user *argp = (void __user *)arg; 4721 int r; 4722 4723 if (kvm->mm != current->mm || kvm->vm_dead) 4724 return -EIO; 4725 switch (ioctl) { 4726 case KVM_CREATE_VCPU: 4727 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4728 break; 4729 case KVM_ENABLE_CAP: { 4730 struct kvm_enable_cap cap; 4731 4732 r = -EFAULT; 4733 if (copy_from_user(&cap, argp, sizeof(cap))) 4734 goto out; 4735 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4736 break; 4737 } 4738 case KVM_SET_USER_MEMORY_REGION: { 4739 struct kvm_userspace_memory_region kvm_userspace_mem; 4740 4741 r = -EFAULT; 4742 if (copy_from_user(&kvm_userspace_mem, argp, 4743 sizeof(kvm_userspace_mem))) 4744 goto out; 4745 4746 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4747 break; 4748 } 4749 case KVM_GET_DIRTY_LOG: { 4750 struct kvm_dirty_log log; 4751 4752 r = -EFAULT; 4753 if (copy_from_user(&log, argp, sizeof(log))) 4754 goto out; 4755 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4756 break; 4757 } 4758 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4759 case KVM_CLEAR_DIRTY_LOG: { 4760 struct kvm_clear_dirty_log log; 4761 4762 r = -EFAULT; 4763 if (copy_from_user(&log, argp, sizeof(log))) 4764 goto out; 4765 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4766 break; 4767 } 4768 #endif 4769 #ifdef CONFIG_KVM_MMIO 4770 case KVM_REGISTER_COALESCED_MMIO: { 4771 struct kvm_coalesced_mmio_zone zone; 4772 4773 r = -EFAULT; 4774 if (copy_from_user(&zone, argp, sizeof(zone))) 4775 
goto out; 4776 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4777 break; 4778 } 4779 case KVM_UNREGISTER_COALESCED_MMIO: { 4780 struct kvm_coalesced_mmio_zone zone; 4781 4782 r = -EFAULT; 4783 if (copy_from_user(&zone, argp, sizeof(zone))) 4784 goto out; 4785 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4786 break; 4787 } 4788 #endif 4789 case KVM_IRQFD: { 4790 struct kvm_irqfd data; 4791 4792 r = -EFAULT; 4793 if (copy_from_user(&data, argp, sizeof(data))) 4794 goto out; 4795 r = kvm_irqfd(kvm, &data); 4796 break; 4797 } 4798 case KVM_IOEVENTFD: { 4799 struct kvm_ioeventfd data; 4800 4801 r = -EFAULT; 4802 if (copy_from_user(&data, argp, sizeof(data))) 4803 goto out; 4804 r = kvm_ioeventfd(kvm, &data); 4805 break; 4806 } 4807 #ifdef CONFIG_HAVE_KVM_MSI 4808 case KVM_SIGNAL_MSI: { 4809 struct kvm_msi msi; 4810 4811 r = -EFAULT; 4812 if (copy_from_user(&msi, argp, sizeof(msi))) 4813 goto out; 4814 r = kvm_send_userspace_msi(kvm, &msi); 4815 break; 4816 } 4817 #endif 4818 #ifdef __KVM_HAVE_IRQ_LINE 4819 case KVM_IRQ_LINE_STATUS: 4820 case KVM_IRQ_LINE: { 4821 struct kvm_irq_level irq_event; 4822 4823 r = -EFAULT; 4824 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4825 goto out; 4826 4827 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4828 ioctl == KVM_IRQ_LINE_STATUS); 4829 if (r) 4830 goto out; 4831 4832 r = -EFAULT; 4833 if (ioctl == KVM_IRQ_LINE_STATUS) { 4834 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4835 goto out; 4836 } 4837 4838 r = 0; 4839 break; 4840 } 4841 #endif 4842 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4843 case KVM_SET_GSI_ROUTING: { 4844 struct kvm_irq_routing routing; 4845 struct kvm_irq_routing __user *urouting; 4846 struct kvm_irq_routing_entry *entries = NULL; 4847 4848 r = -EFAULT; 4849 if (copy_from_user(&routing, argp, sizeof(routing))) 4850 goto out; 4851 r = -EINVAL; 4852 if (!kvm_arch_can_set_irq_routing(kvm)) 4853 goto out; 4854 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4855 goto out; 4856 if (routing.flags) 4857 goto out; 4858 if (routing.nr) { 4859 urouting = argp; 4860 entries = vmemdup_user(urouting->entries, 4861 array_size(sizeof(*entries), 4862 routing.nr)); 4863 if (IS_ERR(entries)) { 4864 r = PTR_ERR(entries); 4865 goto out; 4866 } 4867 } 4868 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4869 routing.flags); 4870 kvfree(entries); 4871 break; 4872 } 4873 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4874 case KVM_CREATE_DEVICE: { 4875 struct kvm_create_device cd; 4876 4877 r = -EFAULT; 4878 if (copy_from_user(&cd, argp, sizeof(cd))) 4879 goto out; 4880 4881 r = kvm_ioctl_create_device(kvm, &cd); 4882 if (r) 4883 goto out; 4884 4885 r = -EFAULT; 4886 if (copy_to_user(argp, &cd, sizeof(cd))) 4887 goto out; 4888 4889 r = 0; 4890 break; 4891 } 4892 case KVM_CHECK_EXTENSION: 4893 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4894 break; 4895 case KVM_RESET_DIRTY_RINGS: 4896 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4897 break; 4898 case KVM_GET_STATS_FD: 4899 r = kvm_vm_ioctl_get_stats_fd(kvm); 4900 break; 4901 default: 4902 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4903 } 4904 out: 4905 return r; 4906 } 4907 4908 #ifdef CONFIG_KVM_COMPAT 4909 struct compat_kvm_dirty_log { 4910 __u32 slot; 4911 __u32 padding1; 4912 union { 4913 compat_uptr_t dirty_bitmap; /* one bit per page */ 4914 __u64 padding2; 4915 }; 4916 }; 4917 4918 struct compat_kvm_clear_dirty_log { 4919 __u32 slot; 4920 __u32 num_pages; 4921 __u64 first_page; 4922 union { 4923 compat_uptr_t dirty_bitmap; /* one bit per page */ 4924 __u64 padding2; 4925 }; 4926 }; 4927 
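/*
 * For reference, a sketch of the 64-bit userspace view of this ABI (vm_fd,
 * slot_id and bitmap are the caller's; illustrative only):
 *
 *	struct kvm_dirty_log log = {
 *		.slot	      = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * A 32-bit caller issues the same ioctl, but its struct layout matches the
 * compat definitions above, hence the pointer widening via compat_ptr() in
 * kvm_vm_compat_ioctl() below.
 */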
4928 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 4929 unsigned long arg) 4930 { 4931 return -ENOTTY; 4932 } 4933 4934 static long kvm_vm_compat_ioctl(struct file *filp, 4935 unsigned int ioctl, unsigned long arg) 4936 { 4937 struct kvm *kvm = filp->private_data; 4938 int r; 4939 4940 if (kvm->mm != current->mm || kvm->vm_dead) 4941 return -EIO; 4942 4943 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg); 4944 if (r != -ENOTTY) 4945 return r; 4946 4947 switch (ioctl) { 4948 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4949 case KVM_CLEAR_DIRTY_LOG: { 4950 struct compat_kvm_clear_dirty_log compat_log; 4951 struct kvm_clear_dirty_log log; 4952 4953 if (copy_from_user(&compat_log, (void __user *)arg, 4954 sizeof(compat_log))) 4955 return -EFAULT; 4956 log.slot = compat_log.slot; 4957 log.num_pages = compat_log.num_pages; 4958 log.first_page = compat_log.first_page; 4959 log.padding2 = compat_log.padding2; 4960 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4961 4962 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4963 break; 4964 } 4965 #endif 4966 case KVM_GET_DIRTY_LOG: { 4967 struct compat_kvm_dirty_log compat_log; 4968 struct kvm_dirty_log log; 4969 4970 if (copy_from_user(&compat_log, (void __user *)arg, 4971 sizeof(compat_log))) 4972 return -EFAULT; 4973 log.slot = compat_log.slot; 4974 log.padding1 = compat_log.padding1; 4975 log.padding2 = compat_log.padding2; 4976 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4977 4978 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4979 break; 4980 } 4981 default: 4982 r = kvm_vm_ioctl(filp, ioctl, arg); 4983 } 4984 return r; 4985 } 4986 #endif 4987 4988 static const struct file_operations kvm_vm_fops = { 4989 .release = kvm_vm_release, 4990 .unlocked_ioctl = kvm_vm_ioctl, 4991 .llseek = noop_llseek, 4992 KVM_COMPAT(kvm_vm_compat_ioctl), 4993 }; 4994 4995 bool file_is_kvm(struct file *file) 4996 { 4997 return file && file->f_op == &kvm_vm_fops; 4998 } 4999 EXPORT_SYMBOL_GPL(file_is_kvm); 5000 5001 static int kvm_dev_ioctl_create_vm(unsigned long type) 5002 { 5003 char fdname[ITOA_MAX_LEN + 1]; 5004 int r, fd; 5005 struct kvm *kvm; 5006 struct file *file; 5007 5008 fd = get_unused_fd_flags(O_CLOEXEC); 5009 if (fd < 0) 5010 return fd; 5011 5012 snprintf(fdname, sizeof(fdname), "%d", fd); 5013 5014 kvm = kvm_create_vm(type, fdname); 5015 if (IS_ERR(kvm)) { 5016 r = PTR_ERR(kvm); 5017 goto put_fd; 5018 } 5019 5020 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 5021 if (IS_ERR(file)) { 5022 r = PTR_ERR(file); 5023 goto put_kvm; 5024 } 5025 5026 /* 5027 * Don't call kvm_put_kvm anymore at this point; file->f_op is 5028 * already set, with ->release() being kvm_vm_release(). In error 5029 * cases it will be called by the final fput(file) and will take 5030 * care of doing kvm_put_kvm(kvm). 
5031 */ 5032 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 5033 5034 fd_install(fd, file); 5035 return fd; 5036 5037 put_kvm: 5038 kvm_put_kvm(kvm); 5039 put_fd: 5040 put_unused_fd(fd); 5041 return r; 5042 } 5043 5044 static long kvm_dev_ioctl(struct file *filp, 5045 unsigned int ioctl, unsigned long arg) 5046 { 5047 long r = -EINVAL; 5048 5049 switch (ioctl) { 5050 case KVM_GET_API_VERSION: 5051 if (arg) 5052 goto out; 5053 r = KVM_API_VERSION; 5054 break; 5055 case KVM_CREATE_VM: 5056 r = kvm_dev_ioctl_create_vm(arg); 5057 break; 5058 case KVM_CHECK_EXTENSION: 5059 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 5060 break; 5061 case KVM_GET_VCPU_MMAP_SIZE: 5062 if (arg) 5063 goto out; 5064 r = PAGE_SIZE; /* struct kvm_run */ 5065 #ifdef CONFIG_X86 5066 r += PAGE_SIZE; /* pio data page */ 5067 #endif 5068 #ifdef CONFIG_KVM_MMIO 5069 r += PAGE_SIZE; /* coalesced mmio ring page */ 5070 #endif 5071 break; 5072 case KVM_TRACE_ENABLE: 5073 case KVM_TRACE_PAUSE: 5074 case KVM_TRACE_DISABLE: 5075 r = -EOPNOTSUPP; 5076 break; 5077 default: 5078 return kvm_arch_dev_ioctl(filp, ioctl, arg); 5079 } 5080 out: 5081 return r; 5082 } 5083 5084 static struct file_operations kvm_chardev_ops = { 5085 .unlocked_ioctl = kvm_dev_ioctl, 5086 .llseek = noop_llseek, 5087 KVM_COMPAT(kvm_dev_ioctl), 5088 }; 5089 5090 static struct miscdevice kvm_dev = { 5091 KVM_MINOR, 5092 "kvm", 5093 &kvm_chardev_ops, 5094 }; 5095 5096 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 5097 __visible bool kvm_rebooting; 5098 EXPORT_SYMBOL_GPL(kvm_rebooting); 5099 5100 static DEFINE_PER_CPU(bool, hardware_enabled); 5101 static int kvm_usage_count; 5102 5103 static int __hardware_enable_nolock(void) 5104 { 5105 if (__this_cpu_read(hardware_enabled)) 5106 return 0; 5107 5108 if (kvm_arch_hardware_enable()) { 5109 pr_info("kvm: enabling virtualization on CPU%d failed\n", 5110 raw_smp_processor_id()); 5111 return -EIO; 5112 } 5113 5114 __this_cpu_write(hardware_enabled, true); 5115 return 0; 5116 } 5117 5118 static void hardware_enable_nolock(void *failed) 5119 { 5120 if (__hardware_enable_nolock()) 5121 atomic_inc(failed); 5122 } 5123 5124 static int kvm_online_cpu(unsigned int cpu) 5125 { 5126 int ret = 0; 5127 5128 /* 5129 * Abort the CPU online process if hardware virtualization cannot 5130 * be enabled. Otherwise running VMs would encounter unrecoverable 5131 * errors when scheduled to this CPU. 5132 */ 5133 mutex_lock(&kvm_lock); 5134 if (kvm_usage_count) 5135 ret = __hardware_enable_nolock(); 5136 mutex_unlock(&kvm_lock); 5137 return ret; 5138 } 5139 5140 static void hardware_disable_nolock(void *junk) 5141 { 5142 /* 5143 * Note, hardware_disable_all_nolock() tells all online CPUs to disable 5144 * hardware, not just CPUs that successfully enabled hardware! 
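	 *
	 * E.g. if enabling failed on one CPU during hardware_enable_all(),
	 * the rollback IPIs every online CPU; the per-CPU hardware_enabled
	 * flag below keeps the disable path from touching CPUs where
	 * enabling never succeeded.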
5145 	 */
5146 	if (!__this_cpu_read(hardware_enabled))
5147 		return;
5148 
5149 	kvm_arch_hardware_disable();
5150 
5151 	__this_cpu_write(hardware_enabled, false);
5152 }
5153 
5154 static int kvm_offline_cpu(unsigned int cpu)
5155 {
5156 	mutex_lock(&kvm_lock);
5157 	if (kvm_usage_count)
5158 		hardware_disable_nolock(NULL);
5159 	mutex_unlock(&kvm_lock);
5160 	return 0;
5161 }
5162 
5163 static void hardware_disable_all_nolock(void)
5164 {
5165 	BUG_ON(!kvm_usage_count);
5166 
5167 	kvm_usage_count--;
5168 	if (!kvm_usage_count)
5169 		on_each_cpu(hardware_disable_nolock, NULL, 1);
5170 }
5171 
5172 static void hardware_disable_all(void)
5173 {
5174 	cpus_read_lock();
5175 	mutex_lock(&kvm_lock);
5176 	hardware_disable_all_nolock();
5177 	mutex_unlock(&kvm_lock);
5178 	cpus_read_unlock();
5179 }
5180 
5181 static int hardware_enable_all(void)
5182 {
5183 	atomic_t failed = ATOMIC_INIT(0);
5184 	int r = 0;
5185 
5186 	/*
5187 	 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
5188 	 * is called, and so on_each_cpu() between them includes the CPU that
5189 	 * is being onlined.  As a result, hardware_enable_nolock() may get
5190 	 * invoked before kvm_online_cpu(), which also enables hardware if the
5191 	 * usage count is non-zero.  Disable CPU hotplug to avoid attempting to
5192 	 * enable hardware multiple times.
5193 	 */
5194 	cpus_read_lock();
5195 	mutex_lock(&kvm_lock);
5196 
5197 	kvm_usage_count++;
5198 	if (kvm_usage_count == 1) {
5199 		on_each_cpu(hardware_enable_nolock, &failed, 1);
5200 
5201 		if (atomic_read(&failed)) {
5202 			hardware_disable_all_nolock();
5203 			r = -EBUSY;
5204 		}
5205 	}
5206 
5207 	mutex_unlock(&kvm_lock);
5208 	cpus_read_unlock();
5209 
5210 	return r;
5211 }
5212 
5213 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
5214 		      void *v)
5215 {
5216 	/*
5217 	 * Some (well, at least mine) BIOSes hang on reboot if the CPU is
5218 	 * still in VMX root mode.
5219 	 *
5220 	 * Intel TXT also requires VMX to be off on all CPUs at shutdown.
5221 	 */
5222 	pr_info("kvm: exiting hardware virtualization\n");
5223 	kvm_rebooting = true;
5224 	on_each_cpu(hardware_disable_nolock, NULL, 1);
5225 	return NOTIFY_OK;
5226 }
5227 
5228 static struct notifier_block kvm_reboot_notifier = {
5229 	.notifier_call = kvm_reboot,
5230 	.priority = 0,
5231 };
5232 
5233 static int kvm_suspend(void)
5234 {
5235 	/*
5236 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5237 	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
5238 	 * is stable.  Assert that kvm_lock is not held to ensure the system
5239 	 * isn't suspended while KVM is enabling hardware.  Hardware enabling
5240 	 * can be preempted, but the task cannot be frozen until it has dropped
5241 	 * all locks (userspace tasks are frozen via a fake signal).
5242 */ 5243 lockdep_assert_not_held(&kvm_lock); 5244 lockdep_assert_irqs_disabled(); 5245 5246 if (kvm_usage_count) 5247 hardware_disable_nolock(NULL); 5248 return 0; 5249 } 5250 5251 static void kvm_resume(void) 5252 { 5253 lockdep_assert_not_held(&kvm_lock); 5254 lockdep_assert_irqs_disabled(); 5255 5256 if (kvm_usage_count) 5257 WARN_ON_ONCE(__hardware_enable_nolock()); 5258 } 5259 5260 static struct syscore_ops kvm_syscore_ops = { 5261 .suspend = kvm_suspend, 5262 .resume = kvm_resume, 5263 }; 5264 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5265 static int hardware_enable_all(void) 5266 { 5267 return 0; 5268 } 5269 5270 static void hardware_disable_all(void) 5271 { 5272 5273 } 5274 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5275 5276 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 5277 { 5278 int i; 5279 5280 for (i = 0; i < bus->dev_count; i++) { 5281 struct kvm_io_device *pos = bus->range[i].dev; 5282 5283 kvm_iodevice_destructor(pos); 5284 } 5285 kfree(bus); 5286 } 5287 5288 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 5289 const struct kvm_io_range *r2) 5290 { 5291 gpa_t addr1 = r1->addr; 5292 gpa_t addr2 = r2->addr; 5293 5294 if (addr1 < addr2) 5295 return -1; 5296 5297 /* If r2->len == 0, match the exact address. If r2->len != 0, 5298 * accept any overlapping write. Any order is acceptable for 5299 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 5300 * we process all of them. 5301 */ 5302 if (r2->len) { 5303 addr1 += r1->len; 5304 addr2 += r2->len; 5305 } 5306 5307 if (addr1 > addr2) 5308 return 1; 5309 5310 return 0; 5311 } 5312 5313 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 5314 { 5315 return kvm_io_bus_cmp(p1, p2); 5316 } 5317 5318 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 5319 gpa_t addr, int len) 5320 { 5321 struct kvm_io_range *range, key; 5322 int off; 5323 5324 key = (struct kvm_io_range) { 5325 .addr = addr, 5326 .len = len, 5327 }; 5328 5329 range = bsearch(&key, bus->range, bus->dev_count, 5330 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5331 if (range == NULL) 5332 return -ENOENT; 5333 5334 off = range - bus->range; 5335 5336 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5337 off--; 5338 5339 return off; 5340 } 5341 5342 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5343 struct kvm_io_range *range, const void *val) 5344 { 5345 int idx; 5346 5347 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5348 if (idx < 0) 5349 return -EOPNOTSUPP; 5350 5351 while (idx < bus->dev_count && 5352 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5353 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5354 range->len, val)) 5355 return idx; 5356 idx++; 5357 } 5358 5359 return -EOPNOTSUPP; 5360 } 5361 5362 /* kvm_io_bus_write - called under kvm->slots_lock */ 5363 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5364 int len, const void *val) 5365 { 5366 struct kvm_io_bus *bus; 5367 struct kvm_io_range range; 5368 int r; 5369 5370 range = (struct kvm_io_range) { 5371 .addr = addr, 5372 .len = len, 5373 }; 5374 5375 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5376 if (!bus) 5377 return -ENOMEM; 5378 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5379 return r < 0 ? 
r : 0; 5380 } 5381 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5382 5383 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5384 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5385 gpa_t addr, int len, const void *val, long cookie) 5386 { 5387 struct kvm_io_bus *bus; 5388 struct kvm_io_range range; 5389 5390 range = (struct kvm_io_range) { 5391 .addr = addr, 5392 .len = len, 5393 }; 5394 5395 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5396 if (!bus) 5397 return -ENOMEM; 5398 5399 /* First try the device referenced by cookie. */ 5400 if ((cookie >= 0) && (cookie < bus->dev_count) && 5401 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5402 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5403 val)) 5404 return cookie; 5405 5406 /* 5407 * cookie contained garbage; fall back to search and return the 5408 * correct cookie value. 5409 */ 5410 return __kvm_io_bus_write(vcpu, bus, &range, val); 5411 } 5412 5413 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5414 struct kvm_io_range *range, void *val) 5415 { 5416 int idx; 5417 5418 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5419 if (idx < 0) 5420 return -EOPNOTSUPP; 5421 5422 while (idx < bus->dev_count && 5423 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5424 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5425 range->len, val)) 5426 return idx; 5427 idx++; 5428 } 5429 5430 return -EOPNOTSUPP; 5431 } 5432 5433 /* kvm_io_bus_read - called under kvm->slots_lock */ 5434 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5435 int len, void *val) 5436 { 5437 struct kvm_io_bus *bus; 5438 struct kvm_io_range range; 5439 int r; 5440 5441 range = (struct kvm_io_range) { 5442 .addr = addr, 5443 .len = len, 5444 }; 5445 5446 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5447 if (!bus) 5448 return -ENOMEM; 5449 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5450 return r < 0 ? r : 0; 5451 } 5452 5453 /* Caller must hold slots_lock. 
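 * Devices are kept sorted by kvm_io_bus_cmp() so lookups can use bsearch():
 * registration copies the old bus, inserts the new range at its sorted
 * position, publishes the copy with rcu_assign_pointer(), and frees the old
 * bus only after synchronize_srcu_expedited(), so SRCU readers never see a
 * half-updated array.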
*/ 5454 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5455 int len, struct kvm_io_device *dev) 5456 { 5457 int i; 5458 struct kvm_io_bus *new_bus, *bus; 5459 struct kvm_io_range range; 5460 5461 bus = kvm_get_bus(kvm, bus_idx); 5462 if (!bus) 5463 return -ENOMEM; 5464 5465 /* exclude ioeventfd which is limited by maximum fd */ 5466 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5467 return -ENOSPC; 5468 5469 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5470 GFP_KERNEL_ACCOUNT); 5471 if (!new_bus) 5472 return -ENOMEM; 5473 5474 range = (struct kvm_io_range) { 5475 .addr = addr, 5476 .len = len, 5477 .dev = dev, 5478 }; 5479 5480 for (i = 0; i < bus->dev_count; i++) 5481 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5482 break; 5483 5484 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5485 new_bus->dev_count++; 5486 new_bus->range[i] = range; 5487 memcpy(new_bus->range + i + 1, bus->range + i, 5488 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5489 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5490 synchronize_srcu_expedited(&kvm->srcu); 5491 kfree(bus); 5492 5493 return 0; 5494 } 5495 5496 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5497 struct kvm_io_device *dev) 5498 { 5499 int i, j; 5500 struct kvm_io_bus *new_bus, *bus; 5501 5502 lockdep_assert_held(&kvm->slots_lock); 5503 5504 bus = kvm_get_bus(kvm, bus_idx); 5505 if (!bus) 5506 return 0; 5507 5508 for (i = 0; i < bus->dev_count; i++) { 5509 if (bus->range[i].dev == dev) { 5510 break; 5511 } 5512 } 5513 5514 if (i == bus->dev_count) 5515 return 0; 5516 5517 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5518 GFP_KERNEL_ACCOUNT); 5519 if (new_bus) { 5520 memcpy(new_bus, bus, struct_size(bus, range, i)); 5521 new_bus->dev_count--; 5522 memcpy(new_bus->range + i, bus->range + i + 1, 5523 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5524 } 5525 5526 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5527 synchronize_srcu_expedited(&kvm->srcu); 5528 5529 /* Destroy the old bus _after_ installing the (null) bus. */ 5530 if (!new_bus) { 5531 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5532 for (j = 0; j < bus->dev_count; j++) { 5533 if (j == i) 5534 continue; 5535 kvm_iodevice_destructor(bus->range[j].dev); 5536 } 5537 } 5538 5539 kfree(bus); 5540 return new_bus ? 0 : -ENOMEM; 5541 } 5542 5543 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5544 gpa_t addr) 5545 { 5546 struct kvm_io_bus *bus; 5547 int dev_idx, srcu_idx; 5548 struct kvm_io_device *iodev = NULL; 5549 5550 srcu_idx = srcu_read_lock(&kvm->srcu); 5551 5552 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5553 if (!bus) 5554 goto out_unlock; 5555 5556 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5557 if (dev_idx < 0) 5558 goto out_unlock; 5559 5560 iodev = bus->range[dev_idx].dev; 5561 5562 out_unlock: 5563 srcu_read_unlock(&kvm->srcu, srcu_idx); 5564 5565 return iodev; 5566 } 5567 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5568 5569 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5570 int (*get)(void *, u64 *), int (*set)(void *, u64), 5571 const char *fmt) 5572 { 5573 int ret; 5574 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5575 inode->i_private; 5576 5577 /* 5578 * The debugfs files are a reference to the kvm struct which 5579 * is still valid when kvm_destroy_vm is called. 
kvm_get_kvm_safe 5580 * avoids the race between open and the removal of the debugfs directory. 5581 */ 5582 if (!kvm_get_kvm_safe(stat_data->kvm)) 5583 return -ENOENT; 5584 5585 ret = simple_attr_open(inode, file, get, 5586 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5587 ? set : NULL, fmt); 5588 if (ret) 5589 kvm_put_kvm(stat_data->kvm); 5590 5591 return ret; 5592 } 5593 5594 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5595 { 5596 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5597 inode->i_private; 5598 5599 simple_attr_release(inode, file); 5600 kvm_put_kvm(stat_data->kvm); 5601 5602 return 0; 5603 } 5604 5605 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5606 { 5607 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5608 5609 return 0; 5610 } 5611 5612 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5613 { 5614 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5615 5616 return 0; 5617 } 5618 5619 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5620 { 5621 unsigned long i; 5622 struct kvm_vcpu *vcpu; 5623 5624 *val = 0; 5625 5626 kvm_for_each_vcpu(i, vcpu, kvm) 5627 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5628 5629 return 0; 5630 } 5631 5632 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5633 { 5634 unsigned long i; 5635 struct kvm_vcpu *vcpu; 5636 5637 kvm_for_each_vcpu(i, vcpu, kvm) 5638 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5639 5640 return 0; 5641 } 5642 5643 static int kvm_stat_data_get(void *data, u64 *val) 5644 { 5645 int r = -EFAULT; 5646 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5647 5648 switch (stat_data->kind) { 5649 case KVM_STAT_VM: 5650 r = kvm_get_stat_per_vm(stat_data->kvm, 5651 stat_data->desc->desc.offset, val); 5652 break; 5653 case KVM_STAT_VCPU: 5654 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5655 stat_data->desc->desc.offset, val); 5656 break; 5657 } 5658 5659 return r; 5660 } 5661 5662 static int kvm_stat_data_clear(void *data, u64 val) 5663 { 5664 int r = -EFAULT; 5665 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5666 5667 if (val) 5668 return -EINVAL; 5669 5670 switch (stat_data->kind) { 5671 case KVM_STAT_VM: 5672 r = kvm_clear_stat_per_vm(stat_data->kvm, 5673 stat_data->desc->desc.offset); 5674 break; 5675 case KVM_STAT_VCPU: 5676 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5677 stat_data->desc->desc.offset); 5678 break; 5679 } 5680 5681 return r; 5682 } 5683 5684 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5685 { 5686 __simple_attr_check_format("%llu\n", 0ull); 5687 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5688 kvm_stat_data_clear, "%llu\n"); 5689 } 5690 5691 static const struct file_operations stat_fops_per_vm = { 5692 .owner = THIS_MODULE, 5693 .open = kvm_stat_data_open, 5694 .release = kvm_debugfs_release, 5695 .read = simple_attr_read, 5696 .write = simple_attr_write, 5697 .llseek = no_llseek, 5698 }; 5699 5700 static int vm_stat_get(void *_offset, u64 *val) 5701 { 5702 unsigned offset = (long)_offset; 5703 struct kvm *kvm; 5704 u64 tmp_val; 5705 5706 *val = 0; 5707 mutex_lock(&kvm_lock); 5708 list_for_each_entry(kvm, &vm_list, vm_list) { 5709 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5710 *val += tmp_val; 5711 } 5712 mutex_unlock(&kvm_lock); 5713 return 0; 5714 } 5715 5716 static int vm_stat_clear(void *_offset, u64 val) 5717 { 5718 unsigned offset = (long)_offset; 5719 struct kvm *kvm; 5720 5721 if (val) 5722 return -EINVAL; 5723 
5724 mutex_lock(&kvm_lock); 5725 list_for_each_entry(kvm, &vm_list, vm_list) { 5726 kvm_clear_stat_per_vm(kvm, offset); 5727 } 5728 mutex_unlock(&kvm_lock); 5729 5730 return 0; 5731 } 5732 5733 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5734 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5735 5736 static int vcpu_stat_get(void *_offset, u64 *val) 5737 { 5738 unsigned offset = (long)_offset; 5739 struct kvm *kvm; 5740 u64 tmp_val; 5741 5742 *val = 0; 5743 mutex_lock(&kvm_lock); 5744 list_for_each_entry(kvm, &vm_list, vm_list) { 5745 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5746 *val += tmp_val; 5747 } 5748 mutex_unlock(&kvm_lock); 5749 return 0; 5750 } 5751 5752 static int vcpu_stat_clear(void *_offset, u64 val) 5753 { 5754 unsigned offset = (long)_offset; 5755 struct kvm *kvm; 5756 5757 if (val) 5758 return -EINVAL; 5759 5760 mutex_lock(&kvm_lock); 5761 list_for_each_entry(kvm, &vm_list, vm_list) { 5762 kvm_clear_stat_per_vcpu(kvm, offset); 5763 } 5764 mutex_unlock(&kvm_lock); 5765 5766 return 0; 5767 } 5768 5769 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5770 "%llu\n"); 5771 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5772 5773 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5774 { 5775 struct kobj_uevent_env *env; 5776 unsigned long long created, active; 5777 5778 if (!kvm_dev.this_device || !kvm) 5779 return; 5780 5781 mutex_lock(&kvm_lock); 5782 if (type == KVM_EVENT_CREATE_VM) { 5783 kvm_createvm_count++; 5784 kvm_active_vms++; 5785 } else if (type == KVM_EVENT_DESTROY_VM) { 5786 kvm_active_vms--; 5787 } 5788 created = kvm_createvm_count; 5789 active = kvm_active_vms; 5790 mutex_unlock(&kvm_lock); 5791 5792 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5793 if (!env) 5794 return; 5795 5796 add_uevent_var(env, "CREATED=%llu", created); 5797 add_uevent_var(env, "COUNT=%llu", active); 5798 5799 if (type == KVM_EVENT_CREATE_VM) { 5800 add_uevent_var(env, "EVENT=create"); 5801 kvm->userspace_pid = task_pid_nr(current); 5802 } else if (type == KVM_EVENT_DESTROY_VM) { 5803 add_uevent_var(env, "EVENT=destroy"); 5804 } 5805 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5806 5807 if (!IS_ERR(kvm->debugfs_dentry)) { 5808 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5809 5810 if (p) { 5811 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5812 if (!IS_ERR(tmp)) 5813 add_uevent_var(env, "STATS_PATH=%s", tmp); 5814 kfree(p); 5815 } 5816 } 5817 /* no need for checks, since we are adding at most only 5 keys */ 5818 env->envp[env->envp_idx++] = NULL; 5819 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5820 kfree(env); 5821 } 5822 5823 static void kvm_init_debug(void) 5824 { 5825 const struct file_operations *fops; 5826 const struct _kvm_stats_desc *pdesc; 5827 int i; 5828 5829 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5830 5831 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5832 pdesc = &kvm_vm_stats_desc[i]; 5833 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5834 fops = &vm_stat_fops; 5835 else 5836 fops = &vm_stat_readonly_fops; 5837 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5838 kvm_debugfs_dir, 5839 (void *)(long)pdesc->desc.offset, fops); 5840 } 5841 5842 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5843 pdesc = &kvm_vcpu_stats_desc[i]; 5844 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5845 fops = &vcpu_stat_fops; 5846 else 5847 fops = 
&vcpu_stat_readonly_fops; 5848 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5849 kvm_debugfs_dir, 5850 (void *)(long)pdesc->desc.offset, fops); 5851 } 5852 } 5853 5854 static inline 5855 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5856 { 5857 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5858 } 5859 5860 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5861 { 5862 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5863 5864 WRITE_ONCE(vcpu->preempted, false); 5865 WRITE_ONCE(vcpu->ready, false); 5866 5867 __this_cpu_write(kvm_running_vcpu, vcpu); 5868 kvm_arch_sched_in(vcpu, cpu); 5869 kvm_arch_vcpu_load(vcpu, cpu); 5870 } 5871 5872 static void kvm_sched_out(struct preempt_notifier *pn, 5873 struct task_struct *next) 5874 { 5875 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5876 5877 if (current->on_rq) { 5878 WRITE_ONCE(vcpu->preempted, true); 5879 WRITE_ONCE(vcpu->ready, true); 5880 } 5881 kvm_arch_vcpu_put(vcpu); 5882 __this_cpu_write(kvm_running_vcpu, NULL); 5883 } 5884 5885 /** 5886 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5887 * 5888 * We can disable preemption locally around accessing the per-CPU variable, 5889 * and use the resolved vcpu pointer after enabling preemption again, 5890 * because even if the current thread is migrated to another CPU, reading 5891 * the per-CPU value later will give us the same value as we update the 5892 * per-CPU variable in the preempt notifier handlers. 5893 */ 5894 struct kvm_vcpu *kvm_get_running_vcpu(void) 5895 { 5896 struct kvm_vcpu *vcpu; 5897 5898 preempt_disable(); 5899 vcpu = __this_cpu_read(kvm_running_vcpu); 5900 preempt_enable(); 5901 5902 return vcpu; 5903 } 5904 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5905 5906 /** 5907 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5908 */ 5909 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5910 { 5911 return &kvm_running_vcpu; 5912 } 5913 5914 #ifdef CONFIG_GUEST_PERF_EVENTS 5915 static unsigned int kvm_guest_state(void) 5916 { 5917 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5918 unsigned int state; 5919 5920 if (!kvm_arch_pmi_in_guest(vcpu)) 5921 return 0; 5922 5923 state = PERF_GUEST_ACTIVE; 5924 if (!kvm_arch_vcpu_in_kernel(vcpu)) 5925 state |= PERF_GUEST_USER; 5926 5927 return state; 5928 } 5929 5930 static unsigned long kvm_guest_get_ip(void) 5931 { 5932 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5933 5934 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). 
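	 * In other words, perf must only invoke ->get_ip() after ->state()
	 * has reported an active guest sample; the WARN below catches
	 * callers that violate that contract.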
*/
5935 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
5936 		return 0;
5937 
5938 	return kvm_arch_vcpu_get_ip(vcpu);
5939 }
5940 
5941 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5942 	.state			= kvm_guest_state,
5943 	.get_ip			= kvm_guest_get_ip,
5944 	.handle_intel_pt_intr	= NULL,
5945 };
5946 
5947 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
5948 {
5949 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
5950 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
5951 }
5952 void kvm_unregister_perf_callbacks(void)
5953 {
5954 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5955 }
5956 #endif
5957 
5958 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
5959 {
5960 	int r;
5961 	int cpu;
5962 
5963 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5964 	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
5965 				      kvm_online_cpu, kvm_offline_cpu);
5966 	if (r)
5967 		return r;
5968 
5969 	register_reboot_notifier(&kvm_reboot_notifier);
5970 	register_syscore_ops(&kvm_syscore_ops);
5971 #endif
5972 
5973 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
5974 	if (!vcpu_align)
5975 		vcpu_align = __alignof__(struct kvm_vcpu);
5976 	kvm_vcpu_cache =
5977 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5978 					   SLAB_ACCOUNT,
5979 					   offsetof(struct kvm_vcpu, arch),
5980 					   offsetofend(struct kvm_vcpu, stats_id)
5981 					   - offsetof(struct kvm_vcpu, arch),
5982 					   NULL);
5983 	if (!kvm_vcpu_cache) {
5984 		r = -ENOMEM;
5985 		goto err_vcpu_cache;
5986 	}
5987 
5988 	for_each_possible_cpu(cpu) {
5989 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
5990 					    GFP_KERNEL, cpu_to_node(cpu))) {
5991 			r = -ENOMEM;
5992 			goto err_cpu_kick_mask;
5993 		}
5994 	}
5995 
5996 	r = kvm_irqfd_init();
5997 	if (r)
5998 		goto err_irqfd;
5999 
6000 	r = kvm_async_pf_init();
6001 	if (r)
6002 		goto err_async_pf;
6003 
6004 	kvm_chardev_ops.owner = module;
6005 
6006 	kvm_preempt_ops.sched_in = kvm_sched_in;
6007 	kvm_preempt_ops.sched_out = kvm_sched_out;
6008 
6009 	kvm_init_debug();
6010 
6011 	r = kvm_vfio_ops_init();
6012 	if (WARN_ON_ONCE(r))
6013 		goto err_vfio;
6014 
6015 	/*
6016 	 * Registration _must_ be the very last thing done, as this exposes
6017 	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
6018 	 */
6019 	r = misc_register(&kvm_dev);
6020 	if (r) {
6021 		pr_err("kvm: misc device register failed\n");
6022 		goto err_register;
6023 	}
6024 
6025 	return 0;
6026 
6027 err_register:
6028 	kvm_vfio_ops_exit();
6029 err_vfio:
6030 	kvm_async_pf_deinit();
6031 err_async_pf:
6032 	kvm_irqfd_exit();
6033 err_irqfd:
6034 err_cpu_kick_mask:
6035 	for_each_possible_cpu(cpu)
6036 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6037 	kmem_cache_destroy(kvm_vcpu_cache);
6038 err_vcpu_cache:
6039 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6040 	unregister_syscore_ops(&kvm_syscore_ops);
6041 	unregister_reboot_notifier(&kvm_reboot_notifier);
6042 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6043 #endif
6044 	return r;
6045 }
6046 EXPORT_SYMBOL_GPL(kvm_init);
6047 
6048 void kvm_exit(void)
6049 {
6050 	int cpu;
6051 
6052 	/*
6053 	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
6054 	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6055 	 * to KVM while the module is being stopped.
void kvm_exit(void)
{
	int cpu;

	/*
	 * Note, unregistering /dev/kvm doesn't strictly need to come first:
	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
	 * to KVM while the module is being stopped.
	 */
	misc_deregister(&kvm_dev);

	debugfs_remove_recursive(kvm_debugfs_dir);
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_vfio_ops_exit();
	kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
	kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);

struct kvm_vm_worker_thread_context {
	struct kvm *kvm;
	struct task_struct *parent;
	struct completion init_done;
	kvm_vm_thread_fn_t thread_fn;
	uintptr_t data;
	int err;
};

static int kvm_vm_worker_thread(void *context)
{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to locally copy anything that is needed beyond
	 * initialization.
	 */
	struct kvm_vm_worker_thread_context *init_context = context;
	struct task_struct *parent;
	struct kvm *kvm = init_context->kvm;
	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
	uintptr_t data = init_context->data;
	int err;

	err = kthread_park(current);
	/* kthread_park(current) is never supposed to return an error */
	WARN_ON(err != 0);
	if (err)
		goto init_complete;

	err = cgroup_attach_task_all(init_context->parent, current);
	if (err) {
		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
			__func__, err);
		goto init_complete;
	}

	set_user_nice(current, task_nice(init_context->parent));

init_complete:
	init_context->err = err;
	complete(&init_context->init_done);
	init_context = NULL;

	if (err)
		goto out;

	/* Wait to be woken up by the spawner before proceeding. */
	kthread_parkme();

	if (!kthread_should_stop())
		err = thread_fn(kvm, data);

out:
	/*
	 * Move the kthread back to its original cgroup to prevent it from
	 * lingering in the cgroup of the VM process after the latter finishes
	 * its execution.
	 *
	 * kthread_stop() waits on the 'exited' completion condition which is
	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
	 * kthread is removed from the cgroup in cgroup_exit(), which is
	 * called after exit_mm(). This causes kthread_stop() to return
	 * before the kthread actually quits the cgroup.
	 */
	rcu_read_lock();
	parent = rcu_dereference(current->real_parent);
	get_task_struct(parent);
	rcu_read_unlock();
	cgroup_attach_task_all(parent, current);
	put_task_struct(parent);

	return err;
}
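/*
 * Illustrative sketch, not built as part of KVM: the contract expected of a
 * kvm_vm_thread_fn_t handed to kvm_vm_create_worker_thread() below. The
 * function starts running only once the spawner unparks the thread, should
 * poll kthread_should_stop(), and its return value is propagated to
 * kthread_stop(). example_worker_fn() is a hypothetical name.
 */
#if 0
static int example_worker_fn(struct kvm *kvm, uintptr_t data)
{
	while (!kthread_should_stop()) {
		/* Periodic per-VM housekeeping would go here. */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif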
6136 */ 6137 rcu_read_lock(); 6138 parent = rcu_dereference(current->real_parent); 6139 get_task_struct(parent); 6140 rcu_read_unlock(); 6141 cgroup_attach_task_all(parent, current); 6142 put_task_struct(parent); 6143 6144 return err; 6145 } 6146 6147 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 6148 uintptr_t data, const char *name, 6149 struct task_struct **thread_ptr) 6150 { 6151 struct kvm_vm_worker_thread_context init_context = {}; 6152 struct task_struct *thread; 6153 6154 *thread_ptr = NULL; 6155 init_context.kvm = kvm; 6156 init_context.parent = current; 6157 init_context.thread_fn = thread_fn; 6158 init_context.data = data; 6159 init_completion(&init_context.init_done); 6160 6161 thread = kthread_run(kvm_vm_worker_thread, &init_context, 6162 "%s-%d", name, task_pid_nr(current)); 6163 if (IS_ERR(thread)) 6164 return PTR_ERR(thread); 6165 6166 /* kthread_run is never supposed to return NULL */ 6167 WARN_ON(thread == NULL); 6168 6169 wait_for_completion(&init_context.init_done); 6170 6171 if (!init_context.err) 6172 *thread_ptr = thread; 6173 6174 return init_context.err; 6175 } 6176