1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 #include <linux/suspend.h> 55 56 #include <asm/processor.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 60 #include "coalesced_mmio.h" 61 #include "async_pf.h" 62 #include "kvm_mm.h" 63 #include "vfio.h" 64 65 #include <trace/events/ipi.h> 66 67 #define CREATE_TRACE_POINTS 68 #include <trace/events/kvm.h> 69 70 #include <linux/kvm_dirty_ring.h> 71 72 73 /* Worst case buffer size needed for holding an integer. */ 74 #define ITOA_MAX_LEN 12 75 76 MODULE_AUTHOR("Qumranet"); 77 MODULE_LICENSE("GPL"); 78 79 /* Architectures should define their poll value according to the halt latency */ 80 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 81 module_param(halt_poll_ns, uint, 0644); 82 EXPORT_SYMBOL_GPL(halt_poll_ns); 83 84 /* Default doubles per-vcpu halt_poll_ns. */ 85 unsigned int halt_poll_ns_grow = 2; 86 module_param(halt_poll_ns_grow, uint, 0644); 87 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 88 89 /* The start value to grow halt_poll_ns from */ 90 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 91 module_param(halt_poll_ns_grow_start, uint, 0644); 92 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 93 94 /* Default resets per-vcpu halt_poll_ns . 
*/ 95 unsigned int halt_poll_ns_shrink; 96 module_param(halt_poll_ns_shrink, uint, 0644); 97 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 98 99 /* 100 * Ordering of locks: 101 * 102 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 103 */ 104 105 DEFINE_MUTEX(kvm_lock); 106 LIST_HEAD(vm_list); 107 108 static struct kmem_cache *kvm_vcpu_cache; 109 110 static __read_mostly struct preempt_ops kvm_preempt_ops; 111 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 112 113 struct dentry *kvm_debugfs_dir; 114 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 115 116 static const struct file_operations stat_fops_per_vm; 117 118 static struct file_operations kvm_chardev_ops; 119 120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 121 unsigned long arg); 122 #ifdef CONFIG_KVM_COMPAT 123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 124 unsigned long arg); 125 #define KVM_COMPAT(c) .compat_ioctl = (c) 126 #else 127 /* 128 * For architectures that don't implement a compat infrastructure, 129 * adopt a double line of defense: 130 * - Prevent a compat task from opening /dev/kvm 131 * - If the open has been done by a 64bit task, and the KVM fd 132 * passed to a compat task, let the ioctls fail. 133 */ 134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 135 unsigned long arg) { return -EINVAL; } 136 137 static int kvm_no_compat_open(struct inode *inode, struct file *file) 138 { 139 return is_compat_task() ? -ENODEV : 0; 140 } 141 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 142 .open = kvm_no_compat_open 143 #endif 144 static int hardware_enable_all(void); 145 static void hardware_disable_all(void); 146 147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 148 149 #define KVM_EVENT_CREATE_VM 0 150 #define KVM_EVENT_DESTROY_VM 1 151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 152 static unsigned long long kvm_createvm_count; 153 static unsigned long long kvm_active_vms; 154 155 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); 156 157 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 158 unsigned long start, unsigned long end) 159 { 160 } 161 162 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 163 { 164 } 165 166 bool kvm_is_zone_device_page(struct page *page) 167 { 168 /* 169 * The metadata used by is_zone_device_page() to determine whether or 170 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 171 * the device has been pinned, e.g. by get_user_pages(). WARN if the 172 * page_count() is zero to help detect bad usage of this helper. 173 */ 174 if (WARN_ON_ONCE(!page_count(page))) 175 return false; 176 177 return is_zone_device_page(page); 178 } 179 180 /* 181 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted 182 * page, NULL otherwise. Note, the list of refcounted PG_reserved page types 183 * is likely incomplete, it has been compiled purely through people wanting to 184 * back guest with a certain type of memory and encountering issues. 185 */ 186 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn) 187 { 188 struct page *page; 189 190 if (!pfn_valid(pfn)) 191 return NULL; 192 193 page = pfn_to_page(pfn); 194 if (!PageReserved(page)) 195 return page; 196 197 /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. 
*/ 198 if (is_zero_pfn(pfn)) 199 return page; 200 201 /* 202 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 203 * perspective they are "normal" pages, albeit with slightly different 204 * usage rules. 205 */ 206 if (kvm_is_zone_device_page(page)) 207 return page; 208 209 return NULL; 210 } 211 212 /* 213 * Switches to specified vcpu, until a matching vcpu_put() 214 */ 215 void vcpu_load(struct kvm_vcpu *vcpu) 216 { 217 int cpu = get_cpu(); 218 219 __this_cpu_write(kvm_running_vcpu, vcpu); 220 preempt_notifier_register(&vcpu->preempt_notifier); 221 kvm_arch_vcpu_load(vcpu, cpu); 222 put_cpu(); 223 } 224 EXPORT_SYMBOL_GPL(vcpu_load); 225 226 void vcpu_put(struct kvm_vcpu *vcpu) 227 { 228 preempt_disable(); 229 kvm_arch_vcpu_put(vcpu); 230 preempt_notifier_unregister(&vcpu->preempt_notifier); 231 __this_cpu_write(kvm_running_vcpu, NULL); 232 preempt_enable(); 233 } 234 EXPORT_SYMBOL_GPL(vcpu_put); 235 236 /* TODO: merge with kvm_arch_vcpu_should_kick */ 237 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 238 { 239 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 240 241 /* 242 * We need to wait for the VCPU to reenable interrupts and get out of 243 * READING_SHADOW_PAGE_TABLES mode. 244 */ 245 if (req & KVM_REQUEST_WAIT) 246 return mode != OUTSIDE_GUEST_MODE; 247 248 /* 249 * Need to kick a running VCPU, but otherwise there is nothing to do. 250 */ 251 return mode == IN_GUEST_MODE; 252 } 253 254 static void ack_kick(void *_completed) 255 { 256 } 257 258 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) 259 { 260 if (cpumask_empty(cpus)) 261 return false; 262 263 smp_call_function_many(cpus, ack_kick, NULL, wait); 264 return true; 265 } 266 267 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, 268 struct cpumask *tmp, int current_cpu) 269 { 270 int cpu; 271 272 if (likely(!(req & KVM_REQUEST_NO_ACTION))) 273 __kvm_make_request(req, vcpu); 274 275 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 276 return; 277 278 /* 279 * Note, the vCPU could get migrated to a different pCPU at any point 280 * after kvm_request_needs_ipi(), which could result in sending an IPI 281 * to the previous pCPU. But, that's OK because the purpose of the IPI 282 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is 283 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES 284 * after this point is also OK, as the requirement is only that KVM wait 285 * for vCPUs that were reading SPTEs _before_ any changes were 286 * finalized. See kvm_vcpu_kick() for more details on handling requests. 
287 */ 288 if (kvm_request_needs_ipi(vcpu, req)) { 289 cpu = READ_ONCE(vcpu->cpu); 290 if (cpu != -1 && cpu != current_cpu) 291 __cpumask_set_cpu(cpu, tmp); 292 } 293 } 294 295 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 296 unsigned long *vcpu_bitmap) 297 { 298 struct kvm_vcpu *vcpu; 299 struct cpumask *cpus; 300 int i, me; 301 bool called; 302 303 me = get_cpu(); 304 305 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 306 cpumask_clear(cpus); 307 308 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 309 vcpu = kvm_get_vcpu(kvm, i); 310 if (!vcpu) 311 continue; 312 kvm_make_vcpu_request(vcpu, req, cpus, me); 313 } 314 315 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 316 put_cpu(); 317 318 return called; 319 } 320 321 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 322 struct kvm_vcpu *except) 323 { 324 struct kvm_vcpu *vcpu; 325 struct cpumask *cpus; 326 unsigned long i; 327 bool called; 328 int me; 329 330 me = get_cpu(); 331 332 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 333 cpumask_clear(cpus); 334 335 kvm_for_each_vcpu(i, vcpu, kvm) { 336 if (vcpu == except) 337 continue; 338 kvm_make_vcpu_request(vcpu, req, cpus, me); 339 } 340 341 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 342 put_cpu(); 343 344 return called; 345 } 346 347 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 348 { 349 return kvm_make_all_cpus_request_except(kvm, req, NULL); 350 } 351 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 352 353 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 354 void kvm_flush_remote_tlbs(struct kvm *kvm) 355 { 356 ++kvm->stat.generic.remote_tlb_flush_requests; 357 358 /* 359 * We want to publish modifications to the page tables before reading 360 * mode. Pairs with a memory barrier in arch-specific code. 361 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 362 * and smp_mb in walk_shadow_page_lockless_begin/end. 363 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 364 * 365 * There is already an smp_mb__after_atomic() before 366 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 367 * barrier here. 368 */ 369 if (!kvm_arch_flush_remote_tlb(kvm) 370 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 371 ++kvm->stat.generic.remote_tlb_flush; 372 } 373 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 374 #endif 375 376 static void kvm_flush_shadow_all(struct kvm *kvm) 377 { 378 kvm_arch_flush_shadow_all(kvm); 379 kvm_arch_guest_memory_reclaimed(kvm); 380 } 381 382 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 383 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 384 gfp_t gfp_flags) 385 { 386 gfp_flags |= mc->gfp_zero; 387 388 if (mc->kmem_cache) 389 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 390 else 391 return (void *)__get_free_page(gfp_flags); 392 } 393 394 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min) 395 { 396 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT; 397 void *obj; 398 399 if (mc->nobjs >= min) 400 return 0; 401 402 if (unlikely(!mc->objects)) { 403 if (WARN_ON_ONCE(!capacity)) 404 return -EIO; 405 406 mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp); 407 if (!mc->objects) 408 return -ENOMEM; 409 410 mc->capacity = capacity; 411 } 412 413 /* It is illegal to request a different capacity across topups. 
*/ 414 if (WARN_ON_ONCE(mc->capacity != capacity)) 415 return -EIO; 416 417 while (mc->nobjs < mc->capacity) { 418 obj = mmu_memory_cache_alloc_obj(mc, gfp); 419 if (!obj) 420 return mc->nobjs >= min ? 0 : -ENOMEM; 421 mc->objects[mc->nobjs++] = obj; 422 } 423 return 0; 424 } 425 426 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 427 { 428 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min); 429 } 430 431 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 432 { 433 return mc->nobjs; 434 } 435 436 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 437 { 438 while (mc->nobjs) { 439 if (mc->kmem_cache) 440 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 441 else 442 free_page((unsigned long)mc->objects[--mc->nobjs]); 443 } 444 445 kvfree(mc->objects); 446 447 mc->objects = NULL; 448 mc->capacity = 0; 449 } 450 451 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 452 { 453 void *p; 454 455 if (WARN_ON(!mc->nobjs)) 456 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 457 else 458 p = mc->objects[--mc->nobjs]; 459 BUG_ON(!p); 460 return p; 461 } 462 #endif 463 464 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 465 { 466 mutex_init(&vcpu->mutex); 467 vcpu->cpu = -1; 468 vcpu->kvm = kvm; 469 vcpu->vcpu_id = id; 470 vcpu->pid = NULL; 471 #ifndef __KVM_HAVE_ARCH_WQP 472 rcuwait_init(&vcpu->wait); 473 #endif 474 kvm_async_pf_vcpu_init(vcpu); 475 476 kvm_vcpu_set_in_spin_loop(vcpu, false); 477 kvm_vcpu_set_dy_eligible(vcpu, false); 478 vcpu->preempted = false; 479 vcpu->ready = false; 480 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 481 vcpu->last_used_slot = NULL; 482 483 /* Fill the stats id string for the vcpu */ 484 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 485 task_pid_nr(current), id); 486 } 487 488 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 489 { 490 kvm_arch_vcpu_destroy(vcpu); 491 kvm_dirty_ring_free(&vcpu->dirty_ring); 492 493 /* 494 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 495 * the vcpu->pid pointer, and at destruction time all file descriptors 496 * are already gone. 
497 */ 498 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 499 500 free_page((unsigned long)vcpu->run); 501 kmem_cache_free(kvm_vcpu_cache, vcpu); 502 } 503 504 void kvm_destroy_vcpus(struct kvm *kvm) 505 { 506 unsigned long i; 507 struct kvm_vcpu *vcpu; 508 509 kvm_for_each_vcpu(i, vcpu, kvm) { 510 kvm_vcpu_destroy(vcpu); 511 xa_erase(&kvm->vcpu_array, i); 512 } 513 514 atomic_set(&kvm->online_vcpus, 0); 515 } 516 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 517 518 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 519 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 520 { 521 return container_of(mn, struct kvm, mmu_notifier); 522 } 523 524 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 525 struct mm_struct *mm, 526 unsigned long start, unsigned long end) 527 { 528 struct kvm *kvm = mmu_notifier_to_kvm(mn); 529 int idx; 530 531 idx = srcu_read_lock(&kvm->srcu); 532 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 533 srcu_read_unlock(&kvm->srcu, idx); 534 } 535 536 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 537 538 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 539 unsigned long end); 540 541 typedef void (*on_unlock_fn_t)(struct kvm *kvm); 542 543 struct kvm_hva_range { 544 unsigned long start; 545 unsigned long end; 546 pte_t pte; 547 hva_handler_t handler; 548 on_lock_fn_t on_lock; 549 on_unlock_fn_t on_unlock; 550 bool flush_on_ret; 551 bool may_block; 552 }; 553 554 /* 555 * Use a dedicated stub instead of NULL to indicate that there is no callback 556 * function/handler. The compiler technically can't guarantee that a real 557 * function will have a non-zero address, and so it will generate code to 558 * check for !NULL, whereas comparing against a stub will be elided at compile 559 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 560 */ 561 static void kvm_null_fn(void) 562 { 563 564 } 565 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 566 567 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 568 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 569 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 570 node; \ 571 node = interval_tree_iter_next(node, start, last)) \ 572 573 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 574 const struct kvm_hva_range *range) 575 { 576 bool ret = false, locked = false; 577 struct kvm_gfn_range gfn_range; 578 struct kvm_memory_slot *slot; 579 struct kvm_memslots *slots; 580 int i, idx; 581 582 if (WARN_ON_ONCE(range->end <= range->start)) 583 return 0; 584 585 /* A null handler is allowed if and only if on_lock() is provided. 
*/ 586 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 587 IS_KVM_NULL_FN(range->handler))) 588 return 0; 589 590 idx = srcu_read_lock(&kvm->srcu); 591 592 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 593 struct interval_tree_node *node; 594 595 slots = __kvm_memslots(kvm, i); 596 kvm_for_each_memslot_in_hva_range(node, slots, 597 range->start, range->end - 1) { 598 unsigned long hva_start, hva_end; 599 600 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); 601 hva_start = max(range->start, slot->userspace_addr); 602 hva_end = min(range->end, slot->userspace_addr + 603 (slot->npages << PAGE_SHIFT)); 604 605 /* 606 * To optimize for the likely case where the address 607 * range is covered by zero or one memslots, don't 608 * bother making these conditional (to avoid writes on 609 * the second or later invocation of the handler). 610 */ 611 gfn_range.pte = range->pte; 612 gfn_range.may_block = range->may_block; 613 614 /* 615 * {gfn(page) | page intersects with [hva_start, hva_end)} = 616 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 617 */ 618 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 619 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 620 gfn_range.slot = slot; 621 622 if (!locked) { 623 locked = true; 624 KVM_MMU_LOCK(kvm); 625 if (!IS_KVM_NULL_FN(range->on_lock)) 626 range->on_lock(kvm, range->start, range->end); 627 if (IS_KVM_NULL_FN(range->handler)) 628 break; 629 } 630 ret |= range->handler(kvm, &gfn_range); 631 } 632 } 633 634 if (range->flush_on_ret && ret) 635 kvm_flush_remote_tlbs(kvm); 636 637 if (locked) { 638 KVM_MMU_UNLOCK(kvm); 639 if (!IS_KVM_NULL_FN(range->on_unlock)) 640 range->on_unlock(kvm); 641 } 642 643 srcu_read_unlock(&kvm->srcu, idx); 644 645 /* The notifiers are averse to booleans. :-( */ 646 return (int)ret; 647 } 648 649 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, 650 unsigned long start, 651 unsigned long end, 652 pte_t pte, 653 hva_handler_t handler) 654 { 655 struct kvm *kvm = mmu_notifier_to_kvm(mn); 656 const struct kvm_hva_range range = { 657 .start = start, 658 .end = end, 659 .pte = pte, 660 .handler = handler, 661 .on_lock = (void *)kvm_null_fn, 662 .on_unlock = (void *)kvm_null_fn, 663 .flush_on_ret = true, 664 .may_block = false, 665 }; 666 667 return __kvm_handle_hva_range(kvm, &range); 668 } 669 670 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, 671 unsigned long start, 672 unsigned long end, 673 hva_handler_t handler) 674 { 675 struct kvm *kvm = mmu_notifier_to_kvm(mn); 676 const struct kvm_hva_range range = { 677 .start = start, 678 .end = end, 679 .pte = __pte(0), 680 .handler = handler, 681 .on_lock = (void *)kvm_null_fn, 682 .on_unlock = (void *)kvm_null_fn, 683 .flush_on_ret = false, 684 .may_block = false, 685 }; 686 687 return __kvm_handle_hva_range(kvm, &range); 688 } 689 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 690 struct mm_struct *mm, 691 unsigned long address, 692 pte_t pte) 693 { 694 struct kvm *kvm = mmu_notifier_to_kvm(mn); 695 696 trace_kvm_set_spte_hva(address); 697 698 /* 699 * .change_pte() must be surrounded by .invalidate_range_{start,end}(). 700 * If mmu_invalidate_in_progress is zero, then no in-progress 701 * invalidations, including this one, found a relevant memslot at 702 * start(); rechecking memslots here is unnecessary. Note, a false 703 * positive (count elevated by a different invalidation) is sub-optimal 704 * but functionally ok. 
705 */ 706 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); 707 if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) 708 return; 709 710 kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn); 711 } 712 713 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, 714 unsigned long end) 715 { 716 /* 717 * The count increase must become visible at unlock time as no 718 * spte can be established without taking the mmu_lock and 719 * count is also read inside the mmu_lock critical section. 720 */ 721 kvm->mmu_invalidate_in_progress++; 722 if (likely(kvm->mmu_invalidate_in_progress == 1)) { 723 kvm->mmu_invalidate_range_start = start; 724 kvm->mmu_invalidate_range_end = end; 725 } else { 726 /* 727 * Fully tracking multiple concurrent ranges has diminishing 728 * returns. Keep things simple and just find the minimal range 729 * which includes the current and new ranges. As there won't be 730 * enough information to subtract a range after its invalidate 731 * completes, any ranges invalidated concurrently will 732 * accumulate and persist until all outstanding invalidates 733 * complete. 734 */ 735 kvm->mmu_invalidate_range_start = 736 min(kvm->mmu_invalidate_range_start, start); 737 kvm->mmu_invalidate_range_end = 738 max(kvm->mmu_invalidate_range_end, end); 739 } 740 } 741 742 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 743 const struct mmu_notifier_range *range) 744 { 745 struct kvm *kvm = mmu_notifier_to_kvm(mn); 746 const struct kvm_hva_range hva_range = { 747 .start = range->start, 748 .end = range->end, 749 .pte = __pte(0), 750 .handler = kvm_unmap_gfn_range, 751 .on_lock = kvm_mmu_invalidate_begin, 752 .on_unlock = kvm_arch_guest_memory_reclaimed, 753 .flush_on_ret = true, 754 .may_block = mmu_notifier_range_blockable(range), 755 }; 756 757 trace_kvm_unmap_hva_range(range->start, range->end); 758 759 /* 760 * Prevent memslot modification between range_start() and range_end() 761 * so that conditionally locking provides the same result in both 762 * functions. Without that guarantee, the mmu_invalidate_in_progress 763 * adjustments will be imbalanced. 764 * 765 * Pairs with the decrement in range_end(). 766 */ 767 spin_lock(&kvm->mn_invalidate_lock); 768 kvm->mn_active_invalidate_count++; 769 spin_unlock(&kvm->mn_invalidate_lock); 770 771 /* 772 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e. 773 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring 774 * each cache's lock. There are relatively few caches in existence at 775 * any given time, and the caches themselves can check for hva overlap, 776 * i.e. don't need to rely on memslot overlap checks for performance. 777 * Because this runs without holding mmu_lock, the pfn caches must use 778 * mn_active_invalidate_count (see above) instead of 779 * mmu_invalidate_in_progress. 780 */ 781 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, 782 hva_range.may_block); 783 784 __kvm_handle_hva_range(kvm, &hva_range); 785 786 return 0; 787 } 788 789 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, 790 unsigned long end) 791 { 792 /* 793 * This sequence increase will notify the kvm page fault that 794 * the page that is going to be mapped in the spte could have 795 * been freed. 
796 */ 797 kvm->mmu_invalidate_seq++; 798 smp_wmb(); 799 /* 800 * The above sequence increase must be visible before the 801 * below count decrease, which is ensured by the smp_wmb above 802 * in conjunction with the smp_rmb in mmu_invalidate_retry(). 803 */ 804 kvm->mmu_invalidate_in_progress--; 805 } 806 807 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 808 const struct mmu_notifier_range *range) 809 { 810 struct kvm *kvm = mmu_notifier_to_kvm(mn); 811 const struct kvm_hva_range hva_range = { 812 .start = range->start, 813 .end = range->end, 814 .pte = __pte(0), 815 .handler = (void *)kvm_null_fn, 816 .on_lock = kvm_mmu_invalidate_end, 817 .on_unlock = (void *)kvm_null_fn, 818 .flush_on_ret = false, 819 .may_block = mmu_notifier_range_blockable(range), 820 }; 821 bool wake; 822 823 __kvm_handle_hva_range(kvm, &hva_range); 824 825 /* Pairs with the increment in range_start(). */ 826 spin_lock(&kvm->mn_invalidate_lock); 827 wake = (--kvm->mn_active_invalidate_count == 0); 828 spin_unlock(&kvm->mn_invalidate_lock); 829 830 /* 831 * There can only be one waiter, since the wait happens under 832 * slots_lock. 833 */ 834 if (wake) 835 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 836 837 BUG_ON(kvm->mmu_invalidate_in_progress < 0); 838 } 839 840 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 841 struct mm_struct *mm, 842 unsigned long start, 843 unsigned long end) 844 { 845 trace_kvm_age_hva(start, end); 846 847 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); 848 } 849 850 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 851 struct mm_struct *mm, 852 unsigned long start, 853 unsigned long end) 854 { 855 trace_kvm_age_hva(start, end); 856 857 /* 858 * Even though we do not flush TLB, this will still adversely 859 * affect performance on pre-Haswell Intel EPT, where there is 860 * no EPT Access Bit to clear so that we have to tear down EPT 861 * tables instead. If we find this unacceptable, we can always 862 * add a parameter to kvm_age_hva so that it effectively doesn't 863 * do anything on clear_young. 864 * 865 * Also note that currently we never issue secondary TLB flushes 866 * from clear_young, leaving this job up to the regular system 867 * cadence. If we find this inaccurate, we might come up with a 868 * more sophisticated heuristic later. 
869 */ 870 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 871 } 872 873 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 874 struct mm_struct *mm, 875 unsigned long address) 876 { 877 trace_kvm_test_age_hva(address); 878 879 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 880 kvm_test_age_gfn); 881 } 882 883 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 884 struct mm_struct *mm) 885 { 886 struct kvm *kvm = mmu_notifier_to_kvm(mn); 887 int idx; 888 889 idx = srcu_read_lock(&kvm->srcu); 890 kvm_flush_shadow_all(kvm); 891 srcu_read_unlock(&kvm->srcu, idx); 892 } 893 894 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 895 .invalidate_range = kvm_mmu_notifier_invalidate_range, 896 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 897 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 898 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 899 .clear_young = kvm_mmu_notifier_clear_young, 900 .test_young = kvm_mmu_notifier_test_young, 901 .change_pte = kvm_mmu_notifier_change_pte, 902 .release = kvm_mmu_notifier_release, 903 }; 904 905 static int kvm_init_mmu_notifier(struct kvm *kvm) 906 { 907 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 908 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 909 } 910 911 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 912 913 static int kvm_init_mmu_notifier(struct kvm *kvm) 914 { 915 return 0; 916 } 917 918 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 919 920 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 921 static int kvm_pm_notifier_call(struct notifier_block *bl, 922 unsigned long state, 923 void *unused) 924 { 925 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 926 927 return kvm_arch_pm_notifier(kvm, state); 928 } 929 930 static void kvm_init_pm_notifier(struct kvm *kvm) 931 { 932 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 933 /* Suspend KVM before we suspend ftrace, RCU, etc. */ 934 kvm->pm_notifier.priority = INT_MAX; 935 register_pm_notifier(&kvm->pm_notifier); 936 } 937 938 static void kvm_destroy_pm_notifier(struct kvm *kvm) 939 { 940 unregister_pm_notifier(&kvm->pm_notifier); 941 } 942 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 943 static void kvm_init_pm_notifier(struct kvm *kvm) 944 { 945 } 946 947 static void kvm_destroy_pm_notifier(struct kvm *kvm) 948 { 949 } 950 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 951 952 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 953 { 954 if (!memslot->dirty_bitmap) 955 return; 956 957 kvfree(memslot->dirty_bitmap); 958 memslot->dirty_bitmap = NULL; 959 } 960 961 /* This does not remove the slot from struct kvm_memslots data structures */ 962 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 963 { 964 kvm_destroy_dirty_bitmap(slot); 965 966 kvm_arch_free_memslot(kvm, slot); 967 968 kfree(slot); 969 } 970 971 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 972 { 973 struct hlist_node *idnode; 974 struct kvm_memory_slot *memslot; 975 int bkt; 976 977 /* 978 * The same memslot objects live in both active and inactive sets, 979 * arbitrarily free using index '1' so the second invocation of this 980 * function isn't operating over a structure with dangling pointers 981 * (even though this function isn't actually touching them). 
982 */ 983 if (!slots->node_idx) 984 return; 985 986 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) 987 kvm_free_memslot(kvm, memslot); 988 } 989 990 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 991 { 992 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 993 case KVM_STATS_TYPE_INSTANT: 994 return 0444; 995 case KVM_STATS_TYPE_CUMULATIVE: 996 case KVM_STATS_TYPE_PEAK: 997 default: 998 return 0644; 999 } 1000 } 1001 1002 1003 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 1004 { 1005 int i; 1006 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1007 kvm_vcpu_stats_header.num_desc; 1008 1009 if (IS_ERR(kvm->debugfs_dentry)) 1010 return; 1011 1012 debugfs_remove_recursive(kvm->debugfs_dentry); 1013 1014 if (kvm->debugfs_stat_data) { 1015 for (i = 0; i < kvm_debugfs_num_entries; i++) 1016 kfree(kvm->debugfs_stat_data[i]); 1017 kfree(kvm->debugfs_stat_data); 1018 } 1019 } 1020 1021 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname) 1022 { 1023 static DEFINE_MUTEX(kvm_debugfs_lock); 1024 struct dentry *dent; 1025 char dir_name[ITOA_MAX_LEN * 2]; 1026 struct kvm_stat_data *stat_data; 1027 const struct _kvm_stats_desc *pdesc; 1028 int i, ret = -ENOMEM; 1029 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1030 kvm_vcpu_stats_header.num_desc; 1031 1032 if (!debugfs_initialized()) 1033 return 0; 1034 1035 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname); 1036 mutex_lock(&kvm_debugfs_lock); 1037 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 1038 if (dent) { 1039 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 1040 dput(dent); 1041 mutex_unlock(&kvm_debugfs_lock); 1042 return 0; 1043 } 1044 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 1045 mutex_unlock(&kvm_debugfs_lock); 1046 if (IS_ERR(dent)) 1047 return 0; 1048 1049 kvm->debugfs_dentry = dent; 1050 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 1051 sizeof(*kvm->debugfs_stat_data), 1052 GFP_KERNEL_ACCOUNT); 1053 if (!kvm->debugfs_stat_data) 1054 goto out_err; 1055 1056 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 1057 pdesc = &kvm_vm_stats_desc[i]; 1058 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1059 if (!stat_data) 1060 goto out_err; 1061 1062 stat_data->kvm = kvm; 1063 stat_data->desc = pdesc; 1064 stat_data->kind = KVM_STAT_VM; 1065 kvm->debugfs_stat_data[i] = stat_data; 1066 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1067 kvm->debugfs_dentry, stat_data, 1068 &stat_fops_per_vm); 1069 } 1070 1071 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 1072 pdesc = &kvm_vcpu_stats_desc[i]; 1073 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1074 if (!stat_data) 1075 goto out_err; 1076 1077 stat_data->kvm = kvm; 1078 stat_data->desc = pdesc; 1079 stat_data->kind = KVM_STAT_VCPU; 1080 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; 1081 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1082 kvm->debugfs_dentry, stat_data, 1083 &stat_fops_per_vm); 1084 } 1085 1086 ret = kvm_arch_create_vm_debugfs(kvm); 1087 if (ret) 1088 goto out_err; 1089 1090 return 0; 1091 out_err: 1092 kvm_destroy_vm_debugfs(kvm); 1093 return ret; 1094 } 1095 1096 /* 1097 * Called after the VM is otherwise initialized, but just before adding it to 1098 * the vm_list. 
1099 */ 1100 int __weak kvm_arch_post_init_vm(struct kvm *kvm) 1101 { 1102 return 0; 1103 } 1104 1105 /* 1106 * Called just after removing the VM from the vm_list, but before doing any 1107 * other destruction. 1108 */ 1109 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) 1110 { 1111 } 1112 1113 /* 1114 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should 1115 * be setup already, so we can create arch-specific debugfs entries under it. 1116 * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so 1117 * a per-arch destroy interface is not needed. 1118 */ 1119 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) 1120 { 1121 return 0; 1122 } 1123 1124 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) 1125 { 1126 struct kvm *kvm = kvm_arch_alloc_vm(); 1127 struct kvm_memslots *slots; 1128 int r = -ENOMEM; 1129 int i, j; 1130 1131 if (!kvm) 1132 return ERR_PTR(-ENOMEM); 1133 1134 /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */ 1135 __module_get(kvm_chardev_ops.owner); 1136 1137 KVM_MMU_LOCK_INIT(kvm); 1138 mmgrab(current->mm); 1139 kvm->mm = current->mm; 1140 kvm_eventfd_init(kvm); 1141 mutex_init(&kvm->lock); 1142 mutex_init(&kvm->irq_lock); 1143 mutex_init(&kvm->slots_lock); 1144 mutex_init(&kvm->slots_arch_lock); 1145 spin_lock_init(&kvm->mn_invalidate_lock); 1146 rcuwait_init(&kvm->mn_memslots_update_rcuwait); 1147 xa_init(&kvm->vcpu_array); 1148 1149 INIT_LIST_HEAD(&kvm->gpc_list); 1150 spin_lock_init(&kvm->gpc_lock); 1151 1152 INIT_LIST_HEAD(&kvm->devices); 1153 kvm->max_vcpus = KVM_MAX_VCPUS; 1154 1155 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 1156 1157 /* 1158 * Force subsequent debugfs file creations to fail if the VM directory 1159 * is not created (by kvm_create_vm_debugfs()). 1160 */ 1161 kvm->debugfs_dentry = ERR_PTR(-ENOENT); 1162 1163 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d", 1164 task_pid_nr(current)); 1165 1166 if (init_srcu_struct(&kvm->srcu)) 1167 goto out_err_no_srcu; 1168 if (init_srcu_struct(&kvm->irq_srcu)) 1169 goto out_err_no_irq_srcu; 1170 1171 refcount_set(&kvm->users_count, 1); 1172 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1173 for (j = 0; j < 2; j++) { 1174 slots = &kvm->__memslots[i][j]; 1175 1176 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); 1177 slots->hva_tree = RB_ROOT_CACHED; 1178 slots->gfn_tree = RB_ROOT; 1179 hash_init(slots->id_hash); 1180 slots->node_idx = j; 1181 1182 /* Generations must be different for each address space. 
*/ 1183 slots->generation = i; 1184 } 1185 1186 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); 1187 } 1188 1189 for (i = 0; i < KVM_NR_BUSES; i++) { 1190 rcu_assign_pointer(kvm->buses[i], 1191 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 1192 if (!kvm->buses[i]) 1193 goto out_err_no_arch_destroy_vm; 1194 } 1195 1196 r = kvm_arch_init_vm(kvm, type); 1197 if (r) 1198 goto out_err_no_arch_destroy_vm; 1199 1200 r = hardware_enable_all(); 1201 if (r) 1202 goto out_err_no_disable; 1203 1204 #ifdef CONFIG_HAVE_KVM_IRQFD 1205 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 1206 #endif 1207 1208 r = kvm_init_mmu_notifier(kvm); 1209 if (r) 1210 goto out_err_no_mmu_notifier; 1211 1212 r = kvm_coalesced_mmio_init(kvm); 1213 if (r < 0) 1214 goto out_no_coalesced_mmio; 1215 1216 r = kvm_create_vm_debugfs(kvm, fdname); 1217 if (r) 1218 goto out_err_no_debugfs; 1219 1220 r = kvm_arch_post_init_vm(kvm); 1221 if (r) 1222 goto out_err; 1223 1224 mutex_lock(&kvm_lock); 1225 list_add(&kvm->vm_list, &vm_list); 1226 mutex_unlock(&kvm_lock); 1227 1228 preempt_notifier_inc(); 1229 kvm_init_pm_notifier(kvm); 1230 1231 return kvm; 1232 1233 out_err: 1234 kvm_destroy_vm_debugfs(kvm); 1235 out_err_no_debugfs: 1236 kvm_coalesced_mmio_free(kvm); 1237 out_no_coalesced_mmio: 1238 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1239 if (kvm->mmu_notifier.ops) 1240 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1241 #endif 1242 out_err_no_mmu_notifier: 1243 hardware_disable_all(); 1244 out_err_no_disable: 1245 kvm_arch_destroy_vm(kvm); 1246 out_err_no_arch_destroy_vm: 1247 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1248 for (i = 0; i < KVM_NR_BUSES; i++) 1249 kfree(kvm_get_bus(kvm, i)); 1250 cleanup_srcu_struct(&kvm->irq_srcu); 1251 out_err_no_irq_srcu: 1252 cleanup_srcu_struct(&kvm->srcu); 1253 out_err_no_srcu: 1254 kvm_arch_free_vm(kvm); 1255 mmdrop(current->mm); 1256 module_put(kvm_chardev_ops.owner); 1257 return ERR_PTR(r); 1258 } 1259 1260 static void kvm_destroy_devices(struct kvm *kvm) 1261 { 1262 struct kvm_device *dev, *tmp; 1263 1264 /* 1265 * We do not need to take the kvm->lock here, because nobody else 1266 * has a reference to the struct kvm at this point and therefore 1267 * cannot access the devices list anyhow. 1268 */ 1269 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 1270 list_del(&dev->vm_node); 1271 dev->ops->destroy(dev); 1272 } 1273 } 1274 1275 static void kvm_destroy_vm(struct kvm *kvm) 1276 { 1277 int i; 1278 struct mm_struct *mm = kvm->mm; 1279 1280 kvm_destroy_pm_notifier(kvm); 1281 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 1282 kvm_destroy_vm_debugfs(kvm); 1283 kvm_arch_sync_events(kvm); 1284 mutex_lock(&kvm_lock); 1285 list_del(&kvm->vm_list); 1286 mutex_unlock(&kvm_lock); 1287 kvm_arch_pre_destroy_vm(kvm); 1288 1289 kvm_free_irq_routing(kvm); 1290 for (i = 0; i < KVM_NR_BUSES; i++) { 1291 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1292 1293 if (bus) 1294 kvm_io_bus_destroy(bus); 1295 kvm->buses[i] = NULL; 1296 } 1297 kvm_coalesced_mmio_free(kvm); 1298 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1299 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1300 /* 1301 * At this point, pending calls to invalidate_range_start() 1302 * have completed but no more MMU notifiers will run, so 1303 * mn_active_invalidate_count may remain unbalanced. 
1304 * No threads can be waiting in kvm_swap_active_memslots() as the 1305 * last reference on KVM has been dropped, but freeing 1306 * memslots would deadlock without this manual intervention. 1307 */ 1308 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); 1309 kvm->mn_active_invalidate_count = 0; 1310 #else 1311 kvm_flush_shadow_all(kvm); 1312 #endif 1313 kvm_arch_destroy_vm(kvm); 1314 kvm_destroy_devices(kvm); 1315 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1316 kvm_free_memslots(kvm, &kvm->__memslots[i][0]); 1317 kvm_free_memslots(kvm, &kvm->__memslots[i][1]); 1318 } 1319 cleanup_srcu_struct(&kvm->irq_srcu); 1320 cleanup_srcu_struct(&kvm->srcu); 1321 kvm_arch_free_vm(kvm); 1322 preempt_notifier_dec(); 1323 hardware_disable_all(); 1324 mmdrop(mm); 1325 module_put(kvm_chardev_ops.owner); 1326 } 1327 1328 void kvm_get_kvm(struct kvm *kvm) 1329 { 1330 refcount_inc(&kvm->users_count); 1331 } 1332 EXPORT_SYMBOL_GPL(kvm_get_kvm); 1333 1334 /* 1335 * Make sure the vm is not during destruction, which is a safe version of 1336 * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise. 1337 */ 1338 bool kvm_get_kvm_safe(struct kvm *kvm) 1339 { 1340 return refcount_inc_not_zero(&kvm->users_count); 1341 } 1342 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe); 1343 1344 void kvm_put_kvm(struct kvm *kvm) 1345 { 1346 if (refcount_dec_and_test(&kvm->users_count)) 1347 kvm_destroy_vm(kvm); 1348 } 1349 EXPORT_SYMBOL_GPL(kvm_put_kvm); 1350 1351 /* 1352 * Used to put a reference that was taken on behalf of an object associated 1353 * with a user-visible file descriptor, e.g. a vcpu or device, if installation 1354 * of the new file descriptor fails and the reference cannot be transferred to 1355 * its final owner. In such cases, the caller is still actively using @kvm and 1356 * will fail miserably if the refcount unexpectedly hits zero. 1357 */ 1358 void kvm_put_kvm_no_destroy(struct kvm *kvm) 1359 { 1360 WARN_ON(refcount_dec_and_test(&kvm->users_count)); 1361 } 1362 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); 1363 1364 static int kvm_vm_release(struct inode *inode, struct file *filp) 1365 { 1366 struct kvm *kvm = filp->private_data; 1367 1368 kvm_irqfd_release(kvm); 1369 1370 kvm_put_kvm(kvm); 1371 return 0; 1372 } 1373 1374 /* 1375 * Allocation size is twice as large as the actual dirty bitmap size. 1376 * See kvm_vm_ioctl_get_dirty_log() why this is needed. 1377 */ 1378 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) 1379 { 1380 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot); 1381 1382 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT); 1383 if (!memslot->dirty_bitmap) 1384 return -ENOMEM; 1385 1386 return 0; 1387 } 1388 1389 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) 1390 { 1391 struct kvm_memslots *active = __kvm_memslots(kvm, as_id); 1392 int node_idx_inactive = active->node_idx ^ 1; 1393 1394 return &kvm->__memslots[as_id][node_idx_inactive]; 1395 } 1396 1397 /* 1398 * Helper to get the address space ID when one of memslot pointers may be NULL. 1399 * This also serves as a sanity that at least one of the pointers is non-NULL, 1400 * and that their address space IDs don't diverge. 
1401 */ 1402 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, 1403 struct kvm_memory_slot *b) 1404 { 1405 if (WARN_ON_ONCE(!a && !b)) 1406 return 0; 1407 1408 if (!a) 1409 return b->as_id; 1410 if (!b) 1411 return a->as_id; 1412 1413 WARN_ON_ONCE(a->as_id != b->as_id); 1414 return a->as_id; 1415 } 1416 1417 static void kvm_insert_gfn_node(struct kvm_memslots *slots, 1418 struct kvm_memory_slot *slot) 1419 { 1420 struct rb_root *gfn_tree = &slots->gfn_tree; 1421 struct rb_node **node, *parent; 1422 int idx = slots->node_idx; 1423 1424 parent = NULL; 1425 for (node = &gfn_tree->rb_node; *node; ) { 1426 struct kvm_memory_slot *tmp; 1427 1428 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); 1429 parent = *node; 1430 if (slot->base_gfn < tmp->base_gfn) 1431 node = &(*node)->rb_left; 1432 else if (slot->base_gfn > tmp->base_gfn) 1433 node = &(*node)->rb_right; 1434 else 1435 BUG(); 1436 } 1437 1438 rb_link_node(&slot->gfn_node[idx], parent, node); 1439 rb_insert_color(&slot->gfn_node[idx], gfn_tree); 1440 } 1441 1442 static void kvm_erase_gfn_node(struct kvm_memslots *slots, 1443 struct kvm_memory_slot *slot) 1444 { 1445 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); 1446 } 1447 1448 static void kvm_replace_gfn_node(struct kvm_memslots *slots, 1449 struct kvm_memory_slot *old, 1450 struct kvm_memory_slot *new) 1451 { 1452 int idx = slots->node_idx; 1453 1454 WARN_ON_ONCE(old->base_gfn != new->base_gfn); 1455 1456 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], 1457 &slots->gfn_tree); 1458 } 1459 1460 /* 1461 * Replace @old with @new in the inactive memslots. 1462 * 1463 * With NULL @old this simply adds @new. 1464 * With NULL @new this simply removes @old. 1465 * 1466 * If @new is non-NULL its hva_node[slots_idx] range has to be set 1467 * appropriately. 1468 */ 1469 static void kvm_replace_memslot(struct kvm *kvm, 1470 struct kvm_memory_slot *old, 1471 struct kvm_memory_slot *new) 1472 { 1473 int as_id = kvm_memslots_get_as_id(old, new); 1474 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1475 int idx = slots->node_idx; 1476 1477 if (old) { 1478 hash_del(&old->id_node[idx]); 1479 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); 1480 1481 if ((long)old == atomic_long_read(&slots->last_used_slot)) 1482 atomic_long_set(&slots->last_used_slot, (long)new); 1483 1484 if (!new) { 1485 kvm_erase_gfn_node(slots, old); 1486 return; 1487 } 1488 } 1489 1490 /* 1491 * Initialize @new's hva range. Do this even when replacing an @old 1492 * slot, kvm_copy_memslot() deliberately does not touch node data. 1493 */ 1494 new->hva_node[idx].start = new->userspace_addr; 1495 new->hva_node[idx].last = new->userspace_addr + 1496 (new->npages << PAGE_SHIFT) - 1; 1497 1498 /* 1499 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), 1500 * hva_node needs to be swapped with remove+insert even though hva can't 1501 * change when replacing an existing slot. 1502 */ 1503 hash_add(slots->id_hash, &new->id_node[idx], new->id); 1504 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); 1505 1506 /* 1507 * If the memslot gfn is unchanged, rb_replace_node() can be used to 1508 * switch the node in the gfn tree instead of removing the old and 1509 * inserting the new as two separate operations. Replacement is a 1510 * single O(1) operation versus two O(log(n)) operations for 1511 * remove+insert. 
1512 */ 1513 if (old && old->base_gfn == new->base_gfn) { 1514 kvm_replace_gfn_node(slots, old, new); 1515 } else { 1516 if (old) 1517 kvm_erase_gfn_node(slots, old); 1518 kvm_insert_gfn_node(slots, new); 1519 } 1520 } 1521 1522 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 1523 { 1524 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 1525 1526 #ifdef __KVM_HAVE_READONLY_MEM 1527 valid_flags |= KVM_MEM_READONLY; 1528 #endif 1529 1530 if (mem->flags & ~valid_flags) 1531 return -EINVAL; 1532 1533 return 0; 1534 } 1535 1536 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) 1537 { 1538 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1539 1540 /* Grab the generation from the activate memslots. */ 1541 u64 gen = __kvm_memslots(kvm, as_id)->generation; 1542 1543 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); 1544 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1545 1546 /* 1547 * Do not store the new memslots while there are invalidations in 1548 * progress, otherwise the locking in invalidate_range_start and 1549 * invalidate_range_end will be unbalanced. 1550 */ 1551 spin_lock(&kvm->mn_invalidate_lock); 1552 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); 1553 while (kvm->mn_active_invalidate_count) { 1554 set_current_state(TASK_UNINTERRUPTIBLE); 1555 spin_unlock(&kvm->mn_invalidate_lock); 1556 schedule(); 1557 spin_lock(&kvm->mn_invalidate_lock); 1558 } 1559 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); 1560 rcu_assign_pointer(kvm->memslots[as_id], slots); 1561 spin_unlock(&kvm->mn_invalidate_lock); 1562 1563 /* 1564 * Acquired in kvm_set_memslot. Must be released before synchronize 1565 * SRCU below in order to avoid deadlock with another thread 1566 * acquiring the slots_arch_lock in an srcu critical section. 1567 */ 1568 mutex_unlock(&kvm->slots_arch_lock); 1569 1570 synchronize_srcu_expedited(&kvm->srcu); 1571 1572 /* 1573 * Increment the new memslot generation a second time, dropping the 1574 * update in-progress flag and incrementing the generation based on 1575 * the number of address spaces. This provides a unique and easily 1576 * identifiable generation number while the memslots are in flux. 1577 */ 1578 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1579 1580 /* 1581 * Generations must be unique even across address spaces. We do not need 1582 * a global counter for that, instead the generation space is evenly split 1583 * across address spaces. For example, with two address spaces, address 1584 * space 0 will use generations 0, 2, 4, ... while address space 1 will 1585 * use generations 1, 3, 5, ... 1586 */ 1587 gen += KVM_ADDRESS_SPACE_NUM; 1588 1589 kvm_arch_memslots_updated(kvm, gen); 1590 1591 slots->generation = gen; 1592 } 1593 1594 static int kvm_prepare_memory_region(struct kvm *kvm, 1595 const struct kvm_memory_slot *old, 1596 struct kvm_memory_slot *new, 1597 enum kvm_mr_change change) 1598 { 1599 int r; 1600 1601 /* 1602 * If dirty logging is disabled, nullify the bitmap; the old bitmap 1603 * will be freed on "commit". If logging is enabled in both old and 1604 * new, reuse the existing bitmap. If logging is enabled only in the 1605 * new and KVM isn't using a ring buffer, allocate and initialize a 1606 * new bitmap. 
1607 */ 1608 if (change != KVM_MR_DELETE) { 1609 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) 1610 new->dirty_bitmap = NULL; 1611 else if (old && old->dirty_bitmap) 1612 new->dirty_bitmap = old->dirty_bitmap; 1613 else if (kvm_use_dirty_bitmap(kvm)) { 1614 r = kvm_alloc_dirty_bitmap(new); 1615 if (r) 1616 return r; 1617 1618 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1619 bitmap_set(new->dirty_bitmap, 0, new->npages); 1620 } 1621 } 1622 1623 r = kvm_arch_prepare_memory_region(kvm, old, new, change); 1624 1625 /* Free the bitmap on failure if it was allocated above. */ 1626 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap)) 1627 kvm_destroy_dirty_bitmap(new); 1628 1629 return r; 1630 } 1631 1632 static void kvm_commit_memory_region(struct kvm *kvm, 1633 struct kvm_memory_slot *old, 1634 const struct kvm_memory_slot *new, 1635 enum kvm_mr_change change) 1636 { 1637 int old_flags = old ? old->flags : 0; 1638 int new_flags = new ? new->flags : 0; 1639 /* 1640 * Update the total number of memslot pages before calling the arch 1641 * hook so that architectures can consume the result directly. 1642 */ 1643 if (change == KVM_MR_DELETE) 1644 kvm->nr_memslot_pages -= old->npages; 1645 else if (change == KVM_MR_CREATE) 1646 kvm->nr_memslot_pages += new->npages; 1647 1648 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) { 1649 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1; 1650 atomic_set(&kvm->nr_memslots_dirty_logging, 1651 atomic_read(&kvm->nr_memslots_dirty_logging) + change); 1652 } 1653 1654 kvm_arch_commit_memory_region(kvm, old, new, change); 1655 1656 switch (change) { 1657 case KVM_MR_CREATE: 1658 /* Nothing more to do. */ 1659 break; 1660 case KVM_MR_DELETE: 1661 /* Free the old memslot and all its metadata. */ 1662 kvm_free_memslot(kvm, old); 1663 break; 1664 case KVM_MR_MOVE: 1665 case KVM_MR_FLAGS_ONLY: 1666 /* 1667 * Free the dirty bitmap as needed; the below check encompasses 1668 * both the flags and whether a ring buffer is being used) 1669 */ 1670 if (old->dirty_bitmap && !new->dirty_bitmap) 1671 kvm_destroy_dirty_bitmap(old); 1672 1673 /* 1674 * The final quirk. Free the detached, old slot, but only its 1675 * memory, not any metadata. Metadata, including arch specific 1676 * data, may be reused by @new. 1677 */ 1678 kfree(old); 1679 break; 1680 default: 1681 BUG(); 1682 } 1683 } 1684 1685 /* 1686 * Activate @new, which must be installed in the inactive slots by the caller, 1687 * by swapping the active slots and then propagating @new to @old once @old is 1688 * unreachable and can be safely modified. 1689 * 1690 * With NULL @old this simply adds @new to @active (while swapping the sets). 1691 * With NULL @new this simply removes @old from @active and frees it 1692 * (while also swapping the sets). 1693 */ 1694 static void kvm_activate_memslot(struct kvm *kvm, 1695 struct kvm_memory_slot *old, 1696 struct kvm_memory_slot *new) 1697 { 1698 int as_id = kvm_memslots_get_as_id(old, new); 1699 1700 kvm_swap_active_memslots(kvm, as_id); 1701 1702 /* Propagate the new memslot to the now inactive memslots. 
*/ 1703 kvm_replace_memslot(kvm, old, new); 1704 } 1705 1706 static void kvm_copy_memslot(struct kvm_memory_slot *dest, 1707 const struct kvm_memory_slot *src) 1708 { 1709 dest->base_gfn = src->base_gfn; 1710 dest->npages = src->npages; 1711 dest->dirty_bitmap = src->dirty_bitmap; 1712 dest->arch = src->arch; 1713 dest->userspace_addr = src->userspace_addr; 1714 dest->flags = src->flags; 1715 dest->id = src->id; 1716 dest->as_id = src->as_id; 1717 } 1718 1719 static void kvm_invalidate_memslot(struct kvm *kvm, 1720 struct kvm_memory_slot *old, 1721 struct kvm_memory_slot *invalid_slot) 1722 { 1723 /* 1724 * Mark the current slot INVALID. As with all memslot modifications, 1725 * this must be done on an unreachable slot to avoid modifying the 1726 * current slot in the active tree. 1727 */ 1728 kvm_copy_memslot(invalid_slot, old); 1729 invalid_slot->flags |= KVM_MEMSLOT_INVALID; 1730 kvm_replace_memslot(kvm, old, invalid_slot); 1731 1732 /* 1733 * Activate the slot that is now marked INVALID, but don't propagate 1734 * the slot to the now inactive slots. The slot is either going to be 1735 * deleted or recreated as a new slot. 1736 */ 1737 kvm_swap_active_memslots(kvm, old->as_id); 1738 1739 /* 1740 * From this point no new shadow pages pointing to a deleted, or moved, 1741 * memslot will be created. Validation of sp->gfn happens in: 1742 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1743 * - kvm_is_visible_gfn (mmu_check_root) 1744 */ 1745 kvm_arch_flush_shadow_memslot(kvm, old); 1746 kvm_arch_guest_memory_reclaimed(kvm); 1747 1748 /* Was released by kvm_swap_active_memslots(), reacquire. */ 1749 mutex_lock(&kvm->slots_arch_lock); 1750 1751 /* 1752 * Copy the arch-specific field of the newly-installed slot back to the 1753 * old slot as the arch data could have changed between releasing 1754 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock 1755 * above. Writers are required to retrieve memslots *after* acquiring 1756 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. 1757 */ 1758 old->arch = invalid_slot->arch; 1759 } 1760 1761 static void kvm_create_memslot(struct kvm *kvm, 1762 struct kvm_memory_slot *new) 1763 { 1764 /* Add the new memslot to the inactive set and activate. */ 1765 kvm_replace_memslot(kvm, NULL, new); 1766 kvm_activate_memslot(kvm, NULL, new); 1767 } 1768 1769 static void kvm_delete_memslot(struct kvm *kvm, 1770 struct kvm_memory_slot *old, 1771 struct kvm_memory_slot *invalid_slot) 1772 { 1773 /* 1774 * Remove the old memslot (in the inactive memslots) by passing NULL as 1775 * the "new" slot, and for the invalid version in the active slots. 1776 */ 1777 kvm_replace_memslot(kvm, old, NULL); 1778 kvm_activate_memslot(kvm, invalid_slot, NULL); 1779 } 1780 1781 static void kvm_move_memslot(struct kvm *kvm, 1782 struct kvm_memory_slot *old, 1783 struct kvm_memory_slot *new, 1784 struct kvm_memory_slot *invalid_slot) 1785 { 1786 /* 1787 * Replace the old memslot in the inactive slots, and then swap slots 1788 * and replace the current INVALID with the new as well. 1789 */ 1790 kvm_replace_memslot(kvm, old, new); 1791 kvm_activate_memslot(kvm, invalid_slot, new); 1792 } 1793 1794 static void kvm_update_flags_memslot(struct kvm *kvm, 1795 struct kvm_memory_slot *old, 1796 struct kvm_memory_slot *new) 1797 { 1798 /* 1799 * Similar to the MOVE case, but the slot doesn't need to be zapped as 1800 * an intermediate step. Instead, the old memslot is simply replaced 1801 * with a new, updated copy in both memslot sets. 
1802 */ 1803 kvm_replace_memslot(kvm, old, new); 1804 kvm_activate_memslot(kvm, old, new); 1805 } 1806 1807 static int kvm_set_memslot(struct kvm *kvm, 1808 struct kvm_memory_slot *old, 1809 struct kvm_memory_slot *new, 1810 enum kvm_mr_change change) 1811 { 1812 struct kvm_memory_slot *invalid_slot; 1813 int r; 1814 1815 /* 1816 * Released in kvm_swap_active_memslots(). 1817 * 1818 * Must be held from before the current memslots are copied until after 1819 * the new memslots are installed with rcu_assign_pointer, then 1820 * released before the synchronize srcu in kvm_swap_active_memslots(). 1821 * 1822 * When modifying memslots outside of the slots_lock, must be held 1823 * before reading the pointer to the current memslots until after all 1824 * changes to those memslots are complete. 1825 * 1826 * These rules ensure that installing new memslots does not lose 1827 * changes made to the previous memslots. 1828 */ 1829 mutex_lock(&kvm->slots_arch_lock); 1830 1831 /* 1832 * Invalidate the old slot if it's being deleted or moved. This is 1833 * done prior to actually deleting/moving the memslot to allow vCPUs to 1834 * continue running by ensuring there are no mappings or shadow pages 1835 * for the memslot when it is deleted/moved. Without pre-invalidation 1836 * (and without a lock), a window would exist between effecting the 1837 * delete/move and committing the changes in arch code where KVM or a 1838 * guest could access a non-existent memslot. 1839 * 1840 * Modifications are done on a temporary, unreachable slot. The old 1841 * slot needs to be preserved in case a later step fails and the 1842 * invalidation needs to be reverted. 1843 */ 1844 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1845 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1846 if (!invalid_slot) { 1847 mutex_unlock(&kvm->slots_arch_lock); 1848 return -ENOMEM; 1849 } 1850 kvm_invalidate_memslot(kvm, old, invalid_slot); 1851 } 1852 1853 r = kvm_prepare_memory_region(kvm, old, new, change); 1854 if (r) { 1855 /* 1856 * For DELETE/MOVE, revert the above INVALID change. No 1857 * modifications required since the original slot was preserved 1858 * in the inactive slots. Changing the active memslots also 1859 * release slots_arch_lock. 1860 */ 1861 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1862 kvm_activate_memslot(kvm, invalid_slot, old); 1863 kfree(invalid_slot); 1864 } else { 1865 mutex_unlock(&kvm->slots_arch_lock); 1866 } 1867 return r; 1868 } 1869 1870 /* 1871 * For DELETE and MOVE, the working slot is now active as the INVALID 1872 * version of the old slot. MOVE is particularly special as it reuses 1873 * the old slot and returns a copy of the old slot (in working_slot). 1874 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1875 * old slot is detached but otherwise preserved. 1876 */ 1877 if (change == KVM_MR_CREATE) 1878 kvm_create_memslot(kvm, new); 1879 else if (change == KVM_MR_DELETE) 1880 kvm_delete_memslot(kvm, old, invalid_slot); 1881 else if (change == KVM_MR_MOVE) 1882 kvm_move_memslot(kvm, old, new, invalid_slot); 1883 else if (change == KVM_MR_FLAGS_ONLY) 1884 kvm_update_flags_memslot(kvm, old, new); 1885 else 1886 BUG(); 1887 1888 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1889 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1890 kfree(invalid_slot); 1891 1892 /* 1893 * No need to refresh new->arch, changes after dropping slots_arch_lock 1894 * will directly hit the final, active memslot. 
Architectures are 1895 * responsible for knowing that new->arch may be stale. 1896 */ 1897 kvm_commit_memory_region(kvm, old, new, change); 1898 1899 return 0; 1900 } 1901 1902 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1903 gfn_t start, gfn_t end) 1904 { 1905 struct kvm_memslot_iter iter; 1906 1907 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1908 if (iter.slot->id != id) 1909 return true; 1910 } 1911 1912 return false; 1913 } 1914 1915 /* 1916 * Allocate some memory and give it an address in the guest physical address 1917 * space. 1918 * 1919 * Discontiguous memory is allowed, mostly for framebuffers. 1920 * 1921 * Must be called holding kvm->slots_lock for write. 1922 */ 1923 int __kvm_set_memory_region(struct kvm *kvm, 1924 const struct kvm_userspace_memory_region *mem) 1925 { 1926 struct kvm_memory_slot *old, *new; 1927 struct kvm_memslots *slots; 1928 enum kvm_mr_change change; 1929 unsigned long npages; 1930 gfn_t base_gfn; 1931 int as_id, id; 1932 int r; 1933 1934 r = check_memory_region_flags(mem); 1935 if (r) 1936 return r; 1937 1938 as_id = mem->slot >> 16; 1939 id = (u16)mem->slot; 1940 1941 /* General sanity checks */ 1942 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1943 (mem->memory_size != (unsigned long)mem->memory_size)) 1944 return -EINVAL; 1945 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1946 return -EINVAL; 1947 /* We can read the guest memory with __xxx_user() later on. */ 1948 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1949 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1950 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1951 mem->memory_size)) 1952 return -EINVAL; 1953 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1954 return -EINVAL; 1955 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1956 return -EINVAL; 1957 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1958 return -EINVAL; 1959 1960 slots = __kvm_memslots(kvm, as_id); 1961 1962 /* 1963 * Note, the old memslot (and the pointer itself!) may be invalidated 1964 * and/or destroyed by kvm_set_memslot(). 1965 */ 1966 old = id_to_memslot(slots, id); 1967 1968 if (!mem->memory_size) { 1969 if (!old || !old->npages) 1970 return -EINVAL; 1971 1972 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1973 return -EIO; 1974 1975 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1976 } 1977 1978 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1979 npages = (mem->memory_size >> PAGE_SHIFT); 1980 1981 if (!old || !old->npages) { 1982 change = KVM_MR_CREATE; 1983 1984 /* 1985 * To simplify KVM internals, the total number of pages across 1986 * all memslots must fit in an unsigned long. 1987 */ 1988 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 1989 return -EINVAL; 1990 } else { /* Modify an existing slot. */ 1991 if ((mem->userspace_addr != old->userspace_addr) || 1992 (npages != old->npages) || 1993 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 1994 return -EINVAL; 1995 1996 if (base_gfn != old->base_gfn) 1997 change = KVM_MR_MOVE; 1998 else if (mem->flags != old->flags) 1999 change = KVM_MR_FLAGS_ONLY; 2000 else /* Nothing to change. */ 2001 return 0; 2002 } 2003 2004 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 2005 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 2006 return -EEXIST; 2007 2008 /* Allocate a slot that will persist in the memslot. 
*/ 2009 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 2010 if (!new) 2011 return -ENOMEM; 2012 2013 new->as_id = as_id; 2014 new->id = id; 2015 new->base_gfn = base_gfn; 2016 new->npages = npages; 2017 new->flags = mem->flags; 2018 new->userspace_addr = mem->userspace_addr; 2019 2020 r = kvm_set_memslot(kvm, old, new, change); 2021 if (r) 2022 kfree(new); 2023 return r; 2024 } 2025 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 2026 2027 int kvm_set_memory_region(struct kvm *kvm, 2028 const struct kvm_userspace_memory_region *mem) 2029 { 2030 int r; 2031 2032 mutex_lock(&kvm->slots_lock); 2033 r = __kvm_set_memory_region(kvm, mem); 2034 mutex_unlock(&kvm->slots_lock); 2035 return r; 2036 } 2037 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 2038 2039 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 2040 struct kvm_userspace_memory_region *mem) 2041 { 2042 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 2043 return -EINVAL; 2044 2045 return kvm_set_memory_region(kvm, mem); 2046 } 2047 2048 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 2049 /** 2050 * kvm_get_dirty_log - get a snapshot of dirty pages 2051 * @kvm: pointer to kvm instance 2052 * @log: slot id and address to which we copy the log 2053 * @is_dirty: set to '1' if any dirty pages were found 2054 * @memslot: set to the associated memslot, always valid on success 2055 */ 2056 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 2057 int *is_dirty, struct kvm_memory_slot **memslot) 2058 { 2059 struct kvm_memslots *slots; 2060 int i, as_id, id; 2061 unsigned long n; 2062 unsigned long any = 0; 2063 2064 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2065 if (!kvm_use_dirty_bitmap(kvm)) 2066 return -ENXIO; 2067 2068 *memslot = NULL; 2069 *is_dirty = 0; 2070 2071 as_id = log->slot >> 16; 2072 id = (u16)log->slot; 2073 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2074 return -EINVAL; 2075 2076 slots = __kvm_memslots(kvm, as_id); 2077 *memslot = id_to_memslot(slots, id); 2078 if (!(*memslot) || !(*memslot)->dirty_bitmap) 2079 return -ENOENT; 2080 2081 kvm_arch_sync_dirty_log(kvm, *memslot); 2082 2083 n = kvm_dirty_bitmap_bytes(*memslot); 2084 2085 for (i = 0; !any && i < n/sizeof(long); ++i) 2086 any = (*memslot)->dirty_bitmap[i]; 2087 2088 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 2089 return -EFAULT; 2090 2091 if (any) 2092 *is_dirty = 1; 2093 return 0; 2094 } 2095 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 2096 2097 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2098 /** 2099 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 2100 * and reenable dirty page tracking for the corresponding pages. 2101 * @kvm: pointer to kvm instance 2102 * @log: slot id and address to which we copy the log 2103 * 2104 * We need to keep it in mind that VCPU threads can write to the bitmap 2105 * concurrently. So, to avoid losing track of dirty pages we keep the 2106 * following order: 2107 * 2108 * 1. Take a snapshot of the bit and clear it if needed. 2109 * 2. Write protect the corresponding page. 2110 * 3. Copy the snapshot to the userspace. 2111 * 4. Upon return caller flushes TLB's if needed. 2112 * 2113 * Between 2 and 4, the guest may write to the page using the remaining TLB 2114 * entry. This is not a problem because the page is reported dirty using 2115 * the snapshot taken before and step 4 ensures that writes done after 2116 * exiting to userspace will be logged for the next call. 
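 *
 * A minimal sketch of the consuming side (hypothetical VMM code, not part
 * of this file), assuming "log" has log->slot and log->dirty_bitmap set up
 * and manual protection via KVM_CLEAR_DIRTY_LOG is not in use:
 *
 *   ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);    // steps 1-3 above
 *   for each set bit in log->dirty_bitmap:
 *           copy that guest page to the destination;
 *
 * Pages the guest redirties through a stale TLB entry after the snapshot
 * simply show up again in the next call, as explained above.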
2117 * 2118 */ 2119 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 2120 { 2121 struct kvm_memslots *slots; 2122 struct kvm_memory_slot *memslot; 2123 int i, as_id, id; 2124 unsigned long n; 2125 unsigned long *dirty_bitmap; 2126 unsigned long *dirty_bitmap_buffer; 2127 bool flush; 2128 2129 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2130 if (!kvm_use_dirty_bitmap(kvm)) 2131 return -ENXIO; 2132 2133 as_id = log->slot >> 16; 2134 id = (u16)log->slot; 2135 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2136 return -EINVAL; 2137 2138 slots = __kvm_memslots(kvm, as_id); 2139 memslot = id_to_memslot(slots, id); 2140 if (!memslot || !memslot->dirty_bitmap) 2141 return -ENOENT; 2142 2143 dirty_bitmap = memslot->dirty_bitmap; 2144 2145 kvm_arch_sync_dirty_log(kvm, memslot); 2146 2147 n = kvm_dirty_bitmap_bytes(memslot); 2148 flush = false; 2149 if (kvm->manual_dirty_log_protect) { 2150 /* 2151 * Unlike kvm_get_dirty_log, we always return false in *flush, 2152 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 2153 * is some code duplication between this function and 2154 * kvm_get_dirty_log, but hopefully all architecture 2155 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log 2156 * can be eliminated. 2157 */ 2158 dirty_bitmap_buffer = dirty_bitmap; 2159 } else { 2160 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2161 memset(dirty_bitmap_buffer, 0, n); 2162 2163 KVM_MMU_LOCK(kvm); 2164 for (i = 0; i < n / sizeof(long); i++) { 2165 unsigned long mask; 2166 gfn_t offset; 2167 2168 if (!dirty_bitmap[i]) 2169 continue; 2170 2171 flush = true; 2172 mask = xchg(&dirty_bitmap[i], 0); 2173 dirty_bitmap_buffer[i] = mask; 2174 2175 offset = i * BITS_PER_LONG; 2176 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2177 offset, mask); 2178 } 2179 KVM_MMU_UNLOCK(kvm); 2180 } 2181 2182 if (flush) 2183 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2184 2185 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2186 return -EFAULT; 2187 return 0; 2188 } 2189 2190 2191 /** 2192 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2193 * @kvm: kvm instance 2194 * @log: slot id and address to which we copy the log 2195 * 2196 * Steps 1-4 below provide general overview of dirty page logging. See 2197 * kvm_get_dirty_log_protect() function description for additional details. 2198 * 2199 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 2200 * always flush the TLB (step 4) even if previous step failed and the dirty 2201 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 2202 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 2203 * writes will be marked dirty for next log read. 2204 * 2205 * 1. Take a snapshot of the bit and clear it if needed. 2206 * 2. Write protect the corresponding page. 2207 * 3. Copy the snapshot to the userspace. 2208 * 4. Flush TLB's if needed. 2209 */ 2210 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2211 struct kvm_dirty_log *log) 2212 { 2213 int r; 2214 2215 mutex_lock(&kvm->slots_lock); 2216 2217 r = kvm_get_dirty_log_protect(kvm, log); 2218 2219 mutex_unlock(&kvm->slots_lock); 2220 return r; 2221 } 2222 2223 /** 2224 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2225 * and reenable dirty page tracking for the corresponding pages. 
2226 * @kvm: pointer to kvm instance 2227 * @log: slot id and address from which to fetch the bitmap of dirty pages 2228 */ 2229 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2230 struct kvm_clear_dirty_log *log) 2231 { 2232 struct kvm_memslots *slots; 2233 struct kvm_memory_slot *memslot; 2234 int as_id, id; 2235 gfn_t offset; 2236 unsigned long i, n; 2237 unsigned long *dirty_bitmap; 2238 unsigned long *dirty_bitmap_buffer; 2239 bool flush; 2240 2241 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2242 if (!kvm_use_dirty_bitmap(kvm)) 2243 return -ENXIO; 2244 2245 as_id = log->slot >> 16; 2246 id = (u16)log->slot; 2247 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2248 return -EINVAL; 2249 2250 if (log->first_page & 63) 2251 return -EINVAL; 2252 2253 slots = __kvm_memslots(kvm, as_id); 2254 memslot = id_to_memslot(slots, id); 2255 if (!memslot || !memslot->dirty_bitmap) 2256 return -ENOENT; 2257 2258 dirty_bitmap = memslot->dirty_bitmap; 2259 2260 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2261 2262 if (log->first_page > memslot->npages || 2263 log->num_pages > memslot->npages - log->first_page || 2264 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2265 return -EINVAL; 2266 2267 kvm_arch_sync_dirty_log(kvm, memslot); 2268 2269 flush = false; 2270 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2271 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2272 return -EFAULT; 2273 2274 KVM_MMU_LOCK(kvm); 2275 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2276 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2277 i++, offset += BITS_PER_LONG) { 2278 unsigned long mask = *dirty_bitmap_buffer++; 2279 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2280 if (!mask) 2281 continue; 2282 2283 mask &= atomic_long_fetch_andnot(mask, p); 2284 2285 /* 2286 * mask contains the bits that really have been cleared. This 2287 * never includes any bits beyond the length of the memslot (if 2288 * the length is not aligned to 64 pages), therefore it is not 2289 * a problem if userspace sets them in log->dirty_bitmap. 2290 */ 2291 if (mask) { 2292 flush = true; 2293 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2294 offset, mask); 2295 } 2296 } 2297 KVM_MMU_UNLOCK(kvm); 2298 2299 if (flush) 2300 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2301 2302 return 0; 2303 } 2304 2305 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2306 struct kvm_clear_dirty_log *log) 2307 { 2308 int r; 2309 2310 mutex_lock(&kvm->slots_lock); 2311 2312 r = kvm_clear_dirty_log_protect(kvm, log); 2313 2314 mutex_unlock(&kvm->slots_lock); 2315 return r; 2316 } 2317 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2318 2319 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2320 { 2321 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2322 } 2323 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2324 2325 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2326 { 2327 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2328 u64 gen = slots->generation; 2329 struct kvm_memory_slot *slot; 2330 2331 /* 2332 * This also protects against using a memslot from a different address space, 2333 * since different address spaces have different generation numbers. 
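 *
 * In other words, the per-vCPU cache below is only trusted while
 * slots->generation is unchanged; any memslot update swaps the active
 * memslots and bumps the generation, which forces last_used_slot to be
 * dropped and looked up again rather than dereferenced after the old slot
 * may have been freed.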
2334 */ 2335 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2336 vcpu->last_used_slot = NULL; 2337 vcpu->last_used_slot_gen = gen; 2338 } 2339 2340 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2341 if (slot) 2342 return slot; 2343 2344 /* 2345 * Fall back to searching all memslots. We purposely use 2346 * search_memslots() instead of __gfn_to_memslot() to avoid 2347 * thrashing the VM-wide last_used_slot in kvm_memslots. 2348 */ 2349 slot = search_memslots(slots, gfn, false); 2350 if (slot) { 2351 vcpu->last_used_slot = slot; 2352 return slot; 2353 } 2354 2355 return NULL; 2356 } 2357 2358 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2359 { 2360 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2361 2362 return kvm_is_visible_memslot(memslot); 2363 } 2364 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2365 2366 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2367 { 2368 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2369 2370 return kvm_is_visible_memslot(memslot); 2371 } 2372 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2373 2374 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2375 { 2376 struct vm_area_struct *vma; 2377 unsigned long addr, size; 2378 2379 size = PAGE_SIZE; 2380 2381 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2382 if (kvm_is_error_hva(addr)) 2383 return PAGE_SIZE; 2384 2385 mmap_read_lock(current->mm); 2386 vma = find_vma(current->mm, addr); 2387 if (!vma) 2388 goto out; 2389 2390 size = vma_kernel_pagesize(vma); 2391 2392 out: 2393 mmap_read_unlock(current->mm); 2394 2395 return size; 2396 } 2397 2398 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2399 { 2400 return slot->flags & KVM_MEM_READONLY; 2401 } 2402 2403 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2404 gfn_t *nr_pages, bool write) 2405 { 2406 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2407 return KVM_HVA_ERR_BAD; 2408 2409 if (memslot_is_readonly(slot) && write) 2410 return KVM_HVA_ERR_RO_BAD; 2411 2412 if (nr_pages) 2413 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2414 2415 return __gfn_to_hva_memslot(slot, gfn); 2416 } 2417 2418 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2419 gfn_t *nr_pages) 2420 { 2421 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2422 } 2423 2424 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2425 gfn_t gfn) 2426 { 2427 return gfn_to_hva_many(slot, gfn, NULL); 2428 } 2429 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2430 2431 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2432 { 2433 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2434 } 2435 EXPORT_SYMBOL_GPL(gfn_to_hva); 2436 2437 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2438 { 2439 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2440 } 2441 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2442 2443 /* 2444 * Return the hva of a @gfn and the R/W attribute if possible. 
2445 * 2446 * @slot: the kvm_memory_slot which contains @gfn 2447 * @gfn: the gfn to be translated 2448 * @writable: used to return the read/write attribute of the @slot if the hva 2449 * is valid and @writable is not NULL 2450 */ 2451 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2452 gfn_t gfn, bool *writable) 2453 { 2454 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2455 2456 if (!kvm_is_error_hva(hva) && writable) 2457 *writable = !memslot_is_readonly(slot); 2458 2459 return hva; 2460 } 2461 2462 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2463 { 2464 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2465 2466 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2467 } 2468 2469 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2470 { 2471 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2472 2473 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2474 } 2475 2476 static inline int check_user_page_hwpoison(unsigned long addr) 2477 { 2478 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2479 2480 rc = get_user_pages(addr, 1, flags, NULL); 2481 return rc == -EHWPOISON; 2482 } 2483 2484 /* 2485 * The fast path to get the writable pfn which will be stored in @pfn, 2486 * true indicates success, otherwise false is returned. It's also the 2487 * only part that runs if we can in atomic context. 2488 */ 2489 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2490 bool *writable, kvm_pfn_t *pfn) 2491 { 2492 struct page *page[1]; 2493 2494 /* 2495 * Fast pin a writable pfn only if it is a write fault request 2496 * or the caller allows to map a writable pfn for a read fault 2497 * request. 2498 */ 2499 if (!(write_fault || writable)) 2500 return false; 2501 2502 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2503 *pfn = page_to_pfn(page[0]); 2504 2505 if (writable) 2506 *writable = true; 2507 return true; 2508 } 2509 2510 return false; 2511 } 2512 2513 /* 2514 * The slow path to get the pfn of the specified host virtual address, 2515 * 1 indicates success, -errno is returned if error is detected. 
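 *
 * The caller (hva_to_pfn() below) treats a return of 1 as success and maps
 * negative values onto KVM error pfns, e.g. -EINTR (possible when
 * FOLL_INTERRUPTIBLE is set) becomes KVM_PFN_ERR_SIGPENDING and -EHWPOISON
 * becomes KVM_PFN_ERR_HWPOISON.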
2516 */ 2517 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2518 bool interruptible, bool *writable, kvm_pfn_t *pfn) 2519 { 2520 unsigned int flags = FOLL_HWPOISON; 2521 struct page *page; 2522 int npages; 2523 2524 might_sleep(); 2525 2526 if (writable) 2527 *writable = write_fault; 2528 2529 if (write_fault) 2530 flags |= FOLL_WRITE; 2531 if (async) 2532 flags |= FOLL_NOWAIT; 2533 if (interruptible) 2534 flags |= FOLL_INTERRUPTIBLE; 2535 2536 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2537 if (npages != 1) 2538 return npages; 2539 2540 /* map read fault as writable if possible */ 2541 if (unlikely(!write_fault) && writable) { 2542 struct page *wpage; 2543 2544 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2545 *writable = true; 2546 put_page(page); 2547 page = wpage; 2548 } 2549 } 2550 *pfn = page_to_pfn(page); 2551 return npages; 2552 } 2553 2554 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2555 { 2556 if (unlikely(!(vma->vm_flags & VM_READ))) 2557 return false; 2558 2559 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2560 return false; 2561 2562 return true; 2563 } 2564 2565 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2566 { 2567 struct page *page = kvm_pfn_to_refcounted_page(pfn); 2568 2569 if (!page) 2570 return 1; 2571 2572 return get_page_unless_zero(page); 2573 } 2574 2575 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2576 unsigned long addr, bool write_fault, 2577 bool *writable, kvm_pfn_t *p_pfn) 2578 { 2579 kvm_pfn_t pfn; 2580 pte_t *ptep; 2581 pte_t pte; 2582 spinlock_t *ptl; 2583 int r; 2584 2585 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2586 if (r) { 2587 /* 2588 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2589 * not call the fault handler, so do it here. 2590 */ 2591 bool unlocked = false; 2592 r = fixup_user_fault(current->mm, addr, 2593 (write_fault ? FAULT_FLAG_WRITE : 0), 2594 &unlocked); 2595 if (unlocked) 2596 return -EAGAIN; 2597 if (r) 2598 return r; 2599 2600 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2601 if (r) 2602 return r; 2603 } 2604 2605 pte = ptep_get(ptep); 2606 2607 if (write_fault && !pte_write(pte)) { 2608 pfn = KVM_PFN_ERR_RO_FAULT; 2609 goto out; 2610 } 2611 2612 if (writable) 2613 *writable = pte_write(pte); 2614 pfn = pte_pfn(pte); 2615 2616 /* 2617 * Get a reference here because callers of *hva_to_pfn* and 2618 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2619 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2620 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2621 * simply do nothing for reserved pfns. 2622 * 2623 * Whoever called remap_pfn_range is also going to call e.g. 2624 * unmap_mapping_range before the underlying pages are freed, 2625 * causing a call to our MMU notifier. 2626 * 2627 * Certain IO or PFNMAP mappings can be backed with valid 2628 * struct pages, but be allocated without refcounting e.g., 2629 * tail pages of non-compound higher order allocations, which 2630 * would then underflow the refcount when the caller does the 2631 * required put_page. Don't allow those pages here. 2632 */ 2633 if (!kvm_try_get_pfn(pfn)) 2634 r = -EFAULT; 2635 2636 out: 2637 pte_unmap_unlock(ptep, ptl); 2638 *p_pfn = pfn; 2639 2640 return r; 2641 } 2642 2643 /* 2644 * Pin guest page in memory and return its pfn. 
2645 * @addr: host virtual address which maps memory to the guest 2646 * @atomic: whether this function can sleep 2647 * @interruptible: whether the process can be interrupted by non-fatal signals 2648 * @async: whether this function need to wait IO complete if the 2649 * host page is not in the memory 2650 * @write_fault: whether we should get a writable host page 2651 * @writable: whether it allows to map a writable host page for !@write_fault 2652 * 2653 * The function will map a writable host page for these two cases: 2654 * 1): @write_fault = true 2655 * 2): @write_fault = false && @writable, @writable will tell the caller 2656 * whether the mapping is writable. 2657 */ 2658 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible, 2659 bool *async, bool write_fault, bool *writable) 2660 { 2661 struct vm_area_struct *vma; 2662 kvm_pfn_t pfn; 2663 int npages, r; 2664 2665 /* we can do it either atomically or asynchronously, not both */ 2666 BUG_ON(atomic && async); 2667 2668 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2669 return pfn; 2670 2671 if (atomic) 2672 return KVM_PFN_ERR_FAULT; 2673 2674 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible, 2675 writable, &pfn); 2676 if (npages == 1) 2677 return pfn; 2678 if (npages == -EINTR) 2679 return KVM_PFN_ERR_SIGPENDING; 2680 2681 mmap_read_lock(current->mm); 2682 if (npages == -EHWPOISON || 2683 (!async && check_user_page_hwpoison(addr))) { 2684 pfn = KVM_PFN_ERR_HWPOISON; 2685 goto exit; 2686 } 2687 2688 retry: 2689 vma = vma_lookup(current->mm, addr); 2690 2691 if (vma == NULL) 2692 pfn = KVM_PFN_ERR_FAULT; 2693 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2694 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); 2695 if (r == -EAGAIN) 2696 goto retry; 2697 if (r < 0) 2698 pfn = KVM_PFN_ERR_FAULT; 2699 } else { 2700 if (async && vma_is_valid(vma, write_fault)) 2701 *async = true; 2702 pfn = KVM_PFN_ERR_FAULT; 2703 } 2704 exit: 2705 mmap_read_unlock(current->mm); 2706 return pfn; 2707 } 2708 2709 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 2710 bool atomic, bool interruptible, bool *async, 2711 bool write_fault, bool *writable, hva_t *hva) 2712 { 2713 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2714 2715 if (hva) 2716 *hva = addr; 2717 2718 if (addr == KVM_HVA_ERR_RO_BAD) { 2719 if (writable) 2720 *writable = false; 2721 return KVM_PFN_ERR_RO_FAULT; 2722 } 2723 2724 if (kvm_is_error_hva(addr)) { 2725 if (writable) 2726 *writable = false; 2727 return KVM_PFN_NOSLOT; 2728 } 2729 2730 /* Do not map writable pfn in the readonly memslot. 
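 *
 * Clearing *writable here and passing writable == NULL on to hva_to_pfn()
 * also keeps the fast path and the "map read fault as writable" upgrade in
 * hva_to_pfn_slow() from ever producing a writable mapping for such a slot.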
*/ 2731 if (writable && memslot_is_readonly(slot)) { 2732 *writable = false; 2733 writable = NULL; 2734 } 2735 2736 return hva_to_pfn(addr, atomic, interruptible, async, write_fault, 2737 writable); 2738 } 2739 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2740 2741 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2742 bool *writable) 2743 { 2744 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false, 2745 NULL, write_fault, writable, NULL); 2746 } 2747 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2748 2749 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) 2750 { 2751 return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true, 2752 NULL, NULL); 2753 } 2754 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2755 2756 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) 2757 { 2758 return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true, 2759 NULL, NULL); 2760 } 2761 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2762 2763 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2764 { 2765 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2766 } 2767 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2768 2769 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2770 { 2771 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2772 } 2773 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2774 2775 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2776 { 2777 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2778 } 2779 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2780 2781 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2782 struct page **pages, int nr_pages) 2783 { 2784 unsigned long addr; 2785 gfn_t entry = 0; 2786 2787 addr = gfn_to_hva_many(slot, gfn, &entry); 2788 if (kvm_is_error_hva(addr)) 2789 return -1; 2790 2791 if (entry < nr_pages) 2792 return 0; 2793 2794 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2795 } 2796 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2797 2798 /* 2799 * Do not use this helper unless you are absolutely certain the gfn _must_ be 2800 * backed by 'struct page'. A valid example is if the backing memslot is 2801 * controlled by KVM. Note, if the returned page is valid, it's refcount has 2802 * been elevated by gfn_to_pfn(). 
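 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   struct page *page = gfn_to_page(kvm, gfn);
 *
 *   if (!is_error_page(page)) {
 *           /* ... access the page contents ... */
 *           kvm_release_page_dirty(page);   /* or kvm_release_page_clean() */
 *   }
 *
 * i.e. every successful call must be paired with one of the "release"
 * helpers to drop the reference taken via gfn_to_pfn().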
2803 */ 2804 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2805 { 2806 struct page *page; 2807 kvm_pfn_t pfn; 2808 2809 pfn = gfn_to_pfn(kvm, gfn); 2810 2811 if (is_error_noslot_pfn(pfn)) 2812 return KVM_ERR_PTR_BAD_PAGE; 2813 2814 page = kvm_pfn_to_refcounted_page(pfn); 2815 if (!page) 2816 return KVM_ERR_PTR_BAD_PAGE; 2817 2818 return page; 2819 } 2820 EXPORT_SYMBOL_GPL(gfn_to_page); 2821 2822 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2823 { 2824 if (dirty) 2825 kvm_release_pfn_dirty(pfn); 2826 else 2827 kvm_release_pfn_clean(pfn); 2828 } 2829 2830 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2831 { 2832 kvm_pfn_t pfn; 2833 void *hva = NULL; 2834 struct page *page = KVM_UNMAPPED_PAGE; 2835 2836 if (!map) 2837 return -EINVAL; 2838 2839 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2840 if (is_error_noslot_pfn(pfn)) 2841 return -EINVAL; 2842 2843 if (pfn_valid(pfn)) { 2844 page = pfn_to_page(pfn); 2845 hva = kmap(page); 2846 #ifdef CONFIG_HAS_IOMEM 2847 } else { 2848 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2849 #endif 2850 } 2851 2852 if (!hva) 2853 return -EFAULT; 2854 2855 map->page = page; 2856 map->hva = hva; 2857 map->pfn = pfn; 2858 map->gfn = gfn; 2859 2860 return 0; 2861 } 2862 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2863 2864 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2865 { 2866 if (!map) 2867 return; 2868 2869 if (!map->hva) 2870 return; 2871 2872 if (map->page != KVM_UNMAPPED_PAGE) 2873 kunmap(map->page); 2874 #ifdef CONFIG_HAS_IOMEM 2875 else 2876 memunmap(map->hva); 2877 #endif 2878 2879 if (dirty) 2880 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2881 2882 kvm_release_pfn(map->pfn, dirty); 2883 2884 map->hva = NULL; 2885 map->page = NULL; 2886 } 2887 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2888 2889 static bool kvm_is_ad_tracked_page(struct page *page) 2890 { 2891 /* 2892 * Per page-flags.h, pages tagged PG_reserved "should in general not be 2893 * touched (e.g. set dirty) except by its owner". 2894 */ 2895 return !PageReserved(page); 2896 } 2897 2898 static void kvm_set_page_dirty(struct page *page) 2899 { 2900 if (kvm_is_ad_tracked_page(page)) 2901 SetPageDirty(page); 2902 } 2903 2904 static void kvm_set_page_accessed(struct page *page) 2905 { 2906 if (kvm_is_ad_tracked_page(page)) 2907 mark_page_accessed(page); 2908 } 2909 2910 void kvm_release_page_clean(struct page *page) 2911 { 2912 WARN_ON(is_error_page(page)); 2913 2914 kvm_set_page_accessed(page); 2915 put_page(page); 2916 } 2917 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2918 2919 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2920 { 2921 struct page *page; 2922 2923 if (is_error_noslot_pfn(pfn)) 2924 return; 2925 2926 page = kvm_pfn_to_refcounted_page(pfn); 2927 if (!page) 2928 return; 2929 2930 kvm_release_page_clean(page); 2931 } 2932 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2933 2934 void kvm_release_page_dirty(struct page *page) 2935 { 2936 WARN_ON(is_error_page(page)); 2937 2938 kvm_set_page_dirty(page); 2939 kvm_release_page_clean(page); 2940 } 2941 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2942 2943 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2944 { 2945 struct page *page; 2946 2947 if (is_error_noslot_pfn(pfn)) 2948 return; 2949 2950 page = kvm_pfn_to_refcounted_page(pfn); 2951 if (!page) 2952 return; 2953 2954 kvm_release_page_dirty(page); 2955 } 2956 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2957 2958 /* 2959 * Note, checking for an error/noslot pfn is the caller's responsibility when 2960 * directly marking a page dirty/accessed. 
Unlike the "release" helpers, the 2961 * "set" helpers are not to be used when the pfn might point at garbage. 2962 */ 2963 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2964 { 2965 if (WARN_ON(is_error_noslot_pfn(pfn))) 2966 return; 2967 2968 if (pfn_valid(pfn)) 2969 kvm_set_page_dirty(pfn_to_page(pfn)); 2970 } 2971 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2972 2973 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2974 { 2975 if (WARN_ON(is_error_noslot_pfn(pfn))) 2976 return; 2977 2978 if (pfn_valid(pfn)) 2979 kvm_set_page_accessed(pfn_to_page(pfn)); 2980 } 2981 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2982 2983 static int next_segment(unsigned long len, int offset) 2984 { 2985 if (len > PAGE_SIZE - offset) 2986 return PAGE_SIZE - offset; 2987 else 2988 return len; 2989 } 2990 2991 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2992 void *data, int offset, int len) 2993 { 2994 int r; 2995 unsigned long addr; 2996 2997 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2998 if (kvm_is_error_hva(addr)) 2999 return -EFAULT; 3000 r = __copy_from_user(data, (void __user *)addr + offset, len); 3001 if (r) 3002 return -EFAULT; 3003 return 0; 3004 } 3005 3006 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 3007 int len) 3008 { 3009 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3010 3011 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3012 } 3013 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 3014 3015 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 3016 int offset, int len) 3017 { 3018 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3019 3020 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3021 } 3022 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 3023 3024 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 3025 { 3026 gfn_t gfn = gpa >> PAGE_SHIFT; 3027 int seg; 3028 int offset = offset_in_page(gpa); 3029 int ret; 3030 3031 while ((seg = next_segment(len, offset)) != 0) { 3032 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 3033 if (ret < 0) 3034 return ret; 3035 offset = 0; 3036 len -= seg; 3037 data += seg; 3038 ++gfn; 3039 } 3040 return 0; 3041 } 3042 EXPORT_SYMBOL_GPL(kvm_read_guest); 3043 3044 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 3045 { 3046 gfn_t gfn = gpa >> PAGE_SHIFT; 3047 int seg; 3048 int offset = offset_in_page(gpa); 3049 int ret; 3050 3051 while ((seg = next_segment(len, offset)) != 0) { 3052 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 3053 if (ret < 0) 3054 return ret; 3055 offset = 0; 3056 len -= seg; 3057 data += seg; 3058 ++gfn; 3059 } 3060 return 0; 3061 } 3062 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 3063 3064 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 3065 void *data, int offset, unsigned long len) 3066 { 3067 int r; 3068 unsigned long addr; 3069 3070 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3071 if (kvm_is_error_hva(addr)) 3072 return -EFAULT; 3073 pagefault_disable(); 3074 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 3075 pagefault_enable(); 3076 if (r) 3077 return -EFAULT; 3078 return 0; 3079 } 3080 3081 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 3082 void *data, unsigned long len) 3083 { 3084 gfn_t gfn = gpa >> PAGE_SHIFT; 3085 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3086 int offset = offset_in_page(gpa); 3087 3088 return 
__kvm_read_guest_atomic(slot, gfn, data, offset, len); 3089 } 3090 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 3091 3092 static int __kvm_write_guest_page(struct kvm *kvm, 3093 struct kvm_memory_slot *memslot, gfn_t gfn, 3094 const void *data, int offset, int len) 3095 { 3096 int r; 3097 unsigned long addr; 3098 3099 addr = gfn_to_hva_memslot(memslot, gfn); 3100 if (kvm_is_error_hva(addr)) 3101 return -EFAULT; 3102 r = __copy_to_user((void __user *)addr + offset, data, len); 3103 if (r) 3104 return -EFAULT; 3105 mark_page_dirty_in_slot(kvm, memslot, gfn); 3106 return 0; 3107 } 3108 3109 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 3110 const void *data, int offset, int len) 3111 { 3112 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3113 3114 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 3115 } 3116 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 3117 3118 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 3119 const void *data, int offset, int len) 3120 { 3121 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3122 3123 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 3124 } 3125 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 3126 3127 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 3128 unsigned long len) 3129 { 3130 gfn_t gfn = gpa >> PAGE_SHIFT; 3131 int seg; 3132 int offset = offset_in_page(gpa); 3133 int ret; 3134 3135 while ((seg = next_segment(len, offset)) != 0) { 3136 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 3137 if (ret < 0) 3138 return ret; 3139 offset = 0; 3140 len -= seg; 3141 data += seg; 3142 ++gfn; 3143 } 3144 return 0; 3145 } 3146 EXPORT_SYMBOL_GPL(kvm_write_guest); 3147 3148 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 3149 unsigned long len) 3150 { 3151 gfn_t gfn = gpa >> PAGE_SHIFT; 3152 int seg; 3153 int offset = offset_in_page(gpa); 3154 int ret; 3155 3156 while ((seg = next_segment(len, offset)) != 0) { 3157 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3158 if (ret < 0) 3159 return ret; 3160 offset = 0; 3161 len -= seg; 3162 data += seg; 3163 ++gfn; 3164 } 3165 return 0; 3166 } 3167 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3168 3169 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3170 struct gfn_to_hva_cache *ghc, 3171 gpa_t gpa, unsigned long len) 3172 { 3173 int offset = offset_in_page(gpa); 3174 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3175 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3176 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3177 gfn_t nr_pages_avail; 3178 3179 /* Update ghc->generation before performing any error checks. */ 3180 ghc->generation = slots->generation; 3181 3182 if (start_gfn > end_gfn) { 3183 ghc->hva = KVM_HVA_ERR_BAD; 3184 return -EINVAL; 3185 } 3186 3187 /* 3188 * If the requested region crosses two memslots, we still 3189 * verify that the entire region is valid here. 3190 */ 3191 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3192 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3193 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3194 &nr_pages_avail); 3195 if (kvm_is_error_hva(ghc->hva)) 3196 return -EFAULT; 3197 } 3198 3199 /* Use the slow path for cross page reads and writes. 
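 *
 * "Slow path" here means ghc->memslot is left NULL, so the cached read and
 * write helpers below fall back to kvm_read_guest()/kvm_write_guest() for
 * regions that span a page boundary instead of using the cached hva.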
*/ 3200 if (nr_pages_needed == 1) 3201 ghc->hva += offset; 3202 else 3203 ghc->memslot = NULL; 3204 3205 ghc->gpa = gpa; 3206 ghc->len = len; 3207 return 0; 3208 } 3209 3210 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3211 gpa_t gpa, unsigned long len) 3212 { 3213 struct kvm_memslots *slots = kvm_memslots(kvm); 3214 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3215 } 3216 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3217 3218 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3219 void *data, unsigned int offset, 3220 unsigned long len) 3221 { 3222 struct kvm_memslots *slots = kvm_memslots(kvm); 3223 int r; 3224 gpa_t gpa = ghc->gpa + offset; 3225 3226 if (WARN_ON_ONCE(len + offset > ghc->len)) 3227 return -EINVAL; 3228 3229 if (slots->generation != ghc->generation) { 3230 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3231 return -EFAULT; 3232 } 3233 3234 if (kvm_is_error_hva(ghc->hva)) 3235 return -EFAULT; 3236 3237 if (unlikely(!ghc->memslot)) 3238 return kvm_write_guest(kvm, gpa, data, len); 3239 3240 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3241 if (r) 3242 return -EFAULT; 3243 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3244 3245 return 0; 3246 } 3247 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3248 3249 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3250 void *data, unsigned long len) 3251 { 3252 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3253 } 3254 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3255 3256 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3257 void *data, unsigned int offset, 3258 unsigned long len) 3259 { 3260 struct kvm_memslots *slots = kvm_memslots(kvm); 3261 int r; 3262 gpa_t gpa = ghc->gpa + offset; 3263 3264 if (WARN_ON_ONCE(len + offset > ghc->len)) 3265 return -EINVAL; 3266 3267 if (slots->generation != ghc->generation) { 3268 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3269 return -EFAULT; 3270 } 3271 3272 if (kvm_is_error_hva(ghc->hva)) 3273 return -EFAULT; 3274 3275 if (unlikely(!ghc->memslot)) 3276 return kvm_read_guest(kvm, gpa, data, len); 3277 3278 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3279 if (r) 3280 return -EFAULT; 3281 3282 return 0; 3283 } 3284 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3285 3286 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3287 void *data, unsigned long len) 3288 { 3289 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3290 } 3291 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3292 3293 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3294 { 3295 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3296 gfn_t gfn = gpa >> PAGE_SHIFT; 3297 int seg; 3298 int offset = offset_in_page(gpa); 3299 int ret; 3300 3301 while ((seg = next_segment(len, offset)) != 0) { 3302 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 3303 if (ret < 0) 3304 return ret; 3305 offset = 0; 3306 len -= seg; 3307 ++gfn; 3308 } 3309 return 0; 3310 } 3311 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3312 3313 void mark_page_dirty_in_slot(struct kvm *kvm, 3314 const struct kvm_memory_slot *memslot, 3315 gfn_t gfn) 3316 { 3317 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3318 3319 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3320 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) 3321 return; 3322 3323 WARN_ON_ONCE(!vcpu && 
!kvm_arch_allow_write_without_running_vcpu(kvm)); 3324 #endif 3325 3326 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3327 unsigned long rel_gfn = gfn - memslot->base_gfn; 3328 u32 slot = (memslot->as_id << 16) | memslot->id; 3329 3330 if (kvm->dirty_ring_size && vcpu) 3331 kvm_dirty_ring_push(vcpu, slot, rel_gfn); 3332 else if (memslot->dirty_bitmap) 3333 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3334 } 3335 } 3336 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3337 3338 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3339 { 3340 struct kvm_memory_slot *memslot; 3341 3342 memslot = gfn_to_memslot(kvm, gfn); 3343 mark_page_dirty_in_slot(kvm, memslot, gfn); 3344 } 3345 EXPORT_SYMBOL_GPL(mark_page_dirty); 3346 3347 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3348 { 3349 struct kvm_memory_slot *memslot; 3350 3351 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3352 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3353 } 3354 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3355 3356 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3357 { 3358 if (!vcpu->sigset_active) 3359 return; 3360 3361 /* 3362 * This does a lockless modification of ->real_blocked, which is fine 3363 * because, only current can change ->real_blocked and all readers of 3364 * ->real_blocked don't care as long ->real_blocked is always a subset 3365 * of ->blocked. 3366 */ 3367 sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); 3368 } 3369 3370 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 3371 { 3372 if (!vcpu->sigset_active) 3373 return; 3374 3375 sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); 3376 sigemptyset(¤t->real_blocked); 3377 } 3378 3379 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 3380 { 3381 unsigned int old, val, grow, grow_start; 3382 3383 old = val = vcpu->halt_poll_ns; 3384 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3385 grow = READ_ONCE(halt_poll_ns_grow); 3386 if (!grow) 3387 goto out; 3388 3389 val *= grow; 3390 if (val < grow_start) 3391 val = grow_start; 3392 3393 vcpu->halt_poll_ns = val; 3394 out: 3395 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 3396 } 3397 3398 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3399 { 3400 unsigned int old, val, shrink, grow_start; 3401 3402 old = val = vcpu->halt_poll_ns; 3403 shrink = READ_ONCE(halt_poll_ns_shrink); 3404 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3405 if (shrink == 0) 3406 val = 0; 3407 else 3408 val /= shrink; 3409 3410 if (val < grow_start) 3411 val = 0; 3412 3413 vcpu->halt_poll_ns = val; 3414 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 3415 } 3416 3417 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 3418 { 3419 int ret = -EINTR; 3420 int idx = srcu_read_lock(&vcpu->kvm->srcu); 3421 3422 if (kvm_arch_vcpu_runnable(vcpu)) 3423 goto out; 3424 if (kvm_cpu_has_pending_timer(vcpu)) 3425 goto out; 3426 if (signal_pending(current)) 3427 goto out; 3428 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) 3429 goto out; 3430 3431 ret = 0; 3432 out: 3433 srcu_read_unlock(&vcpu->kvm->srcu, idx); 3434 return ret; 3435 } 3436 3437 /* 3438 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is 3439 * pending. This is mostly used when halting a vCPU, but may also be used 3440 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. 
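 *
 * Callers typically invoke this only after any halt-polling has come up
 * empty, e.g. (simplified sketch of kvm_vcpu_halt() below):
 *
 *   poll for up to vcpu->halt_poll_ns;
 *   if (no wake event arrived)
 *           waited = kvm_vcpu_block(vcpu);
 *
 * The return value reports whether the vCPU was actually scheduled out at
 * least once.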
3441 */ 3442 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3443 { 3444 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3445 bool waited = false; 3446 3447 vcpu->stat.generic.blocking = 1; 3448 3449 preempt_disable(); 3450 kvm_arch_vcpu_blocking(vcpu); 3451 prepare_to_rcuwait(wait); 3452 preempt_enable(); 3453 3454 for (;;) { 3455 set_current_state(TASK_INTERRUPTIBLE); 3456 3457 if (kvm_vcpu_check_block(vcpu) < 0) 3458 break; 3459 3460 waited = true; 3461 schedule(); 3462 } 3463 3464 preempt_disable(); 3465 finish_rcuwait(wait); 3466 kvm_arch_vcpu_unblocking(vcpu); 3467 preempt_enable(); 3468 3469 vcpu->stat.generic.blocking = 0; 3470 3471 return waited; 3472 } 3473 3474 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3475 ktime_t end, bool success) 3476 { 3477 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3478 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3479 3480 ++vcpu->stat.generic.halt_attempted_poll; 3481 3482 if (success) { 3483 ++vcpu->stat.generic.halt_successful_poll; 3484 3485 if (!vcpu_valid_wakeup(vcpu)) 3486 ++vcpu->stat.generic.halt_poll_invalid; 3487 3488 stats->halt_poll_success_ns += poll_ns; 3489 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3490 } else { 3491 stats->halt_poll_fail_ns += poll_ns; 3492 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3493 } 3494 } 3495 3496 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) 3497 { 3498 struct kvm *kvm = vcpu->kvm; 3499 3500 if (kvm->override_halt_poll_ns) { 3501 /* 3502 * Ensure kvm->max_halt_poll_ns is not read before 3503 * kvm->override_halt_poll_ns. 3504 * 3505 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL. 3506 */ 3507 smp_rmb(); 3508 return READ_ONCE(kvm->max_halt_poll_ns); 3509 } 3510 3511 return READ_ONCE(halt_poll_ns); 3512 } 3513 3514 /* 3515 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt 3516 * polling is enabled, busy wait for a short time before blocking to avoid the 3517 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3518 * is halted. 3519 */ 3520 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3521 { 3522 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3523 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3524 ktime_t start, cur, poll_end; 3525 bool waited = false; 3526 bool do_halt_poll; 3527 u64 halt_ns; 3528 3529 if (vcpu->halt_poll_ns > max_halt_poll_ns) 3530 vcpu->halt_poll_ns = max_halt_poll_ns; 3531 3532 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3533 3534 start = cur = poll_end = ktime_get(); 3535 if (do_halt_poll) { 3536 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3537 3538 do { 3539 if (kvm_vcpu_check_block(vcpu) < 0) 3540 goto out; 3541 cpu_relax(); 3542 poll_end = cur = ktime_get(); 3543 } while (kvm_vcpu_can_poll(cur, stop)); 3544 } 3545 3546 waited = kvm_vcpu_block(vcpu); 3547 3548 cur = ktime_get(); 3549 if (waited) { 3550 vcpu->stat.generic.halt_wait_ns += 3551 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3552 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3553 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3554 } 3555 out: 3556 /* The total time the vCPU was "halted", including polling time. */ 3557 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3558 3559 /* 3560 * Note, halt-polling is considered successful so long as the vCPU was 3561 * never actually scheduled out, i.e. 
even if the wake event arrived 3562 * after of the halt-polling loop itself, but before the full wait. 3563 */ 3564 if (do_halt_poll) 3565 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3566 3567 if (halt_poll_allowed) { 3568 /* Recompute the max halt poll time in case it changed. */ 3569 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3570 3571 if (!vcpu_valid_wakeup(vcpu)) { 3572 shrink_halt_poll_ns(vcpu); 3573 } else if (max_halt_poll_ns) { 3574 if (halt_ns <= vcpu->halt_poll_ns) 3575 ; 3576 /* we had a long block, shrink polling */ 3577 else if (vcpu->halt_poll_ns && 3578 halt_ns > max_halt_poll_ns) 3579 shrink_halt_poll_ns(vcpu); 3580 /* we had a short halt and our poll time is too small */ 3581 else if (vcpu->halt_poll_ns < max_halt_poll_ns && 3582 halt_ns < max_halt_poll_ns) 3583 grow_halt_poll_ns(vcpu); 3584 } else { 3585 vcpu->halt_poll_ns = 0; 3586 } 3587 } 3588 3589 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3590 } 3591 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3592 3593 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3594 { 3595 if (__kvm_vcpu_wake_up(vcpu)) { 3596 WRITE_ONCE(vcpu->ready, true); 3597 ++vcpu->stat.generic.halt_wakeup; 3598 return true; 3599 } 3600 3601 return false; 3602 } 3603 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3604 3605 #ifndef CONFIG_S390 3606 /* 3607 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3608 */ 3609 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3610 { 3611 int me, cpu; 3612 3613 if (kvm_vcpu_wake_up(vcpu)) 3614 return; 3615 3616 me = get_cpu(); 3617 /* 3618 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3619 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3620 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3621 * within the vCPU thread itself. 3622 */ 3623 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3624 if (vcpu->mode == IN_GUEST_MODE) 3625 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3626 goto out; 3627 } 3628 3629 /* 3630 * Note, the vCPU could get migrated to a different pCPU at any point 3631 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3632 * IPI to the previous pCPU. But, that's ok because the purpose of the 3633 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3634 * vCPU also requires it to leave IN_GUEST_MODE. 3635 */ 3636 if (kvm_arch_vcpu_should_kick(vcpu)) { 3637 cpu = READ_ONCE(vcpu->cpu); 3638 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3639 smp_send_reschedule(cpu); 3640 } 3641 out: 3642 put_cpu(); 3643 } 3644 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3645 #endif /* !CONFIG_S390 */ 3646 3647 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3648 { 3649 struct pid *pid; 3650 struct task_struct *task = NULL; 3651 int ret = 0; 3652 3653 rcu_read_lock(); 3654 pid = rcu_dereference(target->pid); 3655 if (pid) 3656 task = get_pid_task(pid, PIDTYPE_PID); 3657 rcu_read_unlock(); 3658 if (!task) 3659 return ret; 3660 ret = yield_to(task, 1); 3661 put_task_struct(task); 3662 3663 return ret; 3664 } 3665 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3666 3667 /* 3668 * Helper that checks whether a VCPU is eligible for directed yield. 3669 * Most eligible candidate to yield is decided by following heuristics: 3670 * 3671 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3672 * (preempted lock holder), indicated by @in_spin_loop. 3673 * Set at the beginning and cleared at the end of interception/PLE handler. 
3674 * 3675 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3676 * chance last time (mostly it has become eligible now since we have probably 3677 * yielded to lockholder in last iteration. This is done by toggling 3678 * @dy_eligible each time a VCPU checked for eligibility.) 3679 * 3680 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3681 * to preempted lock-holder could result in wrong VCPU selection and CPU 3682 * burning. Giving priority for a potential lock-holder increases lock 3683 * progress. 3684 * 3685 * Since algorithm is based on heuristics, accessing another VCPU data without 3686 * locking does not harm. It may result in trying to yield to same VCPU, fail 3687 * and continue with next VCPU and so on. 3688 */ 3689 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3690 { 3691 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3692 bool eligible; 3693 3694 eligible = !vcpu->spin_loop.in_spin_loop || 3695 vcpu->spin_loop.dy_eligible; 3696 3697 if (vcpu->spin_loop.in_spin_loop) 3698 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3699 3700 return eligible; 3701 #else 3702 return true; 3703 #endif 3704 } 3705 3706 /* 3707 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3708 * a vcpu_load/vcpu_put pair. However, for most architectures 3709 * kvm_arch_vcpu_runnable does not require vcpu_load. 3710 */ 3711 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3712 { 3713 return kvm_arch_vcpu_runnable(vcpu); 3714 } 3715 3716 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3717 { 3718 if (kvm_arch_dy_runnable(vcpu)) 3719 return true; 3720 3721 #ifdef CONFIG_KVM_ASYNC_PF 3722 if (!list_empty_careful(&vcpu->async_pf.done)) 3723 return true; 3724 #endif 3725 3726 return false; 3727 } 3728 3729 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3730 { 3731 return false; 3732 } 3733 3734 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3735 { 3736 struct kvm *kvm = me->kvm; 3737 struct kvm_vcpu *vcpu; 3738 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3739 unsigned long i; 3740 int yielded = 0; 3741 int try = 3; 3742 int pass; 3743 3744 kvm_vcpu_set_in_spin_loop(me, true); 3745 /* 3746 * We boost the priority of a VCPU that is runnable but not 3747 * currently running, because it got preempted by something 3748 * else and called schedule in __vcpu_run. Hopefully that 3749 * VCPU is holding the lock that we need and will release it. 3750 * We approximate round-robin by starting at the last boosted VCPU. 
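 *
 * Concretely, the loop below makes up to two passes over the vCPU array:
 * the first pass starts just after last_boosted_vcpu and runs to the end,
 * the second wraps around and covers the vCPUs up to and including
 * last_boosted_vcpu, stopping early once a directed yield succeeds or the
 * small retry budget is exhausted.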
3751 */ 3752 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3753 kvm_for_each_vcpu(i, vcpu, kvm) { 3754 if (!pass && i <= last_boosted_vcpu) { 3755 i = last_boosted_vcpu; 3756 continue; 3757 } else if (pass && i > last_boosted_vcpu) 3758 break; 3759 if (!READ_ONCE(vcpu->ready)) 3760 continue; 3761 if (vcpu == me) 3762 continue; 3763 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3764 continue; 3765 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3766 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3767 !kvm_arch_vcpu_in_kernel(vcpu)) 3768 continue; 3769 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3770 continue; 3771 3772 yielded = kvm_vcpu_yield_to(vcpu); 3773 if (yielded > 0) { 3774 kvm->last_boosted_vcpu = i; 3775 break; 3776 } else if (yielded < 0) { 3777 try--; 3778 if (!try) 3779 break; 3780 } 3781 } 3782 } 3783 kvm_vcpu_set_in_spin_loop(me, false); 3784 3785 /* Ensure vcpu is not eligible during next spinloop */ 3786 kvm_vcpu_set_dy_eligible(me, false); 3787 } 3788 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3789 3790 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3791 { 3792 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3793 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3794 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3795 kvm->dirty_ring_size / PAGE_SIZE); 3796 #else 3797 return false; 3798 #endif 3799 } 3800 3801 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3802 { 3803 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3804 struct page *page; 3805 3806 if (vmf->pgoff == 0) 3807 page = virt_to_page(vcpu->run); 3808 #ifdef CONFIG_X86 3809 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3810 page = virt_to_page(vcpu->arch.pio_data); 3811 #endif 3812 #ifdef CONFIG_KVM_MMIO 3813 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3814 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3815 #endif 3816 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3817 page = kvm_dirty_ring_get_page( 3818 &vcpu->dirty_ring, 3819 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3820 else 3821 return kvm_arch_vcpu_fault(vcpu, vmf); 3822 get_page(page); 3823 vmf->page = page; 3824 return 0; 3825 } 3826 3827 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3828 .fault = kvm_vcpu_fault, 3829 }; 3830 3831 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3832 { 3833 struct kvm_vcpu *vcpu = file->private_data; 3834 unsigned long pages = vma_pages(vma); 3835 3836 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3837 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3838 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3839 return -EINVAL; 3840 3841 vma->vm_ops = &kvm_vcpu_vm_ops; 3842 return 0; 3843 } 3844 3845 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3846 { 3847 struct kvm_vcpu *vcpu = filp->private_data; 3848 3849 kvm_put_kvm(vcpu->kvm); 3850 return 0; 3851 } 3852 3853 static const struct file_operations kvm_vcpu_fops = { 3854 .release = kvm_vcpu_release, 3855 .unlocked_ioctl = kvm_vcpu_ioctl, 3856 .mmap = kvm_vcpu_mmap, 3857 .llseek = noop_llseek, 3858 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3859 }; 3860 3861 /* 3862 * Allocates an inode for the vcpu. 
3863 */ 3864 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3865 { 3866 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3867 3868 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3869 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3870 } 3871 3872 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3873 static int vcpu_get_pid(void *data, u64 *val) 3874 { 3875 struct kvm_vcpu *vcpu = data; 3876 *val = pid_nr(rcu_access_pointer(vcpu->pid)); 3877 return 0; 3878 } 3879 3880 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n"); 3881 3882 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3883 { 3884 struct dentry *debugfs_dentry; 3885 char dir_name[ITOA_MAX_LEN * 2]; 3886 3887 if (!debugfs_initialized()) 3888 return; 3889 3890 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3891 debugfs_dentry = debugfs_create_dir(dir_name, 3892 vcpu->kvm->debugfs_dentry); 3893 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, 3894 &vcpu_get_pid_fops); 3895 3896 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3897 } 3898 #endif 3899 3900 /* 3901 * Creates some virtual cpus. Good luck creating more than one. 3902 */ 3903 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3904 { 3905 int r; 3906 struct kvm_vcpu *vcpu; 3907 struct page *page; 3908 3909 if (id >= KVM_MAX_VCPU_IDS) 3910 return -EINVAL; 3911 3912 mutex_lock(&kvm->lock); 3913 if (kvm->created_vcpus >= kvm->max_vcpus) { 3914 mutex_unlock(&kvm->lock); 3915 return -EINVAL; 3916 } 3917 3918 r = kvm_arch_vcpu_precreate(kvm, id); 3919 if (r) { 3920 mutex_unlock(&kvm->lock); 3921 return r; 3922 } 3923 3924 kvm->created_vcpus++; 3925 mutex_unlock(&kvm->lock); 3926 3927 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3928 if (!vcpu) { 3929 r = -ENOMEM; 3930 goto vcpu_decrement; 3931 } 3932 3933 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3934 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3935 if (!page) { 3936 r = -ENOMEM; 3937 goto vcpu_free; 3938 } 3939 vcpu->run = page_address(page); 3940 3941 kvm_vcpu_init(vcpu, kvm, id); 3942 3943 r = kvm_arch_vcpu_create(vcpu); 3944 if (r) 3945 goto vcpu_free_run_page; 3946 3947 if (kvm->dirty_ring_size) { 3948 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3949 id, kvm->dirty_ring_size); 3950 if (r) 3951 goto arch_vcpu_destroy; 3952 } 3953 3954 mutex_lock(&kvm->lock); 3955 3956 #ifdef CONFIG_LOCKDEP 3957 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */ 3958 mutex_lock(&vcpu->mutex); 3959 mutex_unlock(&vcpu->mutex); 3960 #endif 3961 3962 if (kvm_get_vcpu_by_id(kvm, id)) { 3963 r = -EEXIST; 3964 goto unlock_vcpu_destroy; 3965 } 3966 3967 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3968 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT); 3969 if (r) 3970 goto unlock_vcpu_destroy; 3971 3972 /* Now it's all set up, let userspace reach it */ 3973 kvm_get_kvm(kvm); 3974 r = create_vcpu_fd(vcpu); 3975 if (r < 0) 3976 goto kvm_put_xa_release; 3977 3978 if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) { 3979 r = -EINVAL; 3980 goto kvm_put_xa_release; 3981 } 3982 3983 /* 3984 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 3985 * pointer before kvm->online_vcpu's incremented value. 
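 * That way a reader which observes the incremented online_vcpus count is
 * also guaranteed to observe the vcpu pointer stored in vcpu_array above.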
3986 */ 3987 smp_wmb(); 3988 atomic_inc(&kvm->online_vcpus); 3989 3990 mutex_unlock(&kvm->lock); 3991 kvm_arch_vcpu_postcreate(vcpu); 3992 kvm_create_vcpu_debugfs(vcpu); 3993 return r; 3994 3995 kvm_put_xa_release: 3996 kvm_put_kvm_no_destroy(kvm); 3997 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx); 3998 unlock_vcpu_destroy: 3999 mutex_unlock(&kvm->lock); 4000 kvm_dirty_ring_free(&vcpu->dirty_ring); 4001 arch_vcpu_destroy: 4002 kvm_arch_vcpu_destroy(vcpu); 4003 vcpu_free_run_page: 4004 free_page((unsigned long)vcpu->run); 4005 vcpu_free: 4006 kmem_cache_free(kvm_vcpu_cache, vcpu); 4007 vcpu_decrement: 4008 mutex_lock(&kvm->lock); 4009 kvm->created_vcpus--; 4010 mutex_unlock(&kvm->lock); 4011 return r; 4012 } 4013 4014 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 4015 { 4016 if (sigset) { 4017 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 4018 vcpu->sigset_active = 1; 4019 vcpu->sigset = *sigset; 4020 } else 4021 vcpu->sigset_active = 0; 4022 return 0; 4023 } 4024 4025 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 4026 size_t size, loff_t *offset) 4027 { 4028 struct kvm_vcpu *vcpu = file->private_data; 4029 4030 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 4031 &kvm_vcpu_stats_desc[0], &vcpu->stat, 4032 sizeof(vcpu->stat), user_buffer, size, offset); 4033 } 4034 4035 static const struct file_operations kvm_vcpu_stats_fops = { 4036 .read = kvm_vcpu_stats_read, 4037 .llseek = noop_llseek, 4038 }; 4039 4040 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 4041 { 4042 int fd; 4043 struct file *file; 4044 char name[15 + ITOA_MAX_LEN + 1]; 4045 4046 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 4047 4048 fd = get_unused_fd_flags(O_CLOEXEC); 4049 if (fd < 0) 4050 return fd; 4051 4052 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 4053 if (IS_ERR(file)) { 4054 put_unused_fd(fd); 4055 return PTR_ERR(file); 4056 } 4057 file->f_mode |= FMODE_PREAD; 4058 fd_install(fd, file); 4059 4060 return fd; 4061 } 4062 4063 static long kvm_vcpu_ioctl(struct file *filp, 4064 unsigned int ioctl, unsigned long arg) 4065 { 4066 struct kvm_vcpu *vcpu = filp->private_data; 4067 void __user *argp = (void __user *)arg; 4068 int r; 4069 struct kvm_fpu *fpu = NULL; 4070 struct kvm_sregs *kvm_sregs = NULL; 4071 4072 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4073 return -EIO; 4074 4075 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 4076 return -EINVAL; 4077 4078 /* 4079 * Some architectures have vcpu ioctls that are asynchronous to vcpu 4080 * execution; mutex_lock() would break them. 4081 */ 4082 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 4083 if (r != -ENOIOCTLCMD) 4084 return r; 4085 4086 if (mutex_lock_killable(&vcpu->mutex)) 4087 return -EINTR; 4088 switch (ioctl) { 4089 case KVM_RUN: { 4090 struct pid *oldpid; 4091 r = -EINVAL; 4092 if (arg) 4093 goto out; 4094 oldpid = rcu_access_pointer(vcpu->pid); 4095 if (unlikely(oldpid != task_pid(current))) { 4096 /* The thread running this VCPU changed. 
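 * (e.g. the VMM handed the vcpu fd to a different thread); update vcpu->pid
 * so that directed yields and the debugfs "pid" entry refer to the task that
 * is now issuing KVM_RUN.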
*/ 4097 struct pid *newpid; 4098 4099 r = kvm_arch_vcpu_run_pid_change(vcpu); 4100 if (r) 4101 break; 4102 4103 newpid = get_task_pid(current, PIDTYPE_PID); 4104 rcu_assign_pointer(vcpu->pid, newpid); 4105 if (oldpid) 4106 synchronize_rcu(); 4107 put_pid(oldpid); 4108 } 4109 r = kvm_arch_vcpu_ioctl_run(vcpu); 4110 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 4111 break; 4112 } 4113 case KVM_GET_REGS: { 4114 struct kvm_regs *kvm_regs; 4115 4116 r = -ENOMEM; 4117 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 4118 if (!kvm_regs) 4119 goto out; 4120 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 4121 if (r) 4122 goto out_free1; 4123 r = -EFAULT; 4124 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 4125 goto out_free1; 4126 r = 0; 4127 out_free1: 4128 kfree(kvm_regs); 4129 break; 4130 } 4131 case KVM_SET_REGS: { 4132 struct kvm_regs *kvm_regs; 4133 4134 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 4135 if (IS_ERR(kvm_regs)) { 4136 r = PTR_ERR(kvm_regs); 4137 goto out; 4138 } 4139 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 4140 kfree(kvm_regs); 4141 break; 4142 } 4143 case KVM_GET_SREGS: { 4144 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 4145 GFP_KERNEL_ACCOUNT); 4146 r = -ENOMEM; 4147 if (!kvm_sregs) 4148 goto out; 4149 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 4150 if (r) 4151 goto out; 4152 r = -EFAULT; 4153 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 4154 goto out; 4155 r = 0; 4156 break; 4157 } 4158 case KVM_SET_SREGS: { 4159 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 4160 if (IS_ERR(kvm_sregs)) { 4161 r = PTR_ERR(kvm_sregs); 4162 kvm_sregs = NULL; 4163 goto out; 4164 } 4165 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 4166 break; 4167 } 4168 case KVM_GET_MP_STATE: { 4169 struct kvm_mp_state mp_state; 4170 4171 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 4172 if (r) 4173 goto out; 4174 r = -EFAULT; 4175 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 4176 goto out; 4177 r = 0; 4178 break; 4179 } 4180 case KVM_SET_MP_STATE: { 4181 struct kvm_mp_state mp_state; 4182 4183 r = -EFAULT; 4184 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 4185 goto out; 4186 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 4187 break; 4188 } 4189 case KVM_TRANSLATE: { 4190 struct kvm_translation tr; 4191 4192 r = -EFAULT; 4193 if (copy_from_user(&tr, argp, sizeof(tr))) 4194 goto out; 4195 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4196 if (r) 4197 goto out; 4198 r = -EFAULT; 4199 if (copy_to_user(argp, &tr, sizeof(tr))) 4200 goto out; 4201 r = 0; 4202 break; 4203 } 4204 case KVM_SET_GUEST_DEBUG: { 4205 struct kvm_guest_debug dbg; 4206 4207 r = -EFAULT; 4208 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4209 goto out; 4210 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4211 break; 4212 } 4213 case KVM_SET_SIGNAL_MASK: { 4214 struct kvm_signal_mask __user *sigmask_arg = argp; 4215 struct kvm_signal_mask kvm_sigmask; 4216 sigset_t sigset, *p; 4217 4218 p = NULL; 4219 if (argp) { 4220 r = -EFAULT; 4221 if (copy_from_user(&kvm_sigmask, argp, 4222 sizeof(kvm_sigmask))) 4223 goto out; 4224 r = -EINVAL; 4225 if (kvm_sigmask.len != sizeof(sigset)) 4226 goto out; 4227 r = -EFAULT; 4228 if (copy_from_user(&sigset, sigmask_arg->sigset, 4229 sizeof(sigset))) 4230 goto out; 4231 p = &sigset; 4232 } 4233 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4234 break; 4235 } 4236 case KVM_GET_FPU: { 4237 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4238 r = -ENOMEM; 4239 if (!fpu) 4240 goto out; 
4241 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4242 if (r) 4243 goto out; 4244 r = -EFAULT; 4245 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4246 goto out; 4247 r = 0; 4248 break; 4249 } 4250 case KVM_SET_FPU: { 4251 fpu = memdup_user(argp, sizeof(*fpu)); 4252 if (IS_ERR(fpu)) { 4253 r = PTR_ERR(fpu); 4254 fpu = NULL; 4255 goto out; 4256 } 4257 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4258 break; 4259 } 4260 case KVM_GET_STATS_FD: { 4261 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4262 break; 4263 } 4264 default: 4265 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4266 } 4267 out: 4268 mutex_unlock(&vcpu->mutex); 4269 kfree(fpu); 4270 kfree(kvm_sregs); 4271 return r; 4272 } 4273 4274 #ifdef CONFIG_KVM_COMPAT 4275 static long kvm_vcpu_compat_ioctl(struct file *filp, 4276 unsigned int ioctl, unsigned long arg) 4277 { 4278 struct kvm_vcpu *vcpu = filp->private_data; 4279 void __user *argp = compat_ptr(arg); 4280 int r; 4281 4282 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4283 return -EIO; 4284 4285 switch (ioctl) { 4286 case KVM_SET_SIGNAL_MASK: { 4287 struct kvm_signal_mask __user *sigmask_arg = argp; 4288 struct kvm_signal_mask kvm_sigmask; 4289 sigset_t sigset; 4290 4291 if (argp) { 4292 r = -EFAULT; 4293 if (copy_from_user(&kvm_sigmask, argp, 4294 sizeof(kvm_sigmask))) 4295 goto out; 4296 r = -EINVAL; 4297 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4298 goto out; 4299 r = -EFAULT; 4300 if (get_compat_sigset(&sigset, 4301 (compat_sigset_t __user *)sigmask_arg->sigset)) 4302 goto out; 4303 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4304 } else 4305 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4306 break; 4307 } 4308 default: 4309 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4310 } 4311 4312 out: 4313 return r; 4314 } 4315 #endif 4316 4317 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4318 { 4319 struct kvm_device *dev = filp->private_data; 4320 4321 if (dev->ops->mmap) 4322 return dev->ops->mmap(dev, vma); 4323 4324 return -ENODEV; 4325 } 4326 4327 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4328 int (*accessor)(struct kvm_device *dev, 4329 struct kvm_device_attr *attr), 4330 unsigned long arg) 4331 { 4332 struct kvm_device_attr attr; 4333 4334 if (!accessor) 4335 return -EPERM; 4336 4337 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4338 return -EFAULT; 4339 4340 return accessor(dev, &attr); 4341 } 4342 4343 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4344 unsigned long arg) 4345 { 4346 struct kvm_device *dev = filp->private_data; 4347 4348 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4349 return -EIO; 4350 4351 switch (ioctl) { 4352 case KVM_SET_DEVICE_ATTR: 4353 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4354 case KVM_GET_DEVICE_ATTR: 4355 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4356 case KVM_HAS_DEVICE_ATTR: 4357 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4358 default: 4359 if (dev->ops->ioctl) 4360 return dev->ops->ioctl(dev, ioctl, arg); 4361 4362 return -ENOTTY; 4363 } 4364 } 4365 4366 static int kvm_device_release(struct inode *inode, struct file *filp) 4367 { 4368 struct kvm_device *dev = filp->private_data; 4369 struct kvm *kvm = dev->kvm; 4370 4371 if (dev->ops->release) { 4372 mutex_lock(&kvm->lock); 4373 list_del(&dev->vm_node); 4374 dev->ops->release(dev); 4375 mutex_unlock(&kvm->lock); 4376 } 4377 4378 kvm_put_kvm(kvm); 4379 return 0; 4380 } 4381 4382 static const struct file_operations kvm_device_fops = { 4383 
.unlocked_ioctl = kvm_device_ioctl, 4384 .release = kvm_device_release, 4385 KVM_COMPAT(kvm_device_ioctl), 4386 .mmap = kvm_device_mmap, 4387 }; 4388 4389 struct kvm_device *kvm_device_from_filp(struct file *filp) 4390 { 4391 if (filp->f_op != &kvm_device_fops) 4392 return NULL; 4393 4394 return filp->private_data; 4395 } 4396 4397 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4398 #ifdef CONFIG_KVM_MPIC 4399 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4400 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4401 #endif 4402 }; 4403 4404 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4405 { 4406 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4407 return -ENOSPC; 4408 4409 if (kvm_device_ops_table[type] != NULL) 4410 return -EEXIST; 4411 4412 kvm_device_ops_table[type] = ops; 4413 return 0; 4414 } 4415 4416 void kvm_unregister_device_ops(u32 type) 4417 { 4418 if (kvm_device_ops_table[type] != NULL) 4419 kvm_device_ops_table[type] = NULL; 4420 } 4421 4422 static int kvm_ioctl_create_device(struct kvm *kvm, 4423 struct kvm_create_device *cd) 4424 { 4425 const struct kvm_device_ops *ops; 4426 struct kvm_device *dev; 4427 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4428 int type; 4429 int ret; 4430 4431 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4432 return -ENODEV; 4433 4434 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4435 ops = kvm_device_ops_table[type]; 4436 if (ops == NULL) 4437 return -ENODEV; 4438 4439 if (test) 4440 return 0; 4441 4442 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4443 if (!dev) 4444 return -ENOMEM; 4445 4446 dev->ops = ops; 4447 dev->kvm = kvm; 4448 4449 mutex_lock(&kvm->lock); 4450 ret = ops->create(dev, type); 4451 if (ret < 0) { 4452 mutex_unlock(&kvm->lock); 4453 kfree(dev); 4454 return ret; 4455 } 4456 list_add(&dev->vm_node, &kvm->devices); 4457 mutex_unlock(&kvm->lock); 4458 4459 if (ops->init) 4460 ops->init(dev); 4461 4462 kvm_get_kvm(kvm); 4463 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4464 if (ret < 0) { 4465 kvm_put_kvm_no_destroy(kvm); 4466 mutex_lock(&kvm->lock); 4467 list_del(&dev->vm_node); 4468 if (ops->release) 4469 ops->release(dev); 4470 mutex_unlock(&kvm->lock); 4471 if (ops->destroy) 4472 ops->destroy(dev); 4473 return ret; 4474 } 4475 4476 cd->fd = ret; 4477 return 0; 4478 } 4479 4480 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4481 { 4482 switch (arg) { 4483 case KVM_CAP_USER_MEMORY: 4484 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4485 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4486 case KVM_CAP_INTERNAL_ERROR_DATA: 4487 #ifdef CONFIG_HAVE_KVM_MSI 4488 case KVM_CAP_SIGNAL_MSI: 4489 #endif 4490 #ifdef CONFIG_HAVE_KVM_IRQFD 4491 case KVM_CAP_IRQFD: 4492 #endif 4493 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4494 case KVM_CAP_CHECK_EXTENSION_VM: 4495 case KVM_CAP_ENABLE_CAP_VM: 4496 case KVM_CAP_HALT_POLL: 4497 return 1; 4498 #ifdef CONFIG_KVM_MMIO 4499 case KVM_CAP_COALESCED_MMIO: 4500 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4501 case KVM_CAP_COALESCED_PIO: 4502 return 1; 4503 #endif 4504 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4505 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4506 return KVM_DIRTY_LOG_MANUAL_CAPS; 4507 #endif 4508 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4509 case KVM_CAP_IRQ_ROUTING: 4510 return KVM_MAX_IRQ_ROUTES; 4511 #endif 4512 #if KVM_ADDRESS_SPACE_NUM > 1 4513 case KVM_CAP_MULTI_ADDRESS_SPACE: 4514 return KVM_ADDRESS_SPACE_NUM; 4515 #endif 4516 case KVM_CAP_NR_MEMSLOTS: 4517 return 
KVM_USER_MEM_SLOTS; 4518 case KVM_CAP_DIRTY_LOG_RING: 4519 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO 4520 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4521 #else 4522 return 0; 4523 #endif 4524 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4525 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL 4526 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4527 #else 4528 return 0; 4529 #endif 4530 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP 4531 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: 4532 #endif 4533 case KVM_CAP_BINARY_STATS_FD: 4534 case KVM_CAP_SYSTEM_EVENT_DATA: 4535 return 1; 4536 default: 4537 break; 4538 } 4539 return kvm_vm_ioctl_check_extension(kvm, arg); 4540 } 4541 4542 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4543 { 4544 int r; 4545 4546 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4547 return -EINVAL; 4548 4549 /* the size should be power of 2 */ 4550 if (!size || (size & (size - 1))) 4551 return -EINVAL; 4552 4553 /* Should be bigger to keep the reserved entries, or a page */ 4554 if (size < kvm_dirty_ring_get_rsvd_entries() * 4555 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4556 return -EINVAL; 4557 4558 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4559 sizeof(struct kvm_dirty_gfn)) 4560 return -E2BIG; 4561 4562 /* We only allow it to set once */ 4563 if (kvm->dirty_ring_size) 4564 return -EINVAL; 4565 4566 mutex_lock(&kvm->lock); 4567 4568 if (kvm->created_vcpus) { 4569 /* We don't allow to change this value after vcpu created */ 4570 r = -EINVAL; 4571 } else { 4572 kvm->dirty_ring_size = size; 4573 r = 0; 4574 } 4575 4576 mutex_unlock(&kvm->lock); 4577 return r; 4578 } 4579 4580 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4581 { 4582 unsigned long i; 4583 struct kvm_vcpu *vcpu; 4584 int cleared = 0; 4585 4586 if (!kvm->dirty_ring_size) 4587 return -EINVAL; 4588 4589 mutex_lock(&kvm->slots_lock); 4590 4591 kvm_for_each_vcpu(i, vcpu, kvm) 4592 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4593 4594 mutex_unlock(&kvm->slots_lock); 4595 4596 if (cleared) 4597 kvm_flush_remote_tlbs(kvm); 4598 4599 return cleared; 4600 } 4601 4602 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4603 struct kvm_enable_cap *cap) 4604 { 4605 return -EINVAL; 4606 } 4607 4608 static bool kvm_are_all_memslots_empty(struct kvm *kvm) 4609 { 4610 int i; 4611 4612 lockdep_assert_held(&kvm->slots_lock); 4613 4614 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 4615 if (!kvm_memslots_empty(__kvm_memslots(kvm, i))) 4616 return false; 4617 } 4618 4619 return true; 4620 } 4621 4622 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4623 struct kvm_enable_cap *cap) 4624 { 4625 switch (cap->cap) { 4626 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4627 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4628 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4629 4630 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4631 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4632 4633 if (cap->flags || (cap->args[0] & ~allowed_options)) 4634 return -EINVAL; 4635 kvm->manual_dirty_log_protect = cap->args[0]; 4636 return 0; 4637 } 4638 #endif 4639 case KVM_CAP_HALT_POLL: { 4640 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4641 return -EINVAL; 4642 4643 kvm->max_halt_poll_ns = cap->args[0]; 4644 4645 /* 4646 * Ensure kvm->override_halt_poll_ns does not become visible 4647 * before kvm->max_halt_poll_ns. 4648 * 4649 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns(). 
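 * A vCPU that observes override_halt_poll_ns == true is then guaranteed to
 * also observe the max_halt_poll_ns value written above.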
4650 */ 4651 smp_wmb(); 4652 kvm->override_halt_poll_ns = true; 4653 4654 return 0; 4655 } 4656 case KVM_CAP_DIRTY_LOG_RING: 4657 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4658 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) 4659 return -EINVAL; 4660 4661 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4662 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: { 4663 int r = -EINVAL; 4664 4665 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) || 4666 !kvm->dirty_ring_size || cap->flags) 4667 return r; 4668 4669 mutex_lock(&kvm->slots_lock); 4670 4671 /* 4672 * For simplicity, allow enabling ring+bitmap if and only if 4673 * there are no memslots, e.g. to ensure all memslots allocate 4674 * a bitmap after the capability is enabled. 4675 */ 4676 if (kvm_are_all_memslots_empty(kvm)) { 4677 kvm->dirty_ring_with_bitmap = true; 4678 r = 0; 4679 } 4680 4681 mutex_unlock(&kvm->slots_lock); 4682 4683 return r; 4684 } 4685 default: 4686 return kvm_vm_ioctl_enable_cap(kvm, cap); 4687 } 4688 } 4689 4690 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4691 size_t size, loff_t *offset) 4692 { 4693 struct kvm *kvm = file->private_data; 4694 4695 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4696 &kvm_vm_stats_desc[0], &kvm->stat, 4697 sizeof(kvm->stat), user_buffer, size, offset); 4698 } 4699 4700 static const struct file_operations kvm_vm_stats_fops = { 4701 .read = kvm_vm_stats_read, 4702 .llseek = noop_llseek, 4703 }; 4704 4705 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4706 { 4707 int fd; 4708 struct file *file; 4709 4710 fd = get_unused_fd_flags(O_CLOEXEC); 4711 if (fd < 0) 4712 return fd; 4713 4714 file = anon_inode_getfile("kvm-vm-stats", 4715 &kvm_vm_stats_fops, kvm, O_RDONLY); 4716 if (IS_ERR(file)) { 4717 put_unused_fd(fd); 4718 return PTR_ERR(file); 4719 } 4720 file->f_mode |= FMODE_PREAD; 4721 fd_install(fd, file); 4722 4723 return fd; 4724 } 4725 4726 static long kvm_vm_ioctl(struct file *filp, 4727 unsigned int ioctl, unsigned long arg) 4728 { 4729 struct kvm *kvm = filp->private_data; 4730 void __user *argp = (void __user *)arg; 4731 int r; 4732 4733 if (kvm->mm != current->mm || kvm->vm_dead) 4734 return -EIO; 4735 switch (ioctl) { 4736 case KVM_CREATE_VCPU: 4737 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4738 break; 4739 case KVM_ENABLE_CAP: { 4740 struct kvm_enable_cap cap; 4741 4742 r = -EFAULT; 4743 if (copy_from_user(&cap, argp, sizeof(cap))) 4744 goto out; 4745 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4746 break; 4747 } 4748 case KVM_SET_USER_MEMORY_REGION: { 4749 struct kvm_userspace_memory_region kvm_userspace_mem; 4750 4751 r = -EFAULT; 4752 if (copy_from_user(&kvm_userspace_mem, argp, 4753 sizeof(kvm_userspace_mem))) 4754 goto out; 4755 4756 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4757 break; 4758 } 4759 case KVM_GET_DIRTY_LOG: { 4760 struct kvm_dirty_log log; 4761 4762 r = -EFAULT; 4763 if (copy_from_user(&log, argp, sizeof(log))) 4764 goto out; 4765 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4766 break; 4767 } 4768 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4769 case KVM_CLEAR_DIRTY_LOG: { 4770 struct kvm_clear_dirty_log log; 4771 4772 r = -EFAULT; 4773 if (copy_from_user(&log, argp, sizeof(log))) 4774 goto out; 4775 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4776 break; 4777 } 4778 #endif 4779 #ifdef CONFIG_KVM_MMIO 4780 case KVM_REGISTER_COALESCED_MMIO: { 4781 struct kvm_coalesced_mmio_zone zone; 4782 4783 r = -EFAULT; 4784 if (copy_from_user(&zone, argp, sizeof(zone))) 4785 
goto out; 4786 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4787 break; 4788 } 4789 case KVM_UNREGISTER_COALESCED_MMIO: { 4790 struct kvm_coalesced_mmio_zone zone; 4791 4792 r = -EFAULT; 4793 if (copy_from_user(&zone, argp, sizeof(zone))) 4794 goto out; 4795 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4796 break; 4797 } 4798 #endif 4799 case KVM_IRQFD: { 4800 struct kvm_irqfd data; 4801 4802 r = -EFAULT; 4803 if (copy_from_user(&data, argp, sizeof(data))) 4804 goto out; 4805 r = kvm_irqfd(kvm, &data); 4806 break; 4807 } 4808 case KVM_IOEVENTFD: { 4809 struct kvm_ioeventfd data; 4810 4811 r = -EFAULT; 4812 if (copy_from_user(&data, argp, sizeof(data))) 4813 goto out; 4814 r = kvm_ioeventfd(kvm, &data); 4815 break; 4816 } 4817 #ifdef CONFIG_HAVE_KVM_MSI 4818 case KVM_SIGNAL_MSI: { 4819 struct kvm_msi msi; 4820 4821 r = -EFAULT; 4822 if (copy_from_user(&msi, argp, sizeof(msi))) 4823 goto out; 4824 r = kvm_send_userspace_msi(kvm, &msi); 4825 break; 4826 } 4827 #endif 4828 #ifdef __KVM_HAVE_IRQ_LINE 4829 case KVM_IRQ_LINE_STATUS: 4830 case KVM_IRQ_LINE: { 4831 struct kvm_irq_level irq_event; 4832 4833 r = -EFAULT; 4834 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4835 goto out; 4836 4837 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4838 ioctl == KVM_IRQ_LINE_STATUS); 4839 if (r) 4840 goto out; 4841 4842 r = -EFAULT; 4843 if (ioctl == KVM_IRQ_LINE_STATUS) { 4844 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4845 goto out; 4846 } 4847 4848 r = 0; 4849 break; 4850 } 4851 #endif 4852 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4853 case KVM_SET_GSI_ROUTING: { 4854 struct kvm_irq_routing routing; 4855 struct kvm_irq_routing __user *urouting; 4856 struct kvm_irq_routing_entry *entries = NULL; 4857 4858 r = -EFAULT; 4859 if (copy_from_user(&routing, argp, sizeof(routing))) 4860 goto out; 4861 r = -EINVAL; 4862 if (!kvm_arch_can_set_irq_routing(kvm)) 4863 goto out; 4864 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4865 goto out; 4866 if (routing.flags) 4867 goto out; 4868 if (routing.nr) { 4869 urouting = argp; 4870 entries = vmemdup_user(urouting->entries, 4871 array_size(sizeof(*entries), 4872 routing.nr)); 4873 if (IS_ERR(entries)) { 4874 r = PTR_ERR(entries); 4875 goto out; 4876 } 4877 } 4878 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4879 routing.flags); 4880 kvfree(entries); 4881 break; 4882 } 4883 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4884 case KVM_CREATE_DEVICE: { 4885 struct kvm_create_device cd; 4886 4887 r = -EFAULT; 4888 if (copy_from_user(&cd, argp, sizeof(cd))) 4889 goto out; 4890 4891 r = kvm_ioctl_create_device(kvm, &cd); 4892 if (r) 4893 goto out; 4894 4895 r = -EFAULT; 4896 if (copy_to_user(argp, &cd, sizeof(cd))) 4897 goto out; 4898 4899 r = 0; 4900 break; 4901 } 4902 case KVM_CHECK_EXTENSION: 4903 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4904 break; 4905 case KVM_RESET_DIRTY_RINGS: 4906 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4907 break; 4908 case KVM_GET_STATS_FD: 4909 r = kvm_vm_ioctl_get_stats_fd(kvm); 4910 break; 4911 default: 4912 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4913 } 4914 out: 4915 return r; 4916 } 4917 4918 #ifdef CONFIG_KVM_COMPAT 4919 struct compat_kvm_dirty_log { 4920 __u32 slot; 4921 __u32 padding1; 4922 union { 4923 compat_uptr_t dirty_bitmap; /* one bit per page */ 4924 __u64 padding2; 4925 }; 4926 }; 4927 4928 struct compat_kvm_clear_dirty_log { 4929 __u32 slot; 4930 __u32 num_pages; 4931 __u64 first_page; 4932 union { 4933 compat_uptr_t dirty_bitmap; /* one bit per page */ 4934 __u64 padding2; 4935 }; 4936 }; 4937 
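/*
 * The compat variants above differ from the native structures only in the
 * dirty_bitmap member, which is a 32-bit compat_uptr_t for 32-bit callers;
 * kvm_vm_compat_ioctl() widens it with compat_ptr() and then reuses the
 * native KVM_GET_DIRTY_LOG / KVM_CLEAR_DIRTY_LOG handlers.
 */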
4938 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 4939 unsigned long arg) 4940 { 4941 return -ENOTTY; 4942 } 4943 4944 static long kvm_vm_compat_ioctl(struct file *filp, 4945 unsigned int ioctl, unsigned long arg) 4946 { 4947 struct kvm *kvm = filp->private_data; 4948 int r; 4949 4950 if (kvm->mm != current->mm || kvm->vm_dead) 4951 return -EIO; 4952 4953 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg); 4954 if (r != -ENOTTY) 4955 return r; 4956 4957 switch (ioctl) { 4958 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4959 case KVM_CLEAR_DIRTY_LOG: { 4960 struct compat_kvm_clear_dirty_log compat_log; 4961 struct kvm_clear_dirty_log log; 4962 4963 if (copy_from_user(&compat_log, (void __user *)arg, 4964 sizeof(compat_log))) 4965 return -EFAULT; 4966 log.slot = compat_log.slot; 4967 log.num_pages = compat_log.num_pages; 4968 log.first_page = compat_log.first_page; 4969 log.padding2 = compat_log.padding2; 4970 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4971 4972 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4973 break; 4974 } 4975 #endif 4976 case KVM_GET_DIRTY_LOG: { 4977 struct compat_kvm_dirty_log compat_log; 4978 struct kvm_dirty_log log; 4979 4980 if (copy_from_user(&compat_log, (void __user *)arg, 4981 sizeof(compat_log))) 4982 return -EFAULT; 4983 log.slot = compat_log.slot; 4984 log.padding1 = compat_log.padding1; 4985 log.padding2 = compat_log.padding2; 4986 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4987 4988 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4989 break; 4990 } 4991 default: 4992 r = kvm_vm_ioctl(filp, ioctl, arg); 4993 } 4994 return r; 4995 } 4996 #endif 4997 4998 static const struct file_operations kvm_vm_fops = { 4999 .release = kvm_vm_release, 5000 .unlocked_ioctl = kvm_vm_ioctl, 5001 .llseek = noop_llseek, 5002 KVM_COMPAT(kvm_vm_compat_ioctl), 5003 }; 5004 5005 bool file_is_kvm(struct file *file) 5006 { 5007 return file && file->f_op == &kvm_vm_fops; 5008 } 5009 EXPORT_SYMBOL_GPL(file_is_kvm); 5010 5011 static int kvm_dev_ioctl_create_vm(unsigned long type) 5012 { 5013 char fdname[ITOA_MAX_LEN + 1]; 5014 int r, fd; 5015 struct kvm *kvm; 5016 struct file *file; 5017 5018 fd = get_unused_fd_flags(O_CLOEXEC); 5019 if (fd < 0) 5020 return fd; 5021 5022 snprintf(fdname, sizeof(fdname), "%d", fd); 5023 5024 kvm = kvm_create_vm(type, fdname); 5025 if (IS_ERR(kvm)) { 5026 r = PTR_ERR(kvm); 5027 goto put_fd; 5028 } 5029 5030 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 5031 if (IS_ERR(file)) { 5032 r = PTR_ERR(file); 5033 goto put_kvm; 5034 } 5035 5036 /* 5037 * Don't call kvm_put_kvm anymore at this point; file->f_op is 5038 * already set, with ->release() being kvm_vm_release(). In error 5039 * cases it will be called by the final fput(file) and will take 5040 * care of doing kvm_put_kvm(kvm). 
5041 */ 5042 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 5043 5044 fd_install(fd, file); 5045 return fd; 5046 5047 put_kvm: 5048 kvm_put_kvm(kvm); 5049 put_fd: 5050 put_unused_fd(fd); 5051 return r; 5052 } 5053 5054 static long kvm_dev_ioctl(struct file *filp, 5055 unsigned int ioctl, unsigned long arg) 5056 { 5057 int r = -EINVAL; 5058 5059 switch (ioctl) { 5060 case KVM_GET_API_VERSION: 5061 if (arg) 5062 goto out; 5063 r = KVM_API_VERSION; 5064 break; 5065 case KVM_CREATE_VM: 5066 r = kvm_dev_ioctl_create_vm(arg); 5067 break; 5068 case KVM_CHECK_EXTENSION: 5069 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 5070 break; 5071 case KVM_GET_VCPU_MMAP_SIZE: 5072 if (arg) 5073 goto out; 5074 r = PAGE_SIZE; /* struct kvm_run */ 5075 #ifdef CONFIG_X86 5076 r += PAGE_SIZE; /* pio data page */ 5077 #endif 5078 #ifdef CONFIG_KVM_MMIO 5079 r += PAGE_SIZE; /* coalesced mmio ring page */ 5080 #endif 5081 break; 5082 case KVM_TRACE_ENABLE: 5083 case KVM_TRACE_PAUSE: 5084 case KVM_TRACE_DISABLE: 5085 r = -EOPNOTSUPP; 5086 break; 5087 default: 5088 return kvm_arch_dev_ioctl(filp, ioctl, arg); 5089 } 5090 out: 5091 return r; 5092 } 5093 5094 static struct file_operations kvm_chardev_ops = { 5095 .unlocked_ioctl = kvm_dev_ioctl, 5096 .llseek = noop_llseek, 5097 KVM_COMPAT(kvm_dev_ioctl), 5098 }; 5099 5100 static struct miscdevice kvm_dev = { 5101 KVM_MINOR, 5102 "kvm", 5103 &kvm_chardev_ops, 5104 }; 5105 5106 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 5107 __visible bool kvm_rebooting; 5108 EXPORT_SYMBOL_GPL(kvm_rebooting); 5109 5110 static DEFINE_PER_CPU(bool, hardware_enabled); 5111 static int kvm_usage_count; 5112 5113 static int __hardware_enable_nolock(void) 5114 { 5115 if (__this_cpu_read(hardware_enabled)) 5116 return 0; 5117 5118 if (kvm_arch_hardware_enable()) { 5119 pr_info("kvm: enabling virtualization on CPU%d failed\n", 5120 raw_smp_processor_id()); 5121 return -EIO; 5122 } 5123 5124 __this_cpu_write(hardware_enabled, true); 5125 return 0; 5126 } 5127 5128 static void hardware_enable_nolock(void *failed) 5129 { 5130 if (__hardware_enable_nolock()) 5131 atomic_inc(failed); 5132 } 5133 5134 static int kvm_online_cpu(unsigned int cpu) 5135 { 5136 int ret = 0; 5137 5138 /* 5139 * Abort the CPU online process if hardware virtualization cannot 5140 * be enabled. Otherwise running VMs would encounter unrecoverable 5141 * errors when scheduled to this CPU. 5142 */ 5143 mutex_lock(&kvm_lock); 5144 if (kvm_usage_count) 5145 ret = __hardware_enable_nolock(); 5146 mutex_unlock(&kvm_lock); 5147 return ret; 5148 } 5149 5150 static void hardware_disable_nolock(void *junk) 5151 { 5152 /* 5153 * Note, hardware_disable_all_nolock() tells all online CPUs to disable 5154 * hardware, not just CPUs that successfully enabled hardware! 
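 * The per-CPU hardware_enabled check below turns this into a nop on CPUs
 * where enabling failed or was never attempted.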
5155 */ 5156 if (!__this_cpu_read(hardware_enabled)) 5157 return; 5158 5159 kvm_arch_hardware_disable(); 5160 5161 __this_cpu_write(hardware_enabled, false); 5162 } 5163 5164 static int kvm_offline_cpu(unsigned int cpu) 5165 { 5166 mutex_lock(&kvm_lock); 5167 if (kvm_usage_count) 5168 hardware_disable_nolock(NULL); 5169 mutex_unlock(&kvm_lock); 5170 return 0; 5171 } 5172 5173 static void hardware_disable_all_nolock(void) 5174 { 5175 BUG_ON(!kvm_usage_count); 5176 5177 kvm_usage_count--; 5178 if (!kvm_usage_count) 5179 on_each_cpu(hardware_disable_nolock, NULL, 1); 5180 } 5181 5182 static void hardware_disable_all(void) 5183 { 5184 cpus_read_lock(); 5185 mutex_lock(&kvm_lock); 5186 hardware_disable_all_nolock(); 5187 mutex_unlock(&kvm_lock); 5188 cpus_read_unlock(); 5189 } 5190 5191 static int hardware_enable_all(void) 5192 { 5193 atomic_t failed = ATOMIC_INIT(0); 5194 int r; 5195 5196 /* 5197 * Do not enable hardware virtualization if the system is going down. 5198 * If userspace initiated a forced reboot, e.g. reboot -f, then it's 5199 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling 5200 * after kvm_reboot() is called. Note, this relies on system_state 5201 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops 5202 * hook instead of registering a dedicated reboot notifier (the latter 5203 * runs before system_state is updated). 5204 */ 5205 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF || 5206 system_state == SYSTEM_RESTART) 5207 return -EBUSY; 5208 5209 /* 5210 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu() 5211 * is called, and so on_each_cpu() between them includes the CPU that 5212 * is being onlined. As a result, hardware_enable_nolock() may get 5213 * invoked before kvm_online_cpu(), which also enables hardware if the 5214 * usage count is non-zero. Disable CPU hotplug to avoid attempting to 5215 * enable hardware multiple times. 5216 */ 5217 cpus_read_lock(); 5218 mutex_lock(&kvm_lock); 5219 5220 r = 0; 5221 5222 kvm_usage_count++; 5223 if (kvm_usage_count == 1) { 5224 on_each_cpu(hardware_enable_nolock, &failed, 1); 5225 5226 if (atomic_read(&failed)) { 5227 hardware_disable_all_nolock(); 5228 r = -EBUSY; 5229 } 5230 } 5231 5232 mutex_unlock(&kvm_lock); 5233 cpus_read_unlock(); 5234 5235 return r; 5236 } 5237 5238 static void kvm_shutdown(void) 5239 { 5240 /* 5241 * Disable hardware virtualization and set kvm_rebooting to indicate 5242 * that KVM has asynchronously disabled hardware virtualization, i.e. 5243 * that relevant errors and exceptions aren't entirely unexpected. 5244 * Some flavors of hardware virtualization need to be disabled before 5245 * transferring control to firmware (to perform shutdown/reboot), e.g. 5246 * on x86, virtualization can block INIT interrupts, which are used by 5247 * firmware to pull APs back under firmware control. Note, this path 5248 * is used for both shutdown and reboot scenarios, i.e. neither name is 5249 * 100% comprehensive. 5250 */ 5251 pr_info("kvm: exiting hardware virtualization\n"); 5252 kvm_rebooting = true; 5253 on_each_cpu(hardware_disable_nolock, NULL, 1); 5254 } 5255 5256 static int kvm_suspend(void) 5257 { 5258 /* 5259 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume 5260 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count 5261 * is stable. Assert that kvm_lock is not held to ensure the system 5262 * isn't suspended while KVM is enabling hardware. 
Hardware enabling 5263 * can be preempted, but the task cannot be frozen until it has dropped 5264 * all locks (userspace tasks are frozen via a fake signal). 5265 */ 5266 lockdep_assert_not_held(&kvm_lock); 5267 lockdep_assert_irqs_disabled(); 5268 5269 if (kvm_usage_count) 5270 hardware_disable_nolock(NULL); 5271 return 0; 5272 } 5273 5274 static void kvm_resume(void) 5275 { 5276 lockdep_assert_not_held(&kvm_lock); 5277 lockdep_assert_irqs_disabled(); 5278 5279 if (kvm_usage_count) 5280 WARN_ON_ONCE(__hardware_enable_nolock()); 5281 } 5282 5283 static struct syscore_ops kvm_syscore_ops = { 5284 .suspend = kvm_suspend, 5285 .resume = kvm_resume, 5286 .shutdown = kvm_shutdown, 5287 }; 5288 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5289 static int hardware_enable_all(void) 5290 { 5291 return 0; 5292 } 5293 5294 static void hardware_disable_all(void) 5295 { 5296 5297 } 5298 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5299 5300 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 5301 { 5302 int i; 5303 5304 for (i = 0; i < bus->dev_count; i++) { 5305 struct kvm_io_device *pos = bus->range[i].dev; 5306 5307 kvm_iodevice_destructor(pos); 5308 } 5309 kfree(bus); 5310 } 5311 5312 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 5313 const struct kvm_io_range *r2) 5314 { 5315 gpa_t addr1 = r1->addr; 5316 gpa_t addr2 = r2->addr; 5317 5318 if (addr1 < addr2) 5319 return -1; 5320 5321 /* If r2->len == 0, match the exact address. If r2->len != 0, 5322 * accept any overlapping write. Any order is acceptable for 5323 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 5324 * we process all of them. 5325 */ 5326 if (r2->len) { 5327 addr1 += r1->len; 5328 addr2 += r2->len; 5329 } 5330 5331 if (addr1 > addr2) 5332 return 1; 5333 5334 return 0; 5335 } 5336 5337 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 5338 { 5339 return kvm_io_bus_cmp(p1, p2); 5340 } 5341 5342 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 5343 gpa_t addr, int len) 5344 { 5345 struct kvm_io_range *range, key; 5346 int off; 5347 5348 key = (struct kvm_io_range) { 5349 .addr = addr, 5350 .len = len, 5351 }; 5352 5353 range = bsearch(&key, bus->range, bus->dev_count, 5354 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5355 if (range == NULL) 5356 return -ENOENT; 5357 5358 off = range - bus->range; 5359 5360 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5361 off--; 5362 5363 return off; 5364 } 5365 5366 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5367 struct kvm_io_range *range, const void *val) 5368 { 5369 int idx; 5370 5371 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5372 if (idx < 0) 5373 return -EOPNOTSUPP; 5374 5375 while (idx < bus->dev_count && 5376 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5377 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5378 range->len, val)) 5379 return idx; 5380 idx++; 5381 } 5382 5383 return -EOPNOTSUPP; 5384 } 5385 5386 /* kvm_io_bus_write - called under kvm->slots_lock */ 5387 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5388 int len, const void *val) 5389 { 5390 struct kvm_io_bus *bus; 5391 struct kvm_io_range range; 5392 int r; 5393 5394 range = (struct kvm_io_range) { 5395 .addr = addr, 5396 .len = len, 5397 }; 5398 5399 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5400 if (!bus) 5401 return -ENOMEM; 5402 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5403 return r < 0 ? 
r : 0; 5404 } 5405 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5406 5407 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5408 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5409 gpa_t addr, int len, const void *val, long cookie) 5410 { 5411 struct kvm_io_bus *bus; 5412 struct kvm_io_range range; 5413 5414 range = (struct kvm_io_range) { 5415 .addr = addr, 5416 .len = len, 5417 }; 5418 5419 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5420 if (!bus) 5421 return -ENOMEM; 5422 5423 /* First try the device referenced by cookie. */ 5424 if ((cookie >= 0) && (cookie < bus->dev_count) && 5425 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5426 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5427 val)) 5428 return cookie; 5429 5430 /* 5431 * cookie contained garbage; fall back to search and return the 5432 * correct cookie value. 5433 */ 5434 return __kvm_io_bus_write(vcpu, bus, &range, val); 5435 } 5436 5437 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5438 struct kvm_io_range *range, void *val) 5439 { 5440 int idx; 5441 5442 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5443 if (idx < 0) 5444 return -EOPNOTSUPP; 5445 5446 while (idx < bus->dev_count && 5447 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5448 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5449 range->len, val)) 5450 return idx; 5451 idx++; 5452 } 5453 5454 return -EOPNOTSUPP; 5455 } 5456 5457 /* kvm_io_bus_read - called under kvm->slots_lock */ 5458 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5459 int len, void *val) 5460 { 5461 struct kvm_io_bus *bus; 5462 struct kvm_io_range range; 5463 int r; 5464 5465 range = (struct kvm_io_range) { 5466 .addr = addr, 5467 .len = len, 5468 }; 5469 5470 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5471 if (!bus) 5472 return -ENOMEM; 5473 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5474 return r < 0 ? r : 0; 5475 } 5476 5477 /* Caller must hold slots_lock. 
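 * Registration publishes a copy of the bus with the new device inserted in
 * sorted order, then waits for SRCU readers of the old bus before freeing it.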
*/ 5478 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5479 int len, struct kvm_io_device *dev) 5480 { 5481 int i; 5482 struct kvm_io_bus *new_bus, *bus; 5483 struct kvm_io_range range; 5484 5485 bus = kvm_get_bus(kvm, bus_idx); 5486 if (!bus) 5487 return -ENOMEM; 5488 5489 /* exclude ioeventfd which is limited by maximum fd */ 5490 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5491 return -ENOSPC; 5492 5493 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5494 GFP_KERNEL_ACCOUNT); 5495 if (!new_bus) 5496 return -ENOMEM; 5497 5498 range = (struct kvm_io_range) { 5499 .addr = addr, 5500 .len = len, 5501 .dev = dev, 5502 }; 5503 5504 for (i = 0; i < bus->dev_count; i++) 5505 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5506 break; 5507 5508 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5509 new_bus->dev_count++; 5510 new_bus->range[i] = range; 5511 memcpy(new_bus->range + i + 1, bus->range + i, 5512 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5513 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5514 synchronize_srcu_expedited(&kvm->srcu); 5515 kfree(bus); 5516 5517 return 0; 5518 } 5519 5520 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5521 struct kvm_io_device *dev) 5522 { 5523 int i, j; 5524 struct kvm_io_bus *new_bus, *bus; 5525 5526 lockdep_assert_held(&kvm->slots_lock); 5527 5528 bus = kvm_get_bus(kvm, bus_idx); 5529 if (!bus) 5530 return 0; 5531 5532 for (i = 0; i < bus->dev_count; i++) { 5533 if (bus->range[i].dev == dev) { 5534 break; 5535 } 5536 } 5537 5538 if (i == bus->dev_count) 5539 return 0; 5540 5541 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5542 GFP_KERNEL_ACCOUNT); 5543 if (new_bus) { 5544 memcpy(new_bus, bus, struct_size(bus, range, i)); 5545 new_bus->dev_count--; 5546 memcpy(new_bus->range + i, bus->range + i + 1, 5547 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5548 } 5549 5550 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5551 synchronize_srcu_expedited(&kvm->srcu); 5552 5553 /* Destroy the old bus _after_ installing the (null) bus. */ 5554 if (!new_bus) { 5555 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5556 for (j = 0; j < bus->dev_count; j++) { 5557 if (j == i) 5558 continue; 5559 kvm_iodevice_destructor(bus->range[j].dev); 5560 } 5561 } 5562 5563 kfree(bus); 5564 return new_bus ? 0 : -ENOMEM; 5565 } 5566 5567 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5568 gpa_t addr) 5569 { 5570 struct kvm_io_bus *bus; 5571 int dev_idx, srcu_idx; 5572 struct kvm_io_device *iodev = NULL; 5573 5574 srcu_idx = srcu_read_lock(&kvm->srcu); 5575 5576 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5577 if (!bus) 5578 goto out_unlock; 5579 5580 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5581 if (dev_idx < 0) 5582 goto out_unlock; 5583 5584 iodev = bus->range[dev_idx].dev; 5585 5586 out_unlock: 5587 srcu_read_unlock(&kvm->srcu, srcu_idx); 5588 5589 return iodev; 5590 } 5591 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5592 5593 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5594 int (*get)(void *, u64 *), int (*set)(void *, u64), 5595 const char *fmt) 5596 { 5597 int ret; 5598 struct kvm_stat_data *stat_data = inode->i_private; 5599 5600 /* 5601 * The debugfs files are a reference to the kvm struct which 5602 * is still valid when kvm_destroy_vm is called. 
kvm_get_kvm_safe 5603 * avoids the race between open and the removal of the debugfs directory. 5604 */ 5605 if (!kvm_get_kvm_safe(stat_data->kvm)) 5606 return -ENOENT; 5607 5608 ret = simple_attr_open(inode, file, get, 5609 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5610 ? set : NULL, fmt); 5611 if (ret) 5612 kvm_put_kvm(stat_data->kvm); 5613 5614 return ret; 5615 } 5616 5617 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5618 { 5619 struct kvm_stat_data *stat_data = inode->i_private; 5620 5621 simple_attr_release(inode, file); 5622 kvm_put_kvm(stat_data->kvm); 5623 5624 return 0; 5625 } 5626 5627 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5628 { 5629 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5630 5631 return 0; 5632 } 5633 5634 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5635 { 5636 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5637 5638 return 0; 5639 } 5640 5641 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5642 { 5643 unsigned long i; 5644 struct kvm_vcpu *vcpu; 5645 5646 *val = 0; 5647 5648 kvm_for_each_vcpu(i, vcpu, kvm) 5649 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5650 5651 return 0; 5652 } 5653 5654 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5655 { 5656 unsigned long i; 5657 struct kvm_vcpu *vcpu; 5658 5659 kvm_for_each_vcpu(i, vcpu, kvm) 5660 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5661 5662 return 0; 5663 } 5664 5665 static int kvm_stat_data_get(void *data, u64 *val) 5666 { 5667 int r = -EFAULT; 5668 struct kvm_stat_data *stat_data = data; 5669 5670 switch (stat_data->kind) { 5671 case KVM_STAT_VM: 5672 r = kvm_get_stat_per_vm(stat_data->kvm, 5673 stat_data->desc->desc.offset, val); 5674 break; 5675 case KVM_STAT_VCPU: 5676 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5677 stat_data->desc->desc.offset, val); 5678 break; 5679 } 5680 5681 return r; 5682 } 5683 5684 static int kvm_stat_data_clear(void *data, u64 val) 5685 { 5686 int r = -EFAULT; 5687 struct kvm_stat_data *stat_data = data; 5688 5689 if (val) 5690 return -EINVAL; 5691 5692 switch (stat_data->kind) { 5693 case KVM_STAT_VM: 5694 r = kvm_clear_stat_per_vm(stat_data->kvm, 5695 stat_data->desc->desc.offset); 5696 break; 5697 case KVM_STAT_VCPU: 5698 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5699 stat_data->desc->desc.offset); 5700 break; 5701 } 5702 5703 return r; 5704 } 5705 5706 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5707 { 5708 __simple_attr_check_format("%llu\n", 0ull); 5709 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5710 kvm_stat_data_clear, "%llu\n"); 5711 } 5712 5713 static const struct file_operations stat_fops_per_vm = { 5714 .owner = THIS_MODULE, 5715 .open = kvm_stat_data_open, 5716 .release = kvm_debugfs_release, 5717 .read = simple_attr_read, 5718 .write = simple_attr_write, 5719 .llseek = no_llseek, 5720 }; 5721 5722 static int vm_stat_get(void *_offset, u64 *val) 5723 { 5724 unsigned offset = (long)_offset; 5725 struct kvm *kvm; 5726 u64 tmp_val; 5727 5728 *val = 0; 5729 mutex_lock(&kvm_lock); 5730 list_for_each_entry(kvm, &vm_list, vm_list) { 5731 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5732 *val += tmp_val; 5733 } 5734 mutex_unlock(&kvm_lock); 5735 return 0; 5736 } 5737 5738 static int vm_stat_clear(void *_offset, u64 val) 5739 { 5740 unsigned offset = (long)_offset; 5741 struct kvm *kvm; 5742 5743 if (val) 5744 return -EINVAL; 5745 5746 mutex_lock(&kvm_lock); 5747 list_for_each_entry(kvm, &vm_list, vm_list) 
{ 5748 kvm_clear_stat_per_vm(kvm, offset); 5749 } 5750 mutex_unlock(&kvm_lock); 5751 5752 return 0; 5753 } 5754 5755 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5756 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5757 5758 static int vcpu_stat_get(void *_offset, u64 *val) 5759 { 5760 unsigned offset = (long)_offset; 5761 struct kvm *kvm; 5762 u64 tmp_val; 5763 5764 *val = 0; 5765 mutex_lock(&kvm_lock); 5766 list_for_each_entry(kvm, &vm_list, vm_list) { 5767 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5768 *val += tmp_val; 5769 } 5770 mutex_unlock(&kvm_lock); 5771 return 0; 5772 } 5773 5774 static int vcpu_stat_clear(void *_offset, u64 val) 5775 { 5776 unsigned offset = (long)_offset; 5777 struct kvm *kvm; 5778 5779 if (val) 5780 return -EINVAL; 5781 5782 mutex_lock(&kvm_lock); 5783 list_for_each_entry(kvm, &vm_list, vm_list) { 5784 kvm_clear_stat_per_vcpu(kvm, offset); 5785 } 5786 mutex_unlock(&kvm_lock); 5787 5788 return 0; 5789 } 5790 5791 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5792 "%llu\n"); 5793 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5794 5795 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5796 { 5797 struct kobj_uevent_env *env; 5798 unsigned long long created, active; 5799 5800 if (!kvm_dev.this_device || !kvm) 5801 return; 5802 5803 mutex_lock(&kvm_lock); 5804 if (type == KVM_EVENT_CREATE_VM) { 5805 kvm_createvm_count++; 5806 kvm_active_vms++; 5807 } else if (type == KVM_EVENT_DESTROY_VM) { 5808 kvm_active_vms--; 5809 } 5810 created = kvm_createvm_count; 5811 active = kvm_active_vms; 5812 mutex_unlock(&kvm_lock); 5813 5814 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5815 if (!env) 5816 return; 5817 5818 add_uevent_var(env, "CREATED=%llu", created); 5819 add_uevent_var(env, "COUNT=%llu", active); 5820 5821 if (type == KVM_EVENT_CREATE_VM) { 5822 add_uevent_var(env, "EVENT=create"); 5823 kvm->userspace_pid = task_pid_nr(current); 5824 } else if (type == KVM_EVENT_DESTROY_VM) { 5825 add_uevent_var(env, "EVENT=destroy"); 5826 } 5827 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5828 5829 if (!IS_ERR(kvm->debugfs_dentry)) { 5830 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5831 5832 if (p) { 5833 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5834 if (!IS_ERR(tmp)) 5835 add_uevent_var(env, "STATS_PATH=%s", tmp); 5836 kfree(p); 5837 } 5838 } 5839 /* no need for checks, since we are adding at most only 5 keys */ 5840 env->envp[env->envp_idx++] = NULL; 5841 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5842 kfree(env); 5843 } 5844 5845 static void kvm_init_debug(void) 5846 { 5847 const struct file_operations *fops; 5848 const struct _kvm_stats_desc *pdesc; 5849 int i; 5850 5851 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5852 5853 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5854 pdesc = &kvm_vm_stats_desc[i]; 5855 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5856 fops = &vm_stat_fops; 5857 else 5858 fops = &vm_stat_readonly_fops; 5859 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5860 kvm_debugfs_dir, 5861 (void *)(long)pdesc->desc.offset, fops); 5862 } 5863 5864 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5865 pdesc = &kvm_vcpu_stats_desc[i]; 5866 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5867 fops = &vcpu_stat_fops; 5868 else 5869 fops = &vcpu_stat_readonly_fops; 5870 debugfs_create_file(pdesc->name, 
kvm_stats_debugfs_mode(pdesc), 5871 kvm_debugfs_dir, 5872 (void *)(long)pdesc->desc.offset, fops); 5873 } 5874 } 5875 5876 static inline 5877 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5878 { 5879 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5880 } 5881 5882 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5883 { 5884 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5885 5886 WRITE_ONCE(vcpu->preempted, false); 5887 WRITE_ONCE(vcpu->ready, false); 5888 5889 __this_cpu_write(kvm_running_vcpu, vcpu); 5890 kvm_arch_sched_in(vcpu, cpu); 5891 kvm_arch_vcpu_load(vcpu, cpu); 5892 } 5893 5894 static void kvm_sched_out(struct preempt_notifier *pn, 5895 struct task_struct *next) 5896 { 5897 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5898 5899 if (current->on_rq) { 5900 WRITE_ONCE(vcpu->preempted, true); 5901 WRITE_ONCE(vcpu->ready, true); 5902 } 5903 kvm_arch_vcpu_put(vcpu); 5904 __this_cpu_write(kvm_running_vcpu, NULL); 5905 } 5906 5907 /** 5908 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5909 * 5910 * We can disable preemption locally around accessing the per-CPU variable, 5911 * and use the resolved vcpu pointer after enabling preemption again, 5912 * because even if the current thread is migrated to another CPU, reading 5913 * the per-CPU value later will give us the same value as we update the 5914 * per-CPU variable in the preempt notifier handlers. 5915 */ 5916 struct kvm_vcpu *kvm_get_running_vcpu(void) 5917 { 5918 struct kvm_vcpu *vcpu; 5919 5920 preempt_disable(); 5921 vcpu = __this_cpu_read(kvm_running_vcpu); 5922 preempt_enable(); 5923 5924 return vcpu; 5925 } 5926 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5927 5928 /** 5929 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5930 */ 5931 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5932 { 5933 return &kvm_running_vcpu; 5934 } 5935 5936 #ifdef CONFIG_GUEST_PERF_EVENTS 5937 static unsigned int kvm_guest_state(void) 5938 { 5939 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5940 unsigned int state; 5941 5942 if (!kvm_arch_pmi_in_guest(vcpu)) 5943 return 0; 5944 5945 state = PERF_GUEST_ACTIVE; 5946 if (!kvm_arch_vcpu_in_kernel(vcpu)) 5947 state |= PERF_GUEST_USER; 5948 5949 return state; 5950 } 5951 5952 static unsigned long kvm_guest_get_ip(void) 5953 { 5954 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5955 5956 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). 
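 * i.e. perf is expected to call ->get_ip() only after ->state() reported a
 * guest sample; the WARN below catches violations of that contract.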
*/ 5957 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) 5958 return 0; 5959 5960 return kvm_arch_vcpu_get_ip(vcpu); 5961 } 5962 5963 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5964 .state = kvm_guest_state, 5965 .get_ip = kvm_guest_get_ip, 5966 .handle_intel_pt_intr = NULL, 5967 }; 5968 5969 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)) 5970 { 5971 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler; 5972 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5973 } 5974 void kvm_unregister_perf_callbacks(void) 5975 { 5976 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5977 } 5978 #endif 5979 5980 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module) 5981 { 5982 int r; 5983 int cpu; 5984 5985 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 5986 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online", 5987 kvm_online_cpu, kvm_offline_cpu); 5988 if (r) 5989 return r; 5990 5991 register_syscore_ops(&kvm_syscore_ops); 5992 #endif 5993 5994 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 5995 if (!vcpu_align) 5996 vcpu_align = __alignof__(struct kvm_vcpu); 5997 kvm_vcpu_cache = 5998 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5999 SLAB_ACCOUNT, 6000 offsetof(struct kvm_vcpu, arch), 6001 offsetofend(struct kvm_vcpu, stats_id) 6002 - offsetof(struct kvm_vcpu, arch), 6003 NULL); 6004 if (!kvm_vcpu_cache) { 6005 r = -ENOMEM; 6006 goto err_vcpu_cache; 6007 } 6008 6009 for_each_possible_cpu(cpu) { 6010 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 6011 GFP_KERNEL, cpu_to_node(cpu))) { 6012 r = -ENOMEM; 6013 goto err_cpu_kick_mask; 6014 } 6015 } 6016 6017 r = kvm_irqfd_init(); 6018 if (r) 6019 goto err_irqfd; 6020 6021 r = kvm_async_pf_init(); 6022 if (r) 6023 goto err_async_pf; 6024 6025 kvm_chardev_ops.owner = module; 6026 6027 kvm_preempt_ops.sched_in = kvm_sched_in; 6028 kvm_preempt_ops.sched_out = kvm_sched_out; 6029 6030 kvm_init_debug(); 6031 6032 r = kvm_vfio_ops_init(); 6033 if (WARN_ON_ONCE(r)) 6034 goto err_vfio; 6035 6036 /* 6037 * Registration _must_ be the very last thing done, as this exposes 6038 * /dev/kvm to userspace, i.e. all infrastructure must be setup! 6039 */ 6040 r = misc_register(&kvm_dev); 6041 if (r) { 6042 pr_err("kvm: misc device register failed\n"); 6043 goto err_register; 6044 } 6045 6046 return 0; 6047 6048 err_register: 6049 kvm_vfio_ops_exit(); 6050 err_vfio: 6051 kvm_async_pf_deinit(); 6052 err_async_pf: 6053 kvm_irqfd_exit(); 6054 err_irqfd: 6055 err_cpu_kick_mask: 6056 for_each_possible_cpu(cpu) 6057 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 6058 kmem_cache_destroy(kvm_vcpu_cache); 6059 err_vcpu_cache: 6060 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 6061 unregister_syscore_ops(&kvm_syscore_ops); 6062 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE); 6063 #endif 6064 return r; 6065 } 6066 EXPORT_SYMBOL_GPL(kvm_init); 6067 6068 void kvm_exit(void) 6069 { 6070 int cpu; 6071 6072 /* 6073 * Note, unregistering /dev/kvm doesn't strictly need to come first, 6074 * fops_get(), a.k.a. try_module_get(), prevents acquiring references 6075 * to KVM while the module is being stopped. 
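 * (The misc device's fops carry the module owner set in kvm_init(), so
 * opening /dev/kvm fails once the module's refcount can no longer be taken.)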
void kvm_exit(void)
{
	int cpu;

	/*
	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
	 * to KVM while the module is being stopped.
	 */
	misc_deregister(&kvm_dev);

	debugfs_remove_recursive(kvm_debugfs_dir);
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_vfio_ops_exit();
	kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
	unregister_syscore_ops(&kvm_syscore_ops);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
	kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);

struct kvm_vm_worker_thread_context {
	struct kvm *kvm;
	struct task_struct *parent;
	struct completion init_done;
	kvm_vm_thread_fn_t thread_fn;
	uintptr_t data;
	int err;
};
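
/*
 * Summary of the handshake between kvm_vm_create_worker_thread() and
 * kvm_vm_worker_thread() below:
 *
 *   1. The caller fills a kvm_vm_worker_thread_context on its stack,
 *      spawns the worker with kthread_run() and blocks on init_done.
 *   2. The worker copies what it needs out of the context, marks itself
 *      parked, attaches itself to the caller's cgroups, records any error
 *      and completes init_done.
 *   3. The worker sits in kthread_parkme() until the spawner unparks it
 *      (or stops it), then runs thread_fn(kvm, data).
 *   4. On exit it migrates back to its original parent's cgroup so it does
 *      not linger in the VM's cgroup after kthread_stop() returns.
 */
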
static int kvm_vm_worker_thread(void *context)
{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to locally copy anything that is needed beyond initialization.
	 */
	struct kvm_vm_worker_thread_context *init_context = context;
	struct task_struct *parent;
	struct kvm *kvm = init_context->kvm;
	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
	uintptr_t data = init_context->data;
	int err;

	err = kthread_park(current);
	/* kthread_park(current) is never supposed to return an error */
	WARN_ON(err != 0);
	if (err)
		goto init_complete;

	err = cgroup_attach_task_all(init_context->parent, current);
	if (err) {
		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
			__func__, err);
		goto init_complete;
	}

	set_user_nice(current, task_nice(init_context->parent));

init_complete:
	init_context->err = err;
	complete(&init_context->init_done);
	init_context = NULL;

	if (err)
		goto out;

	/* Wait to be woken up by the spawner before proceeding. */
	kthread_parkme();

	if (!kthread_should_stop())
		err = thread_fn(kvm, data);

out:
	/*
	 * Move the kthread back to its original cgroup to prevent it from
	 * lingering in the cgroup of the VM process after the latter finishes
	 * its execution.
	 *
	 * kthread_stop() waits on the 'exited' completion condition which is
	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
	 * kthread is removed from the cgroup in cgroup_exit(), which is
	 * called after exit_mm(). This causes kthread_stop() to return
	 * before the kthread actually quits the cgroup.
	 */
	rcu_read_lock();
	parent = rcu_dereference(current->real_parent);
	get_task_struct(parent);
	rcu_read_unlock();
	cgroup_attach_task_all(parent, current);
	put_task_struct(parent);

	return err;
}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr)
{
	struct kvm_vm_worker_thread_context init_context = {};
	struct task_struct *thread;

	*thread_ptr = NULL;
	init_context.kvm = kvm;
	init_context.parent = current;
	init_context.thread_fn = thread_fn;
	init_context.data = data;
	init_completion(&init_context.init_done);

	thread = kthread_run(kvm_vm_worker_thread, &init_context,
			     "%s-%d", name, task_pid_nr(current));
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* kthread_run is never supposed to return NULL */
	WARN_ON(thread == NULL);

	wait_for_completion(&init_context.init_done);

	if (!init_context.err)
		*thread_ptr = thread;

	return init_context.err;
}
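
/*
 * Caller sketch (illustrative only, hypothetical names): an architecture
 * might spawn a per-VM housekeeping thread after VM creation and stop it on
 * teardown.  Note that the worker parks itself, so the caller must unpark
 * it before thread_fn will run.
 *
 *	static int example_vm_worker(struct kvm *kvm, uintptr_t data)
 *	{
 *		while (!kthread_should_stop()) {
 *			(periodic per-VM work, sleeping in between)
 *		}
 *		return 0;
 *	}
 *
 *	err = kvm_vm_create_worker_thread(kvm, example_vm_worker, 0,
 *					  "example-vm-worker", &thread);
 *	if (!err)
 *		kthread_unpark(thread);
 *	...
 *	if (thread)
 *		kthread_stop(thread);
 */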