1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 #include <linux/suspend.h> 55 56 #include <asm/processor.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 60 #include "coalesced_mmio.h" 61 #include "async_pf.h" 62 #include "kvm_mm.h" 63 #include "vfio.h" 64 65 #define CREATE_TRACE_POINTS 66 #include <trace/events/kvm.h> 67 68 #include <linux/kvm_dirty_ring.h> 69 70 /* Worst case buffer size needed for holding an integer. */ 71 #define ITOA_MAX_LEN 12 72 73 MODULE_AUTHOR("Qumranet"); 74 MODULE_LICENSE("GPL"); 75 76 /* Architectures should define their poll value according to the halt latency */ 77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 78 module_param(halt_poll_ns, uint, 0644); 79 EXPORT_SYMBOL_GPL(halt_poll_ns); 80 81 /* Default doubles per-vcpu halt_poll_ns. */ 82 unsigned int halt_poll_ns_grow = 2; 83 module_param(halt_poll_ns_grow, uint, 0644); 84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 85 86 /* The start value to grow halt_poll_ns from */ 87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 88 module_param(halt_poll_ns_grow_start, uint, 0644); 89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 90 91 /* Default resets per-vcpu halt_poll_ns . */ 92 unsigned int halt_poll_ns_shrink; 93 module_param(halt_poll_ns_shrink, uint, 0644); 94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 95 96 /* 97 * Ordering of locks: 98 * 99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 100 */ 101 102 DEFINE_MUTEX(kvm_lock); 103 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 104 LIST_HEAD(vm_list); 105 106 static cpumask_var_t cpus_hardware_enabled; 107 static int kvm_usage_count; 108 static atomic_t hardware_enable_failed; 109 110 static struct kmem_cache *kvm_vcpu_cache; 111 112 static __read_mostly struct preempt_ops kvm_preempt_ops; 113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 114 115 struct dentry *kvm_debugfs_dir; 116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 117 118 static const struct file_operations stat_fops_per_vm; 119 120 static struct file_operations kvm_chardev_ops; 121 122 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 123 unsigned long arg); 124 #ifdef CONFIG_KVM_COMPAT 125 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 126 unsigned long arg); 127 #define KVM_COMPAT(c) .compat_ioctl = (c) 128 #else 129 /* 130 * For architectures that don't implement a compat infrastructure, 131 * adopt a double line of defense: 132 * - Prevent a compat task from opening /dev/kvm 133 * - If the open has been done by a 64bit task, and the KVM fd 134 * passed to a compat task, let the ioctls fail. 135 */ 136 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 137 unsigned long arg) { return -EINVAL; } 138 139 static int kvm_no_compat_open(struct inode *inode, struct file *file) 140 { 141 return is_compat_task() ? -ENODEV : 0; 142 } 143 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 144 .open = kvm_no_compat_open 145 #endif 146 static int hardware_enable_all(void); 147 static void hardware_disable_all(void); 148 149 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 150 151 __visible bool kvm_rebooting; 152 EXPORT_SYMBOL_GPL(kvm_rebooting); 153 154 #define KVM_EVENT_CREATE_VM 0 155 #define KVM_EVENT_DESTROY_VM 1 156 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 157 static unsigned long long kvm_createvm_count; 158 static unsigned long long kvm_active_vms; 159 160 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); 161 162 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 163 unsigned long start, unsigned long end) 164 { 165 } 166 167 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 168 { 169 } 170 171 bool kvm_is_zone_device_page(struct page *page) 172 { 173 /* 174 * The metadata used by is_zone_device_page() to determine whether or 175 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 176 * the device has been pinned, e.g. by get_user_pages(). WARN if the 177 * page_count() is zero to help detect bad usage of this helper. 178 */ 179 if (WARN_ON_ONCE(!page_count(page))) 180 return false; 181 182 return is_zone_device_page(page); 183 } 184 185 /* 186 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted 187 * page, NULL otherwise. Note, the list of refcounted PG_reserved page types 188 * is likely incomplete, it has been compiled purely through people wanting to 189 * back guest with a certain type of memory and encountering issues. 190 */ 191 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn) 192 { 193 struct page *page; 194 195 if (!pfn_valid(pfn)) 196 return NULL; 197 198 page = pfn_to_page(pfn); 199 if (!PageReserved(page)) 200 return page; 201 202 /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */ 203 if (is_zero_pfn(pfn)) 204 return page; 205 206 /* 207 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 208 * perspective they are "normal" pages, albeit with slightly different 209 * usage rules. 210 */ 211 if (kvm_is_zone_device_page(page)) 212 return page; 213 214 return NULL; 215 } 216 217 /* 218 * Switches to specified vcpu, until a matching vcpu_put() 219 */ 220 void vcpu_load(struct kvm_vcpu *vcpu) 221 { 222 int cpu = get_cpu(); 223 224 __this_cpu_write(kvm_running_vcpu, vcpu); 225 preempt_notifier_register(&vcpu->preempt_notifier); 226 kvm_arch_vcpu_load(vcpu, cpu); 227 put_cpu(); 228 } 229 EXPORT_SYMBOL_GPL(vcpu_load); 230 231 void vcpu_put(struct kvm_vcpu *vcpu) 232 { 233 preempt_disable(); 234 kvm_arch_vcpu_put(vcpu); 235 preempt_notifier_unregister(&vcpu->preempt_notifier); 236 __this_cpu_write(kvm_running_vcpu, NULL); 237 preempt_enable(); 238 } 239 EXPORT_SYMBOL_GPL(vcpu_put); 240 241 /* TODO: merge with kvm_arch_vcpu_should_kick */ 242 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 243 { 244 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 245 246 /* 247 * We need to wait for the VCPU to reenable interrupts and get out of 248 * READING_SHADOW_PAGE_TABLES mode. 249 */ 250 if (req & KVM_REQUEST_WAIT) 251 return mode != OUTSIDE_GUEST_MODE; 252 253 /* 254 * Need to kick a running VCPU, but otherwise there is nothing to do. 255 */ 256 return mode == IN_GUEST_MODE; 257 } 258 259 static void ack_kick(void *_completed) 260 { 261 } 262 263 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) 264 { 265 if (cpumask_empty(cpus)) 266 return false; 267 268 smp_call_function_many(cpus, ack_kick, NULL, wait); 269 return true; 270 } 271 272 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, 273 struct cpumask *tmp, int current_cpu) 274 { 275 int cpu; 276 277 if (likely(!(req & KVM_REQUEST_NO_ACTION))) 278 __kvm_make_request(req, vcpu); 279 280 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 281 return; 282 283 /* 284 * Note, the vCPU could get migrated to a different pCPU at any point 285 * after kvm_request_needs_ipi(), which could result in sending an IPI 286 * to the previous pCPU. But, that's OK because the purpose of the IPI 287 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is 288 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES 289 * after this point is also OK, as the requirement is only that KVM wait 290 * for vCPUs that were reading SPTEs _before_ any changes were 291 * finalized. See kvm_vcpu_kick() for more details on handling requests. 292 */ 293 if (kvm_request_needs_ipi(vcpu, req)) { 294 cpu = READ_ONCE(vcpu->cpu); 295 if (cpu != -1 && cpu != current_cpu) 296 __cpumask_set_cpu(cpu, tmp); 297 } 298 } 299 300 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 301 unsigned long *vcpu_bitmap) 302 { 303 struct kvm_vcpu *vcpu; 304 struct cpumask *cpus; 305 int i, me; 306 bool called; 307 308 me = get_cpu(); 309 310 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 311 cpumask_clear(cpus); 312 313 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 314 vcpu = kvm_get_vcpu(kvm, i); 315 if (!vcpu) 316 continue; 317 kvm_make_vcpu_request(vcpu, req, cpus, me); 318 } 319 320 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 321 put_cpu(); 322 323 return called; 324 } 325 326 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 327 struct kvm_vcpu *except) 328 { 329 struct kvm_vcpu *vcpu; 330 struct cpumask *cpus; 331 unsigned long i; 332 bool called; 333 int me; 334 335 me = get_cpu(); 336 337 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 338 cpumask_clear(cpus); 339 340 kvm_for_each_vcpu(i, vcpu, kvm) { 341 if (vcpu == except) 342 continue; 343 kvm_make_vcpu_request(vcpu, req, cpus, me); 344 } 345 346 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 347 put_cpu(); 348 349 return called; 350 } 351 352 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 353 { 354 return kvm_make_all_cpus_request_except(kvm, req, NULL); 355 } 356 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 357 358 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 359 void kvm_flush_remote_tlbs(struct kvm *kvm) 360 { 361 ++kvm->stat.generic.remote_tlb_flush_requests; 362 363 /* 364 * We want to publish modifications to the page tables before reading 365 * mode. Pairs with a memory barrier in arch-specific code. 366 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 367 * and smp_mb in walk_shadow_page_lockless_begin/end. 368 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 369 * 370 * There is already an smp_mb__after_atomic() before 371 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 372 * barrier here. 373 */ 374 if (!kvm_arch_flush_remote_tlb(kvm) 375 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 376 ++kvm->stat.generic.remote_tlb_flush; 377 } 378 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 379 #endif 380 381 static void kvm_flush_shadow_all(struct kvm *kvm) 382 { 383 kvm_arch_flush_shadow_all(kvm); 384 kvm_arch_guest_memory_reclaimed(kvm); 385 } 386 387 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 388 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 389 gfp_t gfp_flags) 390 { 391 gfp_flags |= mc->gfp_zero; 392 393 if (mc->kmem_cache) 394 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 395 else 396 return (void *)__get_free_page(gfp_flags); 397 } 398 399 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min) 400 { 401 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT; 402 void *obj; 403 404 if (mc->nobjs >= min) 405 return 0; 406 407 if (unlikely(!mc->objects)) { 408 if (WARN_ON_ONCE(!capacity)) 409 return -EIO; 410 411 mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp); 412 if (!mc->objects) 413 return -ENOMEM; 414 415 mc->capacity = capacity; 416 } 417 418 /* It is illegal to request a different capacity across topups. */ 419 if (WARN_ON_ONCE(mc->capacity != capacity)) 420 return -EIO; 421 422 while (mc->nobjs < mc->capacity) { 423 obj = mmu_memory_cache_alloc_obj(mc, gfp); 424 if (!obj) 425 return mc->nobjs >= min ? 0 : -ENOMEM; 426 mc->objects[mc->nobjs++] = obj; 427 } 428 return 0; 429 } 430 431 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 432 { 433 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min); 434 } 435 436 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 437 { 438 return mc->nobjs; 439 } 440 441 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 442 { 443 while (mc->nobjs) { 444 if (mc->kmem_cache) 445 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 446 else 447 free_page((unsigned long)mc->objects[--mc->nobjs]); 448 } 449 450 kvfree(mc->objects); 451 452 mc->objects = NULL; 453 mc->capacity = 0; 454 } 455 456 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 457 { 458 void *p; 459 460 if (WARN_ON(!mc->nobjs)) 461 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 462 else 463 p = mc->objects[--mc->nobjs]; 464 BUG_ON(!p); 465 return p; 466 } 467 #endif 468 469 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 470 { 471 mutex_init(&vcpu->mutex); 472 vcpu->cpu = -1; 473 vcpu->kvm = kvm; 474 vcpu->vcpu_id = id; 475 vcpu->pid = NULL; 476 #ifndef __KVM_HAVE_ARCH_WQP 477 rcuwait_init(&vcpu->wait); 478 #endif 479 kvm_async_pf_vcpu_init(vcpu); 480 481 kvm_vcpu_set_in_spin_loop(vcpu, false); 482 kvm_vcpu_set_dy_eligible(vcpu, false); 483 vcpu->preempted = false; 484 vcpu->ready = false; 485 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 486 vcpu->last_used_slot = NULL; 487 488 /* Fill the stats id string for the vcpu */ 489 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 490 task_pid_nr(current), id); 491 } 492 493 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 494 { 495 kvm_arch_vcpu_destroy(vcpu); 496 kvm_dirty_ring_free(&vcpu->dirty_ring); 497 498 /* 499 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 500 * the vcpu->pid pointer, and at destruction time all file descriptors 501 * are already gone. 502 */ 503 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 504 505 free_page((unsigned long)vcpu->run); 506 kmem_cache_free(kvm_vcpu_cache, vcpu); 507 } 508 509 void kvm_destroy_vcpus(struct kvm *kvm) 510 { 511 unsigned long i; 512 struct kvm_vcpu *vcpu; 513 514 kvm_for_each_vcpu(i, vcpu, kvm) { 515 kvm_vcpu_destroy(vcpu); 516 xa_erase(&kvm->vcpu_array, i); 517 } 518 519 atomic_set(&kvm->online_vcpus, 0); 520 } 521 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 522 523 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 524 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 525 { 526 return container_of(mn, struct kvm, mmu_notifier); 527 } 528 529 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 530 struct mm_struct *mm, 531 unsigned long start, unsigned long end) 532 { 533 struct kvm *kvm = mmu_notifier_to_kvm(mn); 534 int idx; 535 536 idx = srcu_read_lock(&kvm->srcu); 537 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 538 srcu_read_unlock(&kvm->srcu, idx); 539 } 540 541 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 542 543 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 544 unsigned long end); 545 546 typedef void (*on_unlock_fn_t)(struct kvm *kvm); 547 548 struct kvm_hva_range { 549 unsigned long start; 550 unsigned long end; 551 pte_t pte; 552 hva_handler_t handler; 553 on_lock_fn_t on_lock; 554 on_unlock_fn_t on_unlock; 555 bool flush_on_ret; 556 bool may_block; 557 }; 558 559 /* 560 * Use a dedicated stub instead of NULL to indicate that there is no callback 561 * function/handler. The compiler technically can't guarantee that a real 562 * function will have a non-zero address, and so it will generate code to 563 * check for !NULL, whereas comparing against a stub will be elided at compile 564 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 565 */ 566 static void kvm_null_fn(void) 567 { 568 569 } 570 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 571 572 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 573 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 574 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 575 node; \ 576 node = interval_tree_iter_next(node, start, last)) \ 577 578 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 579 const struct kvm_hva_range *range) 580 { 581 bool ret = false, locked = false; 582 struct kvm_gfn_range gfn_range; 583 struct kvm_memory_slot *slot; 584 struct kvm_memslots *slots; 585 int i, idx; 586 587 if (WARN_ON_ONCE(range->end <= range->start)) 588 return 0; 589 590 /* A null handler is allowed if and only if on_lock() is provided. */ 591 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 592 IS_KVM_NULL_FN(range->handler))) 593 return 0; 594 595 idx = srcu_read_lock(&kvm->srcu); 596 597 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 598 struct interval_tree_node *node; 599 600 slots = __kvm_memslots(kvm, i); 601 kvm_for_each_memslot_in_hva_range(node, slots, 602 range->start, range->end - 1) { 603 unsigned long hva_start, hva_end; 604 605 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); 606 hva_start = max(range->start, slot->userspace_addr); 607 hva_end = min(range->end, slot->userspace_addr + 608 (slot->npages << PAGE_SHIFT)); 609 610 /* 611 * To optimize for the likely case where the address 612 * range is covered by zero or one memslots, don't 613 * bother making these conditional (to avoid writes on 614 * the second or later invocation of the handler). 615 */ 616 gfn_range.pte = range->pte; 617 gfn_range.may_block = range->may_block; 618 619 /* 620 * {gfn(page) | page intersects with [hva_start, hva_end)} = 621 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 622 */ 623 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 624 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 625 gfn_range.slot = slot; 626 627 if (!locked) { 628 locked = true; 629 KVM_MMU_LOCK(kvm); 630 if (!IS_KVM_NULL_FN(range->on_lock)) 631 range->on_lock(kvm, range->start, range->end); 632 if (IS_KVM_NULL_FN(range->handler)) 633 break; 634 } 635 ret |= range->handler(kvm, &gfn_range); 636 } 637 } 638 639 if (range->flush_on_ret && ret) 640 kvm_flush_remote_tlbs(kvm); 641 642 if (locked) { 643 KVM_MMU_UNLOCK(kvm); 644 if (!IS_KVM_NULL_FN(range->on_unlock)) 645 range->on_unlock(kvm); 646 } 647 648 srcu_read_unlock(&kvm->srcu, idx); 649 650 /* The notifiers are averse to booleans. :-( */ 651 return (int)ret; 652 } 653 654 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, 655 unsigned long start, 656 unsigned long end, 657 pte_t pte, 658 hva_handler_t handler) 659 { 660 struct kvm *kvm = mmu_notifier_to_kvm(mn); 661 const struct kvm_hva_range range = { 662 .start = start, 663 .end = end, 664 .pte = pte, 665 .handler = handler, 666 .on_lock = (void *)kvm_null_fn, 667 .on_unlock = (void *)kvm_null_fn, 668 .flush_on_ret = true, 669 .may_block = false, 670 }; 671 672 return __kvm_handle_hva_range(kvm, &range); 673 } 674 675 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, 676 unsigned long start, 677 unsigned long end, 678 hva_handler_t handler) 679 { 680 struct kvm *kvm = mmu_notifier_to_kvm(mn); 681 const struct kvm_hva_range range = { 682 .start = start, 683 .end = end, 684 .pte = __pte(0), 685 .handler = handler, 686 .on_lock = (void *)kvm_null_fn, 687 .on_unlock = (void *)kvm_null_fn, 688 .flush_on_ret = false, 689 .may_block = false, 690 }; 691 692 return __kvm_handle_hva_range(kvm, &range); 693 } 694 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 695 struct mm_struct *mm, 696 unsigned long address, 697 pte_t pte) 698 { 699 struct kvm *kvm = mmu_notifier_to_kvm(mn); 700 701 trace_kvm_set_spte_hva(address); 702 703 /* 704 * .change_pte() must be surrounded by .invalidate_range_{start,end}(). 705 * If mmu_invalidate_in_progress is zero, then no in-progress 706 * invalidations, including this one, found a relevant memslot at 707 * start(); rechecking memslots here is unnecessary. Note, a false 708 * positive (count elevated by a different invalidation) is sub-optimal 709 * but functionally ok. 710 */ 711 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); 712 if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) 713 return; 714 715 kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn); 716 } 717 718 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, 719 unsigned long end) 720 { 721 /* 722 * The count increase must become visible at unlock time as no 723 * spte can be established without taking the mmu_lock and 724 * count is also read inside the mmu_lock critical section. 725 */ 726 kvm->mmu_invalidate_in_progress++; 727 if (likely(kvm->mmu_invalidate_in_progress == 1)) { 728 kvm->mmu_invalidate_range_start = start; 729 kvm->mmu_invalidate_range_end = end; 730 } else { 731 /* 732 * Fully tracking multiple concurrent ranges has diminishing 733 * returns. Keep things simple and just find the minimal range 734 * which includes the current and new ranges. As there won't be 735 * enough information to subtract a range after its invalidate 736 * completes, any ranges invalidated concurrently will 737 * accumulate and persist until all outstanding invalidates 738 * complete. 739 */ 740 kvm->mmu_invalidate_range_start = 741 min(kvm->mmu_invalidate_range_start, start); 742 kvm->mmu_invalidate_range_end = 743 max(kvm->mmu_invalidate_range_end, end); 744 } 745 } 746 747 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 748 const struct mmu_notifier_range *range) 749 { 750 struct kvm *kvm = mmu_notifier_to_kvm(mn); 751 const struct kvm_hva_range hva_range = { 752 .start = range->start, 753 .end = range->end, 754 .pte = __pte(0), 755 .handler = kvm_unmap_gfn_range, 756 .on_lock = kvm_mmu_invalidate_begin, 757 .on_unlock = kvm_arch_guest_memory_reclaimed, 758 .flush_on_ret = true, 759 .may_block = mmu_notifier_range_blockable(range), 760 }; 761 762 trace_kvm_unmap_hva_range(range->start, range->end); 763 764 /* 765 * Prevent memslot modification between range_start() and range_end() 766 * so that conditionally locking provides the same result in both 767 * functions. Without that guarantee, the mmu_invalidate_in_progress 768 * adjustments will be imbalanced. 769 * 770 * Pairs with the decrement in range_end(). 771 */ 772 spin_lock(&kvm->mn_invalidate_lock); 773 kvm->mn_active_invalidate_count++; 774 spin_unlock(&kvm->mn_invalidate_lock); 775 776 /* 777 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e. 778 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring 779 * each cache's lock. There are relatively few caches in existence at 780 * any given time, and the caches themselves can check for hva overlap, 781 * i.e. don't need to rely on memslot overlap checks for performance. 782 * Because this runs without holding mmu_lock, the pfn caches must use 783 * mn_active_invalidate_count (see above) instead of 784 * mmu_invalidate_in_progress. 785 */ 786 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, 787 hva_range.may_block); 788 789 __kvm_handle_hva_range(kvm, &hva_range); 790 791 return 0; 792 } 793 794 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, 795 unsigned long end) 796 { 797 /* 798 * This sequence increase will notify the kvm page fault that 799 * the page that is going to be mapped in the spte could have 800 * been freed. 801 */ 802 kvm->mmu_invalidate_seq++; 803 smp_wmb(); 804 /* 805 * The above sequence increase must be visible before the 806 * below count decrease, which is ensured by the smp_wmb above 807 * in conjunction with the smp_rmb in mmu_invalidate_retry(). 808 */ 809 kvm->mmu_invalidate_in_progress--; 810 } 811 812 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 813 const struct mmu_notifier_range *range) 814 { 815 struct kvm *kvm = mmu_notifier_to_kvm(mn); 816 const struct kvm_hva_range hva_range = { 817 .start = range->start, 818 .end = range->end, 819 .pte = __pte(0), 820 .handler = (void *)kvm_null_fn, 821 .on_lock = kvm_mmu_invalidate_end, 822 .on_unlock = (void *)kvm_null_fn, 823 .flush_on_ret = false, 824 .may_block = mmu_notifier_range_blockable(range), 825 }; 826 bool wake; 827 828 __kvm_handle_hva_range(kvm, &hva_range); 829 830 /* Pairs with the increment in range_start(). */ 831 spin_lock(&kvm->mn_invalidate_lock); 832 wake = (--kvm->mn_active_invalidate_count == 0); 833 spin_unlock(&kvm->mn_invalidate_lock); 834 835 /* 836 * There can only be one waiter, since the wait happens under 837 * slots_lock. 838 */ 839 if (wake) 840 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 841 842 BUG_ON(kvm->mmu_invalidate_in_progress < 0); 843 } 844 845 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 846 struct mm_struct *mm, 847 unsigned long start, 848 unsigned long end) 849 { 850 trace_kvm_age_hva(start, end); 851 852 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); 853 } 854 855 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 856 struct mm_struct *mm, 857 unsigned long start, 858 unsigned long end) 859 { 860 trace_kvm_age_hva(start, end); 861 862 /* 863 * Even though we do not flush TLB, this will still adversely 864 * affect performance on pre-Haswell Intel EPT, where there is 865 * no EPT Access Bit to clear so that we have to tear down EPT 866 * tables instead. If we find this unacceptable, we can always 867 * add a parameter to kvm_age_hva so that it effectively doesn't 868 * do anything on clear_young. 869 * 870 * Also note that currently we never issue secondary TLB flushes 871 * from clear_young, leaving this job up to the regular system 872 * cadence. If we find this inaccurate, we might come up with a 873 * more sophisticated heuristic later. 874 */ 875 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 876 } 877 878 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 879 struct mm_struct *mm, 880 unsigned long address) 881 { 882 trace_kvm_test_age_hva(address); 883 884 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 885 kvm_test_age_gfn); 886 } 887 888 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 889 struct mm_struct *mm) 890 { 891 struct kvm *kvm = mmu_notifier_to_kvm(mn); 892 int idx; 893 894 idx = srcu_read_lock(&kvm->srcu); 895 kvm_flush_shadow_all(kvm); 896 srcu_read_unlock(&kvm->srcu, idx); 897 } 898 899 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 900 .invalidate_range = kvm_mmu_notifier_invalidate_range, 901 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 902 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 903 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 904 .clear_young = kvm_mmu_notifier_clear_young, 905 .test_young = kvm_mmu_notifier_test_young, 906 .change_pte = kvm_mmu_notifier_change_pte, 907 .release = kvm_mmu_notifier_release, 908 }; 909 910 static int kvm_init_mmu_notifier(struct kvm *kvm) 911 { 912 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 913 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 914 } 915 916 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 917 918 static int kvm_init_mmu_notifier(struct kvm *kvm) 919 { 920 return 0; 921 } 922 923 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 924 925 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 926 static int kvm_pm_notifier_call(struct notifier_block *bl, 927 unsigned long state, 928 void *unused) 929 { 930 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 931 932 return kvm_arch_pm_notifier(kvm, state); 933 } 934 935 static void kvm_init_pm_notifier(struct kvm *kvm) 936 { 937 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 938 /* Suspend KVM before we suspend ftrace, RCU, etc. */ 939 kvm->pm_notifier.priority = INT_MAX; 940 register_pm_notifier(&kvm->pm_notifier); 941 } 942 943 static void kvm_destroy_pm_notifier(struct kvm *kvm) 944 { 945 unregister_pm_notifier(&kvm->pm_notifier); 946 } 947 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 948 static void kvm_init_pm_notifier(struct kvm *kvm) 949 { 950 } 951 952 static void kvm_destroy_pm_notifier(struct kvm *kvm) 953 { 954 } 955 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 956 957 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 958 { 959 if (!memslot->dirty_bitmap) 960 return; 961 962 kvfree(memslot->dirty_bitmap); 963 memslot->dirty_bitmap = NULL; 964 } 965 966 /* This does not remove the slot from struct kvm_memslots data structures */ 967 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 968 { 969 kvm_destroy_dirty_bitmap(slot); 970 971 kvm_arch_free_memslot(kvm, slot); 972 973 kfree(slot); 974 } 975 976 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 977 { 978 struct hlist_node *idnode; 979 struct kvm_memory_slot *memslot; 980 int bkt; 981 982 /* 983 * The same memslot objects live in both active and inactive sets, 984 * arbitrarily free using index '1' so the second invocation of this 985 * function isn't operating over a structure with dangling pointers 986 * (even though this function isn't actually touching them). 987 */ 988 if (!slots->node_idx) 989 return; 990 991 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) 992 kvm_free_memslot(kvm, memslot); 993 } 994 995 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 996 { 997 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 998 case KVM_STATS_TYPE_INSTANT: 999 return 0444; 1000 case KVM_STATS_TYPE_CUMULATIVE: 1001 case KVM_STATS_TYPE_PEAK: 1002 default: 1003 return 0644; 1004 } 1005 } 1006 1007 1008 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 1009 { 1010 int i; 1011 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1012 kvm_vcpu_stats_header.num_desc; 1013 1014 if (IS_ERR(kvm->debugfs_dentry)) 1015 return; 1016 1017 debugfs_remove_recursive(kvm->debugfs_dentry); 1018 1019 if (kvm->debugfs_stat_data) { 1020 for (i = 0; i < kvm_debugfs_num_entries; i++) 1021 kfree(kvm->debugfs_stat_data[i]); 1022 kfree(kvm->debugfs_stat_data); 1023 } 1024 } 1025 1026 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname) 1027 { 1028 static DEFINE_MUTEX(kvm_debugfs_lock); 1029 struct dentry *dent; 1030 char dir_name[ITOA_MAX_LEN * 2]; 1031 struct kvm_stat_data *stat_data; 1032 const struct _kvm_stats_desc *pdesc; 1033 int i, ret = -ENOMEM; 1034 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1035 kvm_vcpu_stats_header.num_desc; 1036 1037 if (!debugfs_initialized()) 1038 return 0; 1039 1040 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname); 1041 mutex_lock(&kvm_debugfs_lock); 1042 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 1043 if (dent) { 1044 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 1045 dput(dent); 1046 mutex_unlock(&kvm_debugfs_lock); 1047 return 0; 1048 } 1049 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 1050 mutex_unlock(&kvm_debugfs_lock); 1051 if (IS_ERR(dent)) 1052 return 0; 1053 1054 kvm->debugfs_dentry = dent; 1055 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 1056 sizeof(*kvm->debugfs_stat_data), 1057 GFP_KERNEL_ACCOUNT); 1058 if (!kvm->debugfs_stat_data) 1059 goto out_err; 1060 1061 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 1062 pdesc = &kvm_vm_stats_desc[i]; 1063 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1064 if (!stat_data) 1065 goto out_err; 1066 1067 stat_data->kvm = kvm; 1068 stat_data->desc = pdesc; 1069 stat_data->kind = KVM_STAT_VM; 1070 kvm->debugfs_stat_data[i] = stat_data; 1071 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1072 kvm->debugfs_dentry, stat_data, 1073 &stat_fops_per_vm); 1074 } 1075 1076 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 1077 pdesc = &kvm_vcpu_stats_desc[i]; 1078 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1079 if (!stat_data) 1080 goto out_err; 1081 1082 stat_data->kvm = kvm; 1083 stat_data->desc = pdesc; 1084 stat_data->kind = KVM_STAT_VCPU; 1085 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; 1086 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1087 kvm->debugfs_dentry, stat_data, 1088 &stat_fops_per_vm); 1089 } 1090 1091 ret = kvm_arch_create_vm_debugfs(kvm); 1092 if (ret) 1093 goto out_err; 1094 1095 return 0; 1096 out_err: 1097 kvm_destroy_vm_debugfs(kvm); 1098 return ret; 1099 } 1100 1101 /* 1102 * Called after the VM is otherwise initialized, but just before adding it to 1103 * the vm_list. 1104 */ 1105 int __weak kvm_arch_post_init_vm(struct kvm *kvm) 1106 { 1107 return 0; 1108 } 1109 1110 /* 1111 * Called just after removing the VM from the vm_list, but before doing any 1112 * other destruction. 1113 */ 1114 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) 1115 { 1116 } 1117 1118 /* 1119 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should 1120 * be setup already, so we can create arch-specific debugfs entries under it. 1121 * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so 1122 * a per-arch destroy interface is not needed. 1123 */ 1124 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) 1125 { 1126 return 0; 1127 } 1128 1129 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) 1130 { 1131 struct kvm *kvm = kvm_arch_alloc_vm(); 1132 struct kvm_memslots *slots; 1133 int r = -ENOMEM; 1134 int i, j; 1135 1136 if (!kvm) 1137 return ERR_PTR(-ENOMEM); 1138 1139 /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */ 1140 __module_get(kvm_chardev_ops.owner); 1141 1142 KVM_MMU_LOCK_INIT(kvm); 1143 mmgrab(current->mm); 1144 kvm->mm = current->mm; 1145 kvm_eventfd_init(kvm); 1146 mutex_init(&kvm->lock); 1147 mutex_init(&kvm->irq_lock); 1148 mutex_init(&kvm->slots_lock); 1149 mutex_init(&kvm->slots_arch_lock); 1150 spin_lock_init(&kvm->mn_invalidate_lock); 1151 rcuwait_init(&kvm->mn_memslots_update_rcuwait); 1152 xa_init(&kvm->vcpu_array); 1153 1154 INIT_LIST_HEAD(&kvm->gpc_list); 1155 spin_lock_init(&kvm->gpc_lock); 1156 1157 INIT_LIST_HEAD(&kvm->devices); 1158 kvm->max_vcpus = KVM_MAX_VCPUS; 1159 1160 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 1161 1162 /* 1163 * Force subsequent debugfs file creations to fail if the VM directory 1164 * is not created (by kvm_create_vm_debugfs()). 1165 */ 1166 kvm->debugfs_dentry = ERR_PTR(-ENOENT); 1167 1168 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d", 1169 task_pid_nr(current)); 1170 1171 if (init_srcu_struct(&kvm->srcu)) 1172 goto out_err_no_srcu; 1173 if (init_srcu_struct(&kvm->irq_srcu)) 1174 goto out_err_no_irq_srcu; 1175 1176 refcount_set(&kvm->users_count, 1); 1177 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1178 for (j = 0; j < 2; j++) { 1179 slots = &kvm->__memslots[i][j]; 1180 1181 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); 1182 slots->hva_tree = RB_ROOT_CACHED; 1183 slots->gfn_tree = RB_ROOT; 1184 hash_init(slots->id_hash); 1185 slots->node_idx = j; 1186 1187 /* Generations must be different for each address space. */ 1188 slots->generation = i; 1189 } 1190 1191 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); 1192 } 1193 1194 for (i = 0; i < KVM_NR_BUSES; i++) { 1195 rcu_assign_pointer(kvm->buses[i], 1196 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 1197 if (!kvm->buses[i]) 1198 goto out_err_no_arch_destroy_vm; 1199 } 1200 1201 kvm->max_halt_poll_ns = halt_poll_ns; 1202 1203 r = kvm_arch_init_vm(kvm, type); 1204 if (r) 1205 goto out_err_no_arch_destroy_vm; 1206 1207 r = hardware_enable_all(); 1208 if (r) 1209 goto out_err_no_disable; 1210 1211 #ifdef CONFIG_HAVE_KVM_IRQFD 1212 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 1213 #endif 1214 1215 r = kvm_init_mmu_notifier(kvm); 1216 if (r) 1217 goto out_err_no_mmu_notifier; 1218 1219 r = kvm_coalesced_mmio_init(kvm); 1220 if (r < 0) 1221 goto out_no_coalesced_mmio; 1222 1223 r = kvm_create_vm_debugfs(kvm, fdname); 1224 if (r) 1225 goto out_err_no_debugfs; 1226 1227 r = kvm_arch_post_init_vm(kvm); 1228 if (r) 1229 goto out_err; 1230 1231 mutex_lock(&kvm_lock); 1232 list_add(&kvm->vm_list, &vm_list); 1233 mutex_unlock(&kvm_lock); 1234 1235 preempt_notifier_inc(); 1236 kvm_init_pm_notifier(kvm); 1237 1238 return kvm; 1239 1240 out_err: 1241 kvm_destroy_vm_debugfs(kvm); 1242 out_err_no_debugfs: 1243 kvm_coalesced_mmio_free(kvm); 1244 out_no_coalesced_mmio: 1245 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1246 if (kvm->mmu_notifier.ops) 1247 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1248 #endif 1249 out_err_no_mmu_notifier: 1250 hardware_disable_all(); 1251 out_err_no_disable: 1252 kvm_arch_destroy_vm(kvm); 1253 out_err_no_arch_destroy_vm: 1254 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1255 for (i = 0; i < KVM_NR_BUSES; i++) 1256 kfree(kvm_get_bus(kvm, i)); 1257 cleanup_srcu_struct(&kvm->irq_srcu); 1258 out_err_no_irq_srcu: 1259 cleanup_srcu_struct(&kvm->srcu); 1260 out_err_no_srcu: 1261 kvm_arch_free_vm(kvm); 1262 mmdrop(current->mm); 1263 module_put(kvm_chardev_ops.owner); 1264 return ERR_PTR(r); 1265 } 1266 1267 static void kvm_destroy_devices(struct kvm *kvm) 1268 { 1269 struct kvm_device *dev, *tmp; 1270 1271 /* 1272 * We do not need to take the kvm->lock here, because nobody else 1273 * has a reference to the struct kvm at this point and therefore 1274 * cannot access the devices list anyhow. 1275 */ 1276 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 1277 list_del(&dev->vm_node); 1278 dev->ops->destroy(dev); 1279 } 1280 } 1281 1282 static void kvm_destroy_vm(struct kvm *kvm) 1283 { 1284 int i; 1285 struct mm_struct *mm = kvm->mm; 1286 1287 kvm_destroy_pm_notifier(kvm); 1288 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 1289 kvm_destroy_vm_debugfs(kvm); 1290 kvm_arch_sync_events(kvm); 1291 mutex_lock(&kvm_lock); 1292 list_del(&kvm->vm_list); 1293 mutex_unlock(&kvm_lock); 1294 kvm_arch_pre_destroy_vm(kvm); 1295 1296 kvm_free_irq_routing(kvm); 1297 for (i = 0; i < KVM_NR_BUSES; i++) { 1298 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1299 1300 if (bus) 1301 kvm_io_bus_destroy(bus); 1302 kvm->buses[i] = NULL; 1303 } 1304 kvm_coalesced_mmio_free(kvm); 1305 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1306 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1307 /* 1308 * At this point, pending calls to invalidate_range_start() 1309 * have completed but no more MMU notifiers will run, so 1310 * mn_active_invalidate_count may remain unbalanced. 1311 * No threads can be waiting in install_new_memslots as the 1312 * last reference on KVM has been dropped, but freeing 1313 * memslots would deadlock without this manual intervention. 1314 */ 1315 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); 1316 kvm->mn_active_invalidate_count = 0; 1317 #else 1318 kvm_flush_shadow_all(kvm); 1319 #endif 1320 kvm_arch_destroy_vm(kvm); 1321 kvm_destroy_devices(kvm); 1322 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1323 kvm_free_memslots(kvm, &kvm->__memslots[i][0]); 1324 kvm_free_memslots(kvm, &kvm->__memslots[i][1]); 1325 } 1326 cleanup_srcu_struct(&kvm->irq_srcu); 1327 cleanup_srcu_struct(&kvm->srcu); 1328 kvm_arch_free_vm(kvm); 1329 preempt_notifier_dec(); 1330 hardware_disable_all(); 1331 mmdrop(mm); 1332 module_put(kvm_chardev_ops.owner); 1333 } 1334 1335 void kvm_get_kvm(struct kvm *kvm) 1336 { 1337 refcount_inc(&kvm->users_count); 1338 } 1339 EXPORT_SYMBOL_GPL(kvm_get_kvm); 1340 1341 /* 1342 * Make sure the vm is not during destruction, which is a safe version of 1343 * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise. 1344 */ 1345 bool kvm_get_kvm_safe(struct kvm *kvm) 1346 { 1347 return refcount_inc_not_zero(&kvm->users_count); 1348 } 1349 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe); 1350 1351 void kvm_put_kvm(struct kvm *kvm) 1352 { 1353 if (refcount_dec_and_test(&kvm->users_count)) 1354 kvm_destroy_vm(kvm); 1355 } 1356 EXPORT_SYMBOL_GPL(kvm_put_kvm); 1357 1358 /* 1359 * Used to put a reference that was taken on behalf of an object associated 1360 * with a user-visible file descriptor, e.g. a vcpu or device, if installation 1361 * of the new file descriptor fails and the reference cannot be transferred to 1362 * its final owner. In such cases, the caller is still actively using @kvm and 1363 * will fail miserably if the refcount unexpectedly hits zero. 1364 */ 1365 void kvm_put_kvm_no_destroy(struct kvm *kvm) 1366 { 1367 WARN_ON(refcount_dec_and_test(&kvm->users_count)); 1368 } 1369 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); 1370 1371 static int kvm_vm_release(struct inode *inode, struct file *filp) 1372 { 1373 struct kvm *kvm = filp->private_data; 1374 1375 kvm_irqfd_release(kvm); 1376 1377 kvm_put_kvm(kvm); 1378 return 0; 1379 } 1380 1381 /* 1382 * Allocation size is twice as large as the actual dirty bitmap size. 1383 * See kvm_vm_ioctl_get_dirty_log() why this is needed. 1384 */ 1385 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) 1386 { 1387 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot); 1388 1389 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT); 1390 if (!memslot->dirty_bitmap) 1391 return -ENOMEM; 1392 1393 return 0; 1394 } 1395 1396 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) 1397 { 1398 struct kvm_memslots *active = __kvm_memslots(kvm, as_id); 1399 int node_idx_inactive = active->node_idx ^ 1; 1400 1401 return &kvm->__memslots[as_id][node_idx_inactive]; 1402 } 1403 1404 /* 1405 * Helper to get the address space ID when one of memslot pointers may be NULL. 1406 * This also serves as a sanity that at least one of the pointers is non-NULL, 1407 * and that their address space IDs don't diverge. 1408 */ 1409 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, 1410 struct kvm_memory_slot *b) 1411 { 1412 if (WARN_ON_ONCE(!a && !b)) 1413 return 0; 1414 1415 if (!a) 1416 return b->as_id; 1417 if (!b) 1418 return a->as_id; 1419 1420 WARN_ON_ONCE(a->as_id != b->as_id); 1421 return a->as_id; 1422 } 1423 1424 static void kvm_insert_gfn_node(struct kvm_memslots *slots, 1425 struct kvm_memory_slot *slot) 1426 { 1427 struct rb_root *gfn_tree = &slots->gfn_tree; 1428 struct rb_node **node, *parent; 1429 int idx = slots->node_idx; 1430 1431 parent = NULL; 1432 for (node = &gfn_tree->rb_node; *node; ) { 1433 struct kvm_memory_slot *tmp; 1434 1435 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); 1436 parent = *node; 1437 if (slot->base_gfn < tmp->base_gfn) 1438 node = &(*node)->rb_left; 1439 else if (slot->base_gfn > tmp->base_gfn) 1440 node = &(*node)->rb_right; 1441 else 1442 BUG(); 1443 } 1444 1445 rb_link_node(&slot->gfn_node[idx], parent, node); 1446 rb_insert_color(&slot->gfn_node[idx], gfn_tree); 1447 } 1448 1449 static void kvm_erase_gfn_node(struct kvm_memslots *slots, 1450 struct kvm_memory_slot *slot) 1451 { 1452 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); 1453 } 1454 1455 static void kvm_replace_gfn_node(struct kvm_memslots *slots, 1456 struct kvm_memory_slot *old, 1457 struct kvm_memory_slot *new) 1458 { 1459 int idx = slots->node_idx; 1460 1461 WARN_ON_ONCE(old->base_gfn != new->base_gfn); 1462 1463 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], 1464 &slots->gfn_tree); 1465 } 1466 1467 /* 1468 * Replace @old with @new in the inactive memslots. 1469 * 1470 * With NULL @old this simply adds @new. 1471 * With NULL @new this simply removes @old. 1472 * 1473 * If @new is non-NULL its hva_node[slots_idx] range has to be set 1474 * appropriately. 1475 */ 1476 static void kvm_replace_memslot(struct kvm *kvm, 1477 struct kvm_memory_slot *old, 1478 struct kvm_memory_slot *new) 1479 { 1480 int as_id = kvm_memslots_get_as_id(old, new); 1481 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1482 int idx = slots->node_idx; 1483 1484 if (old) { 1485 hash_del(&old->id_node[idx]); 1486 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); 1487 1488 if ((long)old == atomic_long_read(&slots->last_used_slot)) 1489 atomic_long_set(&slots->last_used_slot, (long)new); 1490 1491 if (!new) { 1492 kvm_erase_gfn_node(slots, old); 1493 return; 1494 } 1495 } 1496 1497 /* 1498 * Initialize @new's hva range. Do this even when replacing an @old 1499 * slot, kvm_copy_memslot() deliberately does not touch node data. 1500 */ 1501 new->hva_node[idx].start = new->userspace_addr; 1502 new->hva_node[idx].last = new->userspace_addr + 1503 (new->npages << PAGE_SHIFT) - 1; 1504 1505 /* 1506 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), 1507 * hva_node needs to be swapped with remove+insert even though hva can't 1508 * change when replacing an existing slot. 1509 */ 1510 hash_add(slots->id_hash, &new->id_node[idx], new->id); 1511 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); 1512 1513 /* 1514 * If the memslot gfn is unchanged, rb_replace_node() can be used to 1515 * switch the node in the gfn tree instead of removing the old and 1516 * inserting the new as two separate operations. Replacement is a 1517 * single O(1) operation versus two O(log(n)) operations for 1518 * remove+insert. 1519 */ 1520 if (old && old->base_gfn == new->base_gfn) { 1521 kvm_replace_gfn_node(slots, old, new); 1522 } else { 1523 if (old) 1524 kvm_erase_gfn_node(slots, old); 1525 kvm_insert_gfn_node(slots, new); 1526 } 1527 } 1528 1529 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 1530 { 1531 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 1532 1533 #ifdef __KVM_HAVE_READONLY_MEM 1534 valid_flags |= KVM_MEM_READONLY; 1535 #endif 1536 1537 if (mem->flags & ~valid_flags) 1538 return -EINVAL; 1539 1540 return 0; 1541 } 1542 1543 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) 1544 { 1545 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1546 1547 /* Grab the generation from the activate memslots. */ 1548 u64 gen = __kvm_memslots(kvm, as_id)->generation; 1549 1550 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); 1551 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1552 1553 /* 1554 * Do not store the new memslots while there are invalidations in 1555 * progress, otherwise the locking in invalidate_range_start and 1556 * invalidate_range_end will be unbalanced. 1557 */ 1558 spin_lock(&kvm->mn_invalidate_lock); 1559 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); 1560 while (kvm->mn_active_invalidate_count) { 1561 set_current_state(TASK_UNINTERRUPTIBLE); 1562 spin_unlock(&kvm->mn_invalidate_lock); 1563 schedule(); 1564 spin_lock(&kvm->mn_invalidate_lock); 1565 } 1566 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); 1567 rcu_assign_pointer(kvm->memslots[as_id], slots); 1568 spin_unlock(&kvm->mn_invalidate_lock); 1569 1570 /* 1571 * Acquired in kvm_set_memslot. Must be released before synchronize 1572 * SRCU below in order to avoid deadlock with another thread 1573 * acquiring the slots_arch_lock in an srcu critical section. 1574 */ 1575 mutex_unlock(&kvm->slots_arch_lock); 1576 1577 synchronize_srcu_expedited(&kvm->srcu); 1578 1579 /* 1580 * Increment the new memslot generation a second time, dropping the 1581 * update in-progress flag and incrementing the generation based on 1582 * the number of address spaces. This provides a unique and easily 1583 * identifiable generation number while the memslots are in flux. 1584 */ 1585 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1586 1587 /* 1588 * Generations must be unique even across address spaces. We do not need 1589 * a global counter for that, instead the generation space is evenly split 1590 * across address spaces. For example, with two address spaces, address 1591 * space 0 will use generations 0, 2, 4, ... while address space 1 will 1592 * use generations 1, 3, 5, ... 1593 */ 1594 gen += KVM_ADDRESS_SPACE_NUM; 1595 1596 kvm_arch_memslots_updated(kvm, gen); 1597 1598 slots->generation = gen; 1599 } 1600 1601 static int kvm_prepare_memory_region(struct kvm *kvm, 1602 const struct kvm_memory_slot *old, 1603 struct kvm_memory_slot *new, 1604 enum kvm_mr_change change) 1605 { 1606 int r; 1607 1608 /* 1609 * If dirty logging is disabled, nullify the bitmap; the old bitmap 1610 * will be freed on "commit". If logging is enabled in both old and 1611 * new, reuse the existing bitmap. If logging is enabled only in the 1612 * new and KVM isn't using a ring buffer, allocate and initialize a 1613 * new bitmap. 1614 */ 1615 if (change != KVM_MR_DELETE) { 1616 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) 1617 new->dirty_bitmap = NULL; 1618 else if (old && old->dirty_bitmap) 1619 new->dirty_bitmap = old->dirty_bitmap; 1620 else if (!kvm->dirty_ring_size) { 1621 r = kvm_alloc_dirty_bitmap(new); 1622 if (r) 1623 return r; 1624 1625 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1626 bitmap_set(new->dirty_bitmap, 0, new->npages); 1627 } 1628 } 1629 1630 r = kvm_arch_prepare_memory_region(kvm, old, new, change); 1631 1632 /* Free the bitmap on failure if it was allocated above. */ 1633 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap)) 1634 kvm_destroy_dirty_bitmap(new); 1635 1636 return r; 1637 } 1638 1639 static void kvm_commit_memory_region(struct kvm *kvm, 1640 struct kvm_memory_slot *old, 1641 const struct kvm_memory_slot *new, 1642 enum kvm_mr_change change) 1643 { 1644 /* 1645 * Update the total number of memslot pages before calling the arch 1646 * hook so that architectures can consume the result directly. 1647 */ 1648 if (change == KVM_MR_DELETE) 1649 kvm->nr_memslot_pages -= old->npages; 1650 else if (change == KVM_MR_CREATE) 1651 kvm->nr_memslot_pages += new->npages; 1652 1653 kvm_arch_commit_memory_region(kvm, old, new, change); 1654 1655 switch (change) { 1656 case KVM_MR_CREATE: 1657 /* Nothing more to do. */ 1658 break; 1659 case KVM_MR_DELETE: 1660 /* Free the old memslot and all its metadata. */ 1661 kvm_free_memslot(kvm, old); 1662 break; 1663 case KVM_MR_MOVE: 1664 case KVM_MR_FLAGS_ONLY: 1665 /* 1666 * Free the dirty bitmap as needed; the below check encompasses 1667 * both the flags and whether a ring buffer is being used) 1668 */ 1669 if (old->dirty_bitmap && !new->dirty_bitmap) 1670 kvm_destroy_dirty_bitmap(old); 1671 1672 /* 1673 * The final quirk. Free the detached, old slot, but only its 1674 * memory, not any metadata. Metadata, including arch specific 1675 * data, may be reused by @new. 1676 */ 1677 kfree(old); 1678 break; 1679 default: 1680 BUG(); 1681 } 1682 } 1683 1684 /* 1685 * Activate @new, which must be installed in the inactive slots by the caller, 1686 * by swapping the active slots and then propagating @new to @old once @old is 1687 * unreachable and can be safely modified. 1688 * 1689 * With NULL @old this simply adds @new to @active (while swapping the sets). 1690 * With NULL @new this simply removes @old from @active and frees it 1691 * (while also swapping the sets). 1692 */ 1693 static void kvm_activate_memslot(struct kvm *kvm, 1694 struct kvm_memory_slot *old, 1695 struct kvm_memory_slot *new) 1696 { 1697 int as_id = kvm_memslots_get_as_id(old, new); 1698 1699 kvm_swap_active_memslots(kvm, as_id); 1700 1701 /* Propagate the new memslot to the now inactive memslots. */ 1702 kvm_replace_memslot(kvm, old, new); 1703 } 1704 1705 static void kvm_copy_memslot(struct kvm_memory_slot *dest, 1706 const struct kvm_memory_slot *src) 1707 { 1708 dest->base_gfn = src->base_gfn; 1709 dest->npages = src->npages; 1710 dest->dirty_bitmap = src->dirty_bitmap; 1711 dest->arch = src->arch; 1712 dest->userspace_addr = src->userspace_addr; 1713 dest->flags = src->flags; 1714 dest->id = src->id; 1715 dest->as_id = src->as_id; 1716 } 1717 1718 static void kvm_invalidate_memslot(struct kvm *kvm, 1719 struct kvm_memory_slot *old, 1720 struct kvm_memory_slot *invalid_slot) 1721 { 1722 /* 1723 * Mark the current slot INVALID. As with all memslot modifications, 1724 * this must be done on an unreachable slot to avoid modifying the 1725 * current slot in the active tree. 1726 */ 1727 kvm_copy_memslot(invalid_slot, old); 1728 invalid_slot->flags |= KVM_MEMSLOT_INVALID; 1729 kvm_replace_memslot(kvm, old, invalid_slot); 1730 1731 /* 1732 * Activate the slot that is now marked INVALID, but don't propagate 1733 * the slot to the now inactive slots. The slot is either going to be 1734 * deleted or recreated as a new slot. 1735 */ 1736 kvm_swap_active_memslots(kvm, old->as_id); 1737 1738 /* 1739 * From this point no new shadow pages pointing to a deleted, or moved, 1740 * memslot will be created. Validation of sp->gfn happens in: 1741 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1742 * - kvm_is_visible_gfn (mmu_check_root) 1743 */ 1744 kvm_arch_flush_shadow_memslot(kvm, old); 1745 kvm_arch_guest_memory_reclaimed(kvm); 1746 1747 /* Was released by kvm_swap_active_memslots, reacquire. */ 1748 mutex_lock(&kvm->slots_arch_lock); 1749 1750 /* 1751 * Copy the arch-specific field of the newly-installed slot back to the 1752 * old slot as the arch data could have changed between releasing 1753 * slots_arch_lock in install_new_memslots() and re-acquiring the lock 1754 * above. Writers are required to retrieve memslots *after* acquiring 1755 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. 1756 */ 1757 old->arch = invalid_slot->arch; 1758 } 1759 1760 static void kvm_create_memslot(struct kvm *kvm, 1761 struct kvm_memory_slot *new) 1762 { 1763 /* Add the new memslot to the inactive set and activate. */ 1764 kvm_replace_memslot(kvm, NULL, new); 1765 kvm_activate_memslot(kvm, NULL, new); 1766 } 1767 1768 static void kvm_delete_memslot(struct kvm *kvm, 1769 struct kvm_memory_slot *old, 1770 struct kvm_memory_slot *invalid_slot) 1771 { 1772 /* 1773 * Remove the old memslot (in the inactive memslots) by passing NULL as 1774 * the "new" slot, and for the invalid version in the active slots. 1775 */ 1776 kvm_replace_memslot(kvm, old, NULL); 1777 kvm_activate_memslot(kvm, invalid_slot, NULL); 1778 } 1779 1780 static void kvm_move_memslot(struct kvm *kvm, 1781 struct kvm_memory_slot *old, 1782 struct kvm_memory_slot *new, 1783 struct kvm_memory_slot *invalid_slot) 1784 { 1785 /* 1786 * Replace the old memslot in the inactive slots, and then swap slots 1787 * and replace the current INVALID with the new as well. 1788 */ 1789 kvm_replace_memslot(kvm, old, new); 1790 kvm_activate_memslot(kvm, invalid_slot, new); 1791 } 1792 1793 static void kvm_update_flags_memslot(struct kvm *kvm, 1794 struct kvm_memory_slot *old, 1795 struct kvm_memory_slot *new) 1796 { 1797 /* 1798 * Similar to the MOVE case, but the slot doesn't need to be zapped as 1799 * an intermediate step. Instead, the old memslot is simply replaced 1800 * with a new, updated copy in both memslot sets. 1801 */ 1802 kvm_replace_memslot(kvm, old, new); 1803 kvm_activate_memslot(kvm, old, new); 1804 } 1805 1806 static int kvm_set_memslot(struct kvm *kvm, 1807 struct kvm_memory_slot *old, 1808 struct kvm_memory_slot *new, 1809 enum kvm_mr_change change) 1810 { 1811 struct kvm_memory_slot *invalid_slot; 1812 int r; 1813 1814 /* 1815 * Released in kvm_swap_active_memslots. 1816 * 1817 * Must be held from before the current memslots are copied until 1818 * after the new memslots are installed with rcu_assign_pointer, 1819 * then released before the synchronize srcu in kvm_swap_active_memslots. 1820 * 1821 * When modifying memslots outside of the slots_lock, must be held 1822 * before reading the pointer to the current memslots until after all 1823 * changes to those memslots are complete. 1824 * 1825 * These rules ensure that installing new memslots does not lose 1826 * changes made to the previous memslots. 1827 */ 1828 mutex_lock(&kvm->slots_arch_lock); 1829 1830 /* 1831 * Invalidate the old slot if it's being deleted or moved. This is 1832 * done prior to actually deleting/moving the memslot to allow vCPUs to 1833 * continue running by ensuring there are no mappings or shadow pages 1834 * for the memslot when it is deleted/moved. Without pre-invalidation 1835 * (and without a lock), a window would exist between effecting the 1836 * delete/move and committing the changes in arch code where KVM or a 1837 * guest could access a non-existent memslot. 1838 * 1839 * Modifications are done on a temporary, unreachable slot. The old 1840 * slot needs to be preserved in case a later step fails and the 1841 * invalidation needs to be reverted. 1842 */ 1843 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1844 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1845 if (!invalid_slot) { 1846 mutex_unlock(&kvm->slots_arch_lock); 1847 return -ENOMEM; 1848 } 1849 kvm_invalidate_memslot(kvm, old, invalid_slot); 1850 } 1851 1852 r = kvm_prepare_memory_region(kvm, old, new, change); 1853 if (r) { 1854 /* 1855 * For DELETE/MOVE, revert the above INVALID change. No 1856 * modifications required since the original slot was preserved 1857 * in the inactive slots. Changing the active memslots also 1858 * release slots_arch_lock. 1859 */ 1860 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1861 kvm_activate_memslot(kvm, invalid_slot, old); 1862 kfree(invalid_slot); 1863 } else { 1864 mutex_unlock(&kvm->slots_arch_lock); 1865 } 1866 return r; 1867 } 1868 1869 /* 1870 * For DELETE and MOVE, the working slot is now active as the INVALID 1871 * version of the old slot. MOVE is particularly special as it reuses 1872 * the old slot and returns a copy of the old slot (in working_slot). 1873 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1874 * old slot is detached but otherwise preserved. 1875 */ 1876 if (change == KVM_MR_CREATE) 1877 kvm_create_memslot(kvm, new); 1878 else if (change == KVM_MR_DELETE) 1879 kvm_delete_memslot(kvm, old, invalid_slot); 1880 else if (change == KVM_MR_MOVE) 1881 kvm_move_memslot(kvm, old, new, invalid_slot); 1882 else if (change == KVM_MR_FLAGS_ONLY) 1883 kvm_update_flags_memslot(kvm, old, new); 1884 else 1885 BUG(); 1886 1887 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1888 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1889 kfree(invalid_slot); 1890 1891 /* 1892 * No need to refresh new->arch, changes after dropping slots_arch_lock 1893 * will directly hit the final, active memslot. Architectures are 1894 * responsible for knowing that new->arch may be stale. 1895 */ 1896 kvm_commit_memory_region(kvm, old, new, change); 1897 1898 return 0; 1899 } 1900 1901 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1902 gfn_t start, gfn_t end) 1903 { 1904 struct kvm_memslot_iter iter; 1905 1906 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1907 if (iter.slot->id != id) 1908 return true; 1909 } 1910 1911 return false; 1912 } 1913 1914 /* 1915 * Allocate some memory and give it an address in the guest physical address 1916 * space. 1917 * 1918 * Discontiguous memory is allowed, mostly for framebuffers. 1919 * 1920 * Must be called holding kvm->slots_lock for write. 1921 */ 1922 int __kvm_set_memory_region(struct kvm *kvm, 1923 const struct kvm_userspace_memory_region *mem) 1924 { 1925 struct kvm_memory_slot *old, *new; 1926 struct kvm_memslots *slots; 1927 enum kvm_mr_change change; 1928 unsigned long npages; 1929 gfn_t base_gfn; 1930 int as_id, id; 1931 int r; 1932 1933 r = check_memory_region_flags(mem); 1934 if (r) 1935 return r; 1936 1937 as_id = mem->slot >> 16; 1938 id = (u16)mem->slot; 1939 1940 /* General sanity checks */ 1941 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1942 (mem->memory_size != (unsigned long)mem->memory_size)) 1943 return -EINVAL; 1944 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1945 return -EINVAL; 1946 /* We can read the guest memory with __xxx_user() later on. */ 1947 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1948 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1949 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1950 mem->memory_size)) 1951 return -EINVAL; 1952 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1953 return -EINVAL; 1954 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1955 return -EINVAL; 1956 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1957 return -EINVAL; 1958 1959 slots = __kvm_memslots(kvm, as_id); 1960 1961 /* 1962 * Note, the old memslot (and the pointer itself!) may be invalidated 1963 * and/or destroyed by kvm_set_memslot(). 1964 */ 1965 old = id_to_memslot(slots, id); 1966 1967 if (!mem->memory_size) { 1968 if (!old || !old->npages) 1969 return -EINVAL; 1970 1971 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1972 return -EIO; 1973 1974 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1975 } 1976 1977 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1978 npages = (mem->memory_size >> PAGE_SHIFT); 1979 1980 if (!old || !old->npages) { 1981 change = KVM_MR_CREATE; 1982 1983 /* 1984 * To simplify KVM internals, the total number of pages across 1985 * all memslots must fit in an unsigned long. 1986 */ 1987 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 1988 return -EINVAL; 1989 } else { /* Modify an existing slot. */ 1990 if ((mem->userspace_addr != old->userspace_addr) || 1991 (npages != old->npages) || 1992 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 1993 return -EINVAL; 1994 1995 if (base_gfn != old->base_gfn) 1996 change = KVM_MR_MOVE; 1997 else if (mem->flags != old->flags) 1998 change = KVM_MR_FLAGS_ONLY; 1999 else /* Nothing to change. */ 2000 return 0; 2001 } 2002 2003 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 2004 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 2005 return -EEXIST; 2006 2007 /* Allocate a slot that will persist in the memslot. */ 2008 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 2009 if (!new) 2010 return -ENOMEM; 2011 2012 new->as_id = as_id; 2013 new->id = id; 2014 new->base_gfn = base_gfn; 2015 new->npages = npages; 2016 new->flags = mem->flags; 2017 new->userspace_addr = mem->userspace_addr; 2018 2019 r = kvm_set_memslot(kvm, old, new, change); 2020 if (r) 2021 kfree(new); 2022 return r; 2023 } 2024 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 2025 2026 int kvm_set_memory_region(struct kvm *kvm, 2027 const struct kvm_userspace_memory_region *mem) 2028 { 2029 int r; 2030 2031 mutex_lock(&kvm->slots_lock); 2032 r = __kvm_set_memory_region(kvm, mem); 2033 mutex_unlock(&kvm->slots_lock); 2034 return r; 2035 } 2036 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 2037 2038 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 2039 struct kvm_userspace_memory_region *mem) 2040 { 2041 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 2042 return -EINVAL; 2043 2044 return kvm_set_memory_region(kvm, mem); 2045 } 2046 2047 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 2048 /** 2049 * kvm_get_dirty_log - get a snapshot of dirty pages 2050 * @kvm: pointer to kvm instance 2051 * @log: slot id and address to which we copy the log 2052 * @is_dirty: set to '1' if any dirty pages were found 2053 * @memslot: set to the associated memslot, always valid on success 2054 */ 2055 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 2056 int *is_dirty, struct kvm_memory_slot **memslot) 2057 { 2058 struct kvm_memslots *slots; 2059 int i, as_id, id; 2060 unsigned long n; 2061 unsigned long any = 0; 2062 2063 /* Dirty ring tracking is exclusive to dirty log tracking */ 2064 if (kvm->dirty_ring_size) 2065 return -ENXIO; 2066 2067 *memslot = NULL; 2068 *is_dirty = 0; 2069 2070 as_id = log->slot >> 16; 2071 id = (u16)log->slot; 2072 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2073 return -EINVAL; 2074 2075 slots = __kvm_memslots(kvm, as_id); 2076 *memslot = id_to_memslot(slots, id); 2077 if (!(*memslot) || !(*memslot)->dirty_bitmap) 2078 return -ENOENT; 2079 2080 kvm_arch_sync_dirty_log(kvm, *memslot); 2081 2082 n = kvm_dirty_bitmap_bytes(*memslot); 2083 2084 for (i = 0; !any && i < n/sizeof(long); ++i) 2085 any = (*memslot)->dirty_bitmap[i]; 2086 2087 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 2088 return -EFAULT; 2089 2090 if (any) 2091 *is_dirty = 1; 2092 return 0; 2093 } 2094 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 2095 2096 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2097 /** 2098 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 2099 * and reenable dirty page tracking for the corresponding pages. 2100 * @kvm: pointer to kvm instance 2101 * @log: slot id and address to which we copy the log 2102 * 2103 * We need to keep it in mind that VCPU threads can write to the bitmap 2104 * concurrently. So, to avoid losing track of dirty pages we keep the 2105 * following order: 2106 * 2107 * 1. Take a snapshot of the bit and clear it if needed. 2108 * 2. Write protect the corresponding page. 2109 * 3. Copy the snapshot to the userspace. 2110 * 4. Upon return caller flushes TLB's if needed. 2111 * 2112 * Between 2 and 4, the guest may write to the page using the remaining TLB 2113 * entry. This is not a problem because the page is reported dirty using 2114 * the snapshot taken before and step 4 ensures that writes done after 2115 * exiting to userspace will be logged for the next call. 2116 * 2117 */ 2118 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 2119 { 2120 struct kvm_memslots *slots; 2121 struct kvm_memory_slot *memslot; 2122 int i, as_id, id; 2123 unsigned long n; 2124 unsigned long *dirty_bitmap; 2125 unsigned long *dirty_bitmap_buffer; 2126 bool flush; 2127 2128 /* Dirty ring tracking is exclusive to dirty log tracking */ 2129 if (kvm->dirty_ring_size) 2130 return -ENXIO; 2131 2132 as_id = log->slot >> 16; 2133 id = (u16)log->slot; 2134 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2135 return -EINVAL; 2136 2137 slots = __kvm_memslots(kvm, as_id); 2138 memslot = id_to_memslot(slots, id); 2139 if (!memslot || !memslot->dirty_bitmap) 2140 return -ENOENT; 2141 2142 dirty_bitmap = memslot->dirty_bitmap; 2143 2144 kvm_arch_sync_dirty_log(kvm, memslot); 2145 2146 n = kvm_dirty_bitmap_bytes(memslot); 2147 flush = false; 2148 if (kvm->manual_dirty_log_protect) { 2149 /* 2150 * Unlike kvm_get_dirty_log, we always return false in *flush, 2151 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 2152 * is some code duplication between this function and 2153 * kvm_get_dirty_log, but hopefully all architecture 2154 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log 2155 * can be eliminated. 2156 */ 2157 dirty_bitmap_buffer = dirty_bitmap; 2158 } else { 2159 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2160 memset(dirty_bitmap_buffer, 0, n); 2161 2162 KVM_MMU_LOCK(kvm); 2163 for (i = 0; i < n / sizeof(long); i++) { 2164 unsigned long mask; 2165 gfn_t offset; 2166 2167 if (!dirty_bitmap[i]) 2168 continue; 2169 2170 flush = true; 2171 mask = xchg(&dirty_bitmap[i], 0); 2172 dirty_bitmap_buffer[i] = mask; 2173 2174 offset = i * BITS_PER_LONG; 2175 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2176 offset, mask); 2177 } 2178 KVM_MMU_UNLOCK(kvm); 2179 } 2180 2181 if (flush) 2182 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2183 2184 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2185 return -EFAULT; 2186 return 0; 2187 } 2188 2189 2190 /** 2191 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2192 * @kvm: kvm instance 2193 * @log: slot id and address to which we copy the log 2194 * 2195 * Steps 1-4 below provide general overview of dirty page logging. See 2196 * kvm_get_dirty_log_protect() function description for additional details. 2197 * 2198 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 2199 * always flush the TLB (step 4) even if previous step failed and the dirty 2200 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 2201 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 2202 * writes will be marked dirty for next log read. 2203 * 2204 * 1. Take a snapshot of the bit and clear it if needed. 2205 * 2. Write protect the corresponding page. 2206 * 3. Copy the snapshot to the userspace. 2207 * 4. Flush TLB's if needed. 2208 */ 2209 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2210 struct kvm_dirty_log *log) 2211 { 2212 int r; 2213 2214 mutex_lock(&kvm->slots_lock); 2215 2216 r = kvm_get_dirty_log_protect(kvm, log); 2217 2218 mutex_unlock(&kvm->slots_lock); 2219 return r; 2220 } 2221 2222 /** 2223 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2224 * and reenable dirty page tracking for the corresponding pages. 2225 * @kvm: pointer to kvm instance 2226 * @log: slot id and address from which to fetch the bitmap of dirty pages 2227 */ 2228 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2229 struct kvm_clear_dirty_log *log) 2230 { 2231 struct kvm_memslots *slots; 2232 struct kvm_memory_slot *memslot; 2233 int as_id, id; 2234 gfn_t offset; 2235 unsigned long i, n; 2236 unsigned long *dirty_bitmap; 2237 unsigned long *dirty_bitmap_buffer; 2238 bool flush; 2239 2240 /* Dirty ring tracking is exclusive to dirty log tracking */ 2241 if (kvm->dirty_ring_size) 2242 return -ENXIO; 2243 2244 as_id = log->slot >> 16; 2245 id = (u16)log->slot; 2246 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2247 return -EINVAL; 2248 2249 if (log->first_page & 63) 2250 return -EINVAL; 2251 2252 slots = __kvm_memslots(kvm, as_id); 2253 memslot = id_to_memslot(slots, id); 2254 if (!memslot || !memslot->dirty_bitmap) 2255 return -ENOENT; 2256 2257 dirty_bitmap = memslot->dirty_bitmap; 2258 2259 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2260 2261 if (log->first_page > memslot->npages || 2262 log->num_pages > memslot->npages - log->first_page || 2263 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2264 return -EINVAL; 2265 2266 kvm_arch_sync_dirty_log(kvm, memslot); 2267 2268 flush = false; 2269 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2270 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2271 return -EFAULT; 2272 2273 KVM_MMU_LOCK(kvm); 2274 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2275 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2276 i++, offset += BITS_PER_LONG) { 2277 unsigned long mask = *dirty_bitmap_buffer++; 2278 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2279 if (!mask) 2280 continue; 2281 2282 mask &= atomic_long_fetch_andnot(mask, p); 2283 2284 /* 2285 * mask contains the bits that really have been cleared. This 2286 * never includes any bits beyond the length of the memslot (if 2287 * the length is not aligned to 64 pages), therefore it is not 2288 * a problem if userspace sets them in log->dirty_bitmap. 2289 */ 2290 if (mask) { 2291 flush = true; 2292 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2293 offset, mask); 2294 } 2295 } 2296 KVM_MMU_UNLOCK(kvm); 2297 2298 if (flush) 2299 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2300 2301 return 0; 2302 } 2303 2304 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2305 struct kvm_clear_dirty_log *log) 2306 { 2307 int r; 2308 2309 mutex_lock(&kvm->slots_lock); 2310 2311 r = kvm_clear_dirty_log_protect(kvm, log); 2312 2313 mutex_unlock(&kvm->slots_lock); 2314 return r; 2315 } 2316 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2317 2318 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2319 { 2320 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2321 } 2322 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2323 2324 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2325 { 2326 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2327 u64 gen = slots->generation; 2328 struct kvm_memory_slot *slot; 2329 2330 /* 2331 * This also protects against using a memslot from a different address space, 2332 * since different address spaces have different generation numbers. 2333 */ 2334 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2335 vcpu->last_used_slot = NULL; 2336 vcpu->last_used_slot_gen = gen; 2337 } 2338 2339 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2340 if (slot) 2341 return slot; 2342 2343 /* 2344 * Fall back to searching all memslots. We purposely use 2345 * search_memslots() instead of __gfn_to_memslot() to avoid 2346 * thrashing the VM-wide last_used_slot in kvm_memslots. 2347 */ 2348 slot = search_memslots(slots, gfn, false); 2349 if (slot) { 2350 vcpu->last_used_slot = slot; 2351 return slot; 2352 } 2353 2354 return NULL; 2355 } 2356 2357 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2358 { 2359 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2360 2361 return kvm_is_visible_memslot(memslot); 2362 } 2363 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2364 2365 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2366 { 2367 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2368 2369 return kvm_is_visible_memslot(memslot); 2370 } 2371 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2372 2373 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2374 { 2375 struct vm_area_struct *vma; 2376 unsigned long addr, size; 2377 2378 size = PAGE_SIZE; 2379 2380 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2381 if (kvm_is_error_hva(addr)) 2382 return PAGE_SIZE; 2383 2384 mmap_read_lock(current->mm); 2385 vma = find_vma(current->mm, addr); 2386 if (!vma) 2387 goto out; 2388 2389 size = vma_kernel_pagesize(vma); 2390 2391 out: 2392 mmap_read_unlock(current->mm); 2393 2394 return size; 2395 } 2396 2397 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2398 { 2399 return slot->flags & KVM_MEM_READONLY; 2400 } 2401 2402 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2403 gfn_t *nr_pages, bool write) 2404 { 2405 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2406 return KVM_HVA_ERR_BAD; 2407 2408 if (memslot_is_readonly(slot) && write) 2409 return KVM_HVA_ERR_RO_BAD; 2410 2411 if (nr_pages) 2412 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2413 2414 return __gfn_to_hva_memslot(slot, gfn); 2415 } 2416 2417 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2418 gfn_t *nr_pages) 2419 { 2420 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2421 } 2422 2423 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2424 gfn_t gfn) 2425 { 2426 return gfn_to_hva_many(slot, gfn, NULL); 2427 } 2428 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2429 2430 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2431 { 2432 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2433 } 2434 EXPORT_SYMBOL_GPL(gfn_to_hva); 2435 2436 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2437 { 2438 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2439 } 2440 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2441 2442 /* 2443 * Return the hva of a @gfn and the R/W attribute if possible. 2444 * 2445 * @slot: the kvm_memory_slot which contains @gfn 2446 * @gfn: the gfn to be translated 2447 * @writable: used to return the read/write attribute of the @slot if the hva 2448 * is valid and @writable is not NULL 2449 */ 2450 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2451 gfn_t gfn, bool *writable) 2452 { 2453 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2454 2455 if (!kvm_is_error_hva(hva) && writable) 2456 *writable = !memslot_is_readonly(slot); 2457 2458 return hva; 2459 } 2460 2461 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2462 { 2463 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2464 2465 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2466 } 2467 2468 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2469 { 2470 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2471 2472 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2473 } 2474 2475 static inline int check_user_page_hwpoison(unsigned long addr) 2476 { 2477 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2478 2479 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2480 return rc == -EHWPOISON; 2481 } 2482 2483 /* 2484 * The fast path to get the writable pfn which will be stored in @pfn, 2485 * true indicates success, otherwise false is returned. It's also the 2486 * only part that runs if we can in atomic context. 2487 */ 2488 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2489 bool *writable, kvm_pfn_t *pfn) 2490 { 2491 struct page *page[1]; 2492 2493 /* 2494 * Fast pin a writable pfn only if it is a write fault request 2495 * or the caller allows to map a writable pfn for a read fault 2496 * request. 2497 */ 2498 if (!(write_fault || writable)) 2499 return false; 2500 2501 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2502 *pfn = page_to_pfn(page[0]); 2503 2504 if (writable) 2505 *writable = true; 2506 return true; 2507 } 2508 2509 return false; 2510 } 2511 2512 /* 2513 * The slow path to get the pfn of the specified host virtual address, 2514 * 1 indicates success, -errno is returned if error is detected. 2515 */ 2516 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2517 bool *writable, kvm_pfn_t *pfn) 2518 { 2519 unsigned int flags = FOLL_HWPOISON; 2520 struct page *page; 2521 int npages; 2522 2523 might_sleep(); 2524 2525 if (writable) 2526 *writable = write_fault; 2527 2528 if (write_fault) 2529 flags |= FOLL_WRITE; 2530 if (async) 2531 flags |= FOLL_NOWAIT; 2532 2533 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2534 if (npages != 1) 2535 return npages; 2536 2537 /* map read fault as writable if possible */ 2538 if (unlikely(!write_fault) && writable) { 2539 struct page *wpage; 2540 2541 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2542 *writable = true; 2543 put_page(page); 2544 page = wpage; 2545 } 2546 } 2547 *pfn = page_to_pfn(page); 2548 return npages; 2549 } 2550 2551 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2552 { 2553 if (unlikely(!(vma->vm_flags & VM_READ))) 2554 return false; 2555 2556 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2557 return false; 2558 2559 return true; 2560 } 2561 2562 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2563 { 2564 struct page *page = kvm_pfn_to_refcounted_page(pfn); 2565 2566 if (!page) 2567 return 1; 2568 2569 return get_page_unless_zero(page); 2570 } 2571 2572 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2573 unsigned long addr, bool write_fault, 2574 bool *writable, kvm_pfn_t *p_pfn) 2575 { 2576 kvm_pfn_t pfn; 2577 pte_t *ptep; 2578 spinlock_t *ptl; 2579 int r; 2580 2581 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2582 if (r) { 2583 /* 2584 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2585 * not call the fault handler, so do it here. 2586 */ 2587 bool unlocked = false; 2588 r = fixup_user_fault(current->mm, addr, 2589 (write_fault ? FAULT_FLAG_WRITE : 0), 2590 &unlocked); 2591 if (unlocked) 2592 return -EAGAIN; 2593 if (r) 2594 return r; 2595 2596 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2597 if (r) 2598 return r; 2599 } 2600 2601 if (write_fault && !pte_write(*ptep)) { 2602 pfn = KVM_PFN_ERR_RO_FAULT; 2603 goto out; 2604 } 2605 2606 if (writable) 2607 *writable = pte_write(*ptep); 2608 pfn = pte_pfn(*ptep); 2609 2610 /* 2611 * Get a reference here because callers of *hva_to_pfn* and 2612 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2613 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2614 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2615 * simply do nothing for reserved pfns. 2616 * 2617 * Whoever called remap_pfn_range is also going to call e.g. 2618 * unmap_mapping_range before the underlying pages are freed, 2619 * causing a call to our MMU notifier. 2620 * 2621 * Certain IO or PFNMAP mappings can be backed with valid 2622 * struct pages, but be allocated without refcounting e.g., 2623 * tail pages of non-compound higher order allocations, which 2624 * would then underflow the refcount when the caller does the 2625 * required put_page. Don't allow those pages here. 2626 */ 2627 if (!kvm_try_get_pfn(pfn)) 2628 r = -EFAULT; 2629 2630 out: 2631 pte_unmap_unlock(ptep, ptl); 2632 *p_pfn = pfn; 2633 2634 return r; 2635 } 2636 2637 /* 2638 * Pin guest page in memory and return its pfn. 2639 * @addr: host virtual address which maps memory to the guest 2640 * @atomic: whether this function can sleep 2641 * @async: whether this function need to wait IO complete if the 2642 * host page is not in the memory 2643 * @write_fault: whether we should get a writable host page 2644 * @writable: whether it allows to map a writable host page for !@write_fault 2645 * 2646 * The function will map a writable host page for these two cases: 2647 * 1): @write_fault = true 2648 * 2): @write_fault = false && @writable, @writable will tell the caller 2649 * whether the mapping is writable. 2650 */ 2651 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 2652 bool write_fault, bool *writable) 2653 { 2654 struct vm_area_struct *vma; 2655 kvm_pfn_t pfn; 2656 int npages, r; 2657 2658 /* we can do it either atomically or asynchronously, not both */ 2659 BUG_ON(atomic && async); 2660 2661 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2662 return pfn; 2663 2664 if (atomic) 2665 return KVM_PFN_ERR_FAULT; 2666 2667 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 2668 if (npages == 1) 2669 return pfn; 2670 2671 mmap_read_lock(current->mm); 2672 if (npages == -EHWPOISON || 2673 (!async && check_user_page_hwpoison(addr))) { 2674 pfn = KVM_PFN_ERR_HWPOISON; 2675 goto exit; 2676 } 2677 2678 retry: 2679 vma = vma_lookup(current->mm, addr); 2680 2681 if (vma == NULL) 2682 pfn = KVM_PFN_ERR_FAULT; 2683 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2684 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); 2685 if (r == -EAGAIN) 2686 goto retry; 2687 if (r < 0) 2688 pfn = KVM_PFN_ERR_FAULT; 2689 } else { 2690 if (async && vma_is_valid(vma, write_fault)) 2691 *async = true; 2692 pfn = KVM_PFN_ERR_FAULT; 2693 } 2694 exit: 2695 mmap_read_unlock(current->mm); 2696 return pfn; 2697 } 2698 2699 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 2700 bool atomic, bool *async, bool write_fault, 2701 bool *writable, hva_t *hva) 2702 { 2703 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2704 2705 if (hva) 2706 *hva = addr; 2707 2708 if (addr == KVM_HVA_ERR_RO_BAD) { 2709 if (writable) 2710 *writable = false; 2711 return KVM_PFN_ERR_RO_FAULT; 2712 } 2713 2714 if (kvm_is_error_hva(addr)) { 2715 if (writable) 2716 *writable = false; 2717 return KVM_PFN_NOSLOT; 2718 } 2719 2720 /* Do not map writable pfn in the readonly memslot. */ 2721 if (writable && memslot_is_readonly(slot)) { 2722 *writable = false; 2723 writable = NULL; 2724 } 2725 2726 return hva_to_pfn(addr, atomic, async, write_fault, 2727 writable); 2728 } 2729 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2730 2731 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2732 bool *writable) 2733 { 2734 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2735 write_fault, writable, NULL); 2736 } 2737 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2738 2739 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) 2740 { 2741 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2742 } 2743 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2744 2745 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) 2746 { 2747 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2748 } 2749 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2750 2751 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2752 { 2753 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2754 } 2755 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2756 2757 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2758 { 2759 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2760 } 2761 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2762 2763 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2764 { 2765 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2766 } 2767 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2768 2769 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2770 struct page **pages, int nr_pages) 2771 { 2772 unsigned long addr; 2773 gfn_t entry = 0; 2774 2775 addr = gfn_to_hva_many(slot, gfn, &entry); 2776 if (kvm_is_error_hva(addr)) 2777 return -1; 2778 2779 if (entry < nr_pages) 2780 return 0; 2781 2782 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2783 } 2784 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2785 2786 /* 2787 * Do not use this helper unless you are absolutely certain the gfn _must_ be 2788 * backed by 'struct page'. A valid example is if the backing memslot is 2789 * controlled by KVM. Note, if the returned page is valid, it's refcount has 2790 * been elevated by gfn_to_pfn(). 2791 */ 2792 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2793 { 2794 struct page *page; 2795 kvm_pfn_t pfn; 2796 2797 pfn = gfn_to_pfn(kvm, gfn); 2798 2799 if (is_error_noslot_pfn(pfn)) 2800 return KVM_ERR_PTR_BAD_PAGE; 2801 2802 page = kvm_pfn_to_refcounted_page(pfn); 2803 if (!page) 2804 return KVM_ERR_PTR_BAD_PAGE; 2805 2806 return page; 2807 } 2808 EXPORT_SYMBOL_GPL(gfn_to_page); 2809 2810 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2811 { 2812 if (dirty) 2813 kvm_release_pfn_dirty(pfn); 2814 else 2815 kvm_release_pfn_clean(pfn); 2816 } 2817 2818 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2819 { 2820 kvm_pfn_t pfn; 2821 void *hva = NULL; 2822 struct page *page = KVM_UNMAPPED_PAGE; 2823 2824 if (!map) 2825 return -EINVAL; 2826 2827 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2828 if (is_error_noslot_pfn(pfn)) 2829 return -EINVAL; 2830 2831 if (pfn_valid(pfn)) { 2832 page = pfn_to_page(pfn); 2833 hva = kmap(page); 2834 #ifdef CONFIG_HAS_IOMEM 2835 } else { 2836 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2837 #endif 2838 } 2839 2840 if (!hva) 2841 return -EFAULT; 2842 2843 map->page = page; 2844 map->hva = hva; 2845 map->pfn = pfn; 2846 map->gfn = gfn; 2847 2848 return 0; 2849 } 2850 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2851 2852 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2853 { 2854 if (!map) 2855 return; 2856 2857 if (!map->hva) 2858 return; 2859 2860 if (map->page != KVM_UNMAPPED_PAGE) 2861 kunmap(map->page); 2862 #ifdef CONFIG_HAS_IOMEM 2863 else 2864 memunmap(map->hva); 2865 #endif 2866 2867 if (dirty) 2868 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2869 2870 kvm_release_pfn(map->pfn, dirty); 2871 2872 map->hva = NULL; 2873 map->page = NULL; 2874 } 2875 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2876 2877 static bool kvm_is_ad_tracked_page(struct page *page) 2878 { 2879 /* 2880 * Per page-flags.h, pages tagged PG_reserved "should in general not be 2881 * touched (e.g. set dirty) except by its owner". 2882 */ 2883 return !PageReserved(page); 2884 } 2885 2886 static void kvm_set_page_dirty(struct page *page) 2887 { 2888 if (kvm_is_ad_tracked_page(page)) 2889 SetPageDirty(page); 2890 } 2891 2892 static void kvm_set_page_accessed(struct page *page) 2893 { 2894 if (kvm_is_ad_tracked_page(page)) 2895 mark_page_accessed(page); 2896 } 2897 2898 void kvm_release_page_clean(struct page *page) 2899 { 2900 WARN_ON(is_error_page(page)); 2901 2902 kvm_set_page_accessed(page); 2903 put_page(page); 2904 } 2905 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2906 2907 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2908 { 2909 struct page *page; 2910 2911 if (is_error_noslot_pfn(pfn)) 2912 return; 2913 2914 page = kvm_pfn_to_refcounted_page(pfn); 2915 if (!page) 2916 return; 2917 2918 kvm_release_page_clean(page); 2919 } 2920 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2921 2922 void kvm_release_page_dirty(struct page *page) 2923 { 2924 WARN_ON(is_error_page(page)); 2925 2926 kvm_set_page_dirty(page); 2927 kvm_release_page_clean(page); 2928 } 2929 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2930 2931 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2932 { 2933 struct page *page; 2934 2935 if (is_error_noslot_pfn(pfn)) 2936 return; 2937 2938 page = kvm_pfn_to_refcounted_page(pfn); 2939 if (!page) 2940 return; 2941 2942 kvm_release_page_dirty(page); 2943 } 2944 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2945 2946 /* 2947 * Note, checking for an error/noslot pfn is the caller's responsibility when 2948 * directly marking a page dirty/accessed. Unlike the "release" helpers, the 2949 * "set" helpers are not to be used when the pfn might point at garbage. 2950 */ 2951 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2952 { 2953 if (WARN_ON(is_error_noslot_pfn(pfn))) 2954 return; 2955 2956 if (pfn_valid(pfn)) 2957 kvm_set_page_dirty(pfn_to_page(pfn)); 2958 } 2959 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2960 2961 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2962 { 2963 if (WARN_ON(is_error_noslot_pfn(pfn))) 2964 return; 2965 2966 if (pfn_valid(pfn)) 2967 kvm_set_page_accessed(pfn_to_page(pfn)); 2968 } 2969 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2970 2971 static int next_segment(unsigned long len, int offset) 2972 { 2973 if (len > PAGE_SIZE - offset) 2974 return PAGE_SIZE - offset; 2975 else 2976 return len; 2977 } 2978 2979 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2980 void *data, int offset, int len) 2981 { 2982 int r; 2983 unsigned long addr; 2984 2985 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2986 if (kvm_is_error_hva(addr)) 2987 return -EFAULT; 2988 r = __copy_from_user(data, (void __user *)addr + offset, len); 2989 if (r) 2990 return -EFAULT; 2991 return 0; 2992 } 2993 2994 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2995 int len) 2996 { 2997 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2998 2999 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3000 } 3001 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 3002 3003 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 3004 int offset, int len) 3005 { 3006 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3007 3008 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3009 } 3010 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 3011 3012 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 3013 { 3014 gfn_t gfn = gpa >> PAGE_SHIFT; 3015 int seg; 3016 int offset = offset_in_page(gpa); 3017 int ret; 3018 3019 while ((seg = next_segment(len, offset)) != 0) { 3020 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 3021 if (ret < 0) 3022 return ret; 3023 offset = 0; 3024 len -= seg; 3025 data += seg; 3026 ++gfn; 3027 } 3028 return 0; 3029 } 3030 EXPORT_SYMBOL_GPL(kvm_read_guest); 3031 3032 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 3033 { 3034 gfn_t gfn = gpa >> PAGE_SHIFT; 3035 int seg; 3036 int offset = offset_in_page(gpa); 3037 int ret; 3038 3039 while ((seg = next_segment(len, offset)) != 0) { 3040 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 3041 if (ret < 0) 3042 return ret; 3043 offset = 0; 3044 len -= seg; 3045 data += seg; 3046 ++gfn; 3047 } 3048 return 0; 3049 } 3050 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 3051 3052 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 3053 void *data, int offset, unsigned long len) 3054 { 3055 int r; 3056 unsigned long addr; 3057 3058 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3059 if (kvm_is_error_hva(addr)) 3060 return -EFAULT; 3061 pagefault_disable(); 3062 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 3063 pagefault_enable(); 3064 if (r) 3065 return -EFAULT; 3066 return 0; 3067 } 3068 3069 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 3070 void *data, unsigned long len) 3071 { 3072 gfn_t gfn = gpa >> PAGE_SHIFT; 3073 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3074 int offset = offset_in_page(gpa); 3075 3076 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 3077 } 3078 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 3079 3080 static int __kvm_write_guest_page(struct kvm *kvm, 3081 struct kvm_memory_slot *memslot, gfn_t gfn, 3082 const void *data, int offset, int len) 3083 { 3084 int r; 3085 unsigned long addr; 3086 3087 addr = gfn_to_hva_memslot(memslot, gfn); 3088 if (kvm_is_error_hva(addr)) 3089 return -EFAULT; 3090 r = __copy_to_user((void __user *)addr + offset, data, len); 3091 if (r) 3092 return -EFAULT; 3093 mark_page_dirty_in_slot(kvm, memslot, gfn); 3094 return 0; 3095 } 3096 3097 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 3098 const void *data, int offset, int len) 3099 { 3100 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3101 3102 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 3103 } 3104 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 3105 3106 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 3107 const void *data, int offset, int len) 3108 { 3109 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3110 3111 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 3112 } 3113 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 3114 3115 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 3116 unsigned long len) 3117 { 3118 gfn_t gfn = gpa >> PAGE_SHIFT; 3119 int seg; 3120 int offset = offset_in_page(gpa); 3121 int ret; 3122 3123 while ((seg = next_segment(len, offset)) != 0) { 3124 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 3125 if (ret < 0) 3126 return ret; 3127 offset = 0; 3128 len -= seg; 3129 data += seg; 3130 ++gfn; 3131 } 3132 return 0; 3133 } 3134 EXPORT_SYMBOL_GPL(kvm_write_guest); 3135 3136 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 3137 unsigned long len) 3138 { 3139 gfn_t gfn = gpa >> PAGE_SHIFT; 3140 int seg; 3141 int offset = offset_in_page(gpa); 3142 int ret; 3143 3144 while ((seg = next_segment(len, offset)) != 0) { 3145 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3146 if (ret < 0) 3147 return ret; 3148 offset = 0; 3149 len -= seg; 3150 data += seg; 3151 ++gfn; 3152 } 3153 return 0; 3154 } 3155 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3156 3157 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3158 struct gfn_to_hva_cache *ghc, 3159 gpa_t gpa, unsigned long len) 3160 { 3161 int offset = offset_in_page(gpa); 3162 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3163 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3164 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3165 gfn_t nr_pages_avail; 3166 3167 /* Update ghc->generation before performing any error checks. */ 3168 ghc->generation = slots->generation; 3169 3170 if (start_gfn > end_gfn) { 3171 ghc->hva = KVM_HVA_ERR_BAD; 3172 return -EINVAL; 3173 } 3174 3175 /* 3176 * If the requested region crosses two memslots, we still 3177 * verify that the entire region is valid here. 3178 */ 3179 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3180 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3181 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3182 &nr_pages_avail); 3183 if (kvm_is_error_hva(ghc->hva)) 3184 return -EFAULT; 3185 } 3186 3187 /* Use the slow path for cross page reads and writes. */ 3188 if (nr_pages_needed == 1) 3189 ghc->hva += offset; 3190 else 3191 ghc->memslot = NULL; 3192 3193 ghc->gpa = gpa; 3194 ghc->len = len; 3195 return 0; 3196 } 3197 3198 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3199 gpa_t gpa, unsigned long len) 3200 { 3201 struct kvm_memslots *slots = kvm_memslots(kvm); 3202 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3203 } 3204 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3205 3206 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3207 void *data, unsigned int offset, 3208 unsigned long len) 3209 { 3210 struct kvm_memslots *slots = kvm_memslots(kvm); 3211 int r; 3212 gpa_t gpa = ghc->gpa + offset; 3213 3214 if (WARN_ON_ONCE(len + offset > ghc->len)) 3215 return -EINVAL; 3216 3217 if (slots->generation != ghc->generation) { 3218 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3219 return -EFAULT; 3220 } 3221 3222 if (kvm_is_error_hva(ghc->hva)) 3223 return -EFAULT; 3224 3225 if (unlikely(!ghc->memslot)) 3226 return kvm_write_guest(kvm, gpa, data, len); 3227 3228 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3229 if (r) 3230 return -EFAULT; 3231 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3232 3233 return 0; 3234 } 3235 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3236 3237 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3238 void *data, unsigned long len) 3239 { 3240 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3241 } 3242 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3243 3244 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3245 void *data, unsigned int offset, 3246 unsigned long len) 3247 { 3248 struct kvm_memslots *slots = kvm_memslots(kvm); 3249 int r; 3250 gpa_t gpa = ghc->gpa + offset; 3251 3252 if (WARN_ON_ONCE(len + offset > ghc->len)) 3253 return -EINVAL; 3254 3255 if (slots->generation != ghc->generation) { 3256 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3257 return -EFAULT; 3258 } 3259 3260 if (kvm_is_error_hva(ghc->hva)) 3261 return -EFAULT; 3262 3263 if (unlikely(!ghc->memslot)) 3264 return kvm_read_guest(kvm, gpa, data, len); 3265 3266 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3267 if (r) 3268 return -EFAULT; 3269 3270 return 0; 3271 } 3272 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3273 3274 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3275 void *data, unsigned long len) 3276 { 3277 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3278 } 3279 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3280 3281 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3282 { 3283 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3284 gfn_t gfn = gpa >> PAGE_SHIFT; 3285 int seg; 3286 int offset = offset_in_page(gpa); 3287 int ret; 3288 3289 while ((seg = next_segment(len, offset)) != 0) { 3290 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 3291 if (ret < 0) 3292 return ret; 3293 offset = 0; 3294 len -= seg; 3295 ++gfn; 3296 } 3297 return 0; 3298 } 3299 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3300 3301 void mark_page_dirty_in_slot(struct kvm *kvm, 3302 const struct kvm_memory_slot *memslot, 3303 gfn_t gfn) 3304 { 3305 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3306 3307 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3308 if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) 3309 return; 3310 #endif 3311 3312 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3313 unsigned long rel_gfn = gfn - memslot->base_gfn; 3314 u32 slot = (memslot->as_id << 16) | memslot->id; 3315 3316 if (kvm->dirty_ring_size) 3317 kvm_dirty_ring_push(&vcpu->dirty_ring, 3318 slot, rel_gfn); 3319 else 3320 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3321 } 3322 } 3323 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3324 3325 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3326 { 3327 struct kvm_memory_slot *memslot; 3328 3329 memslot = gfn_to_memslot(kvm, gfn); 3330 mark_page_dirty_in_slot(kvm, memslot, gfn); 3331 } 3332 EXPORT_SYMBOL_GPL(mark_page_dirty); 3333 3334 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3335 { 3336 struct kvm_memory_slot *memslot; 3337 3338 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3339 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3340 } 3341 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3342 3343 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3344 { 3345 if (!vcpu->sigset_active) 3346 return; 3347 3348 /* 3349 * This does a lockless modification of ->real_blocked, which is fine 3350 * because, only current can change ->real_blocked and all readers of 3351 * ->real_blocked don't care as long ->real_blocked is always a subset 3352 * of ->blocked. 3353 */ 3354 sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); 3355 } 3356 3357 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 3358 { 3359 if (!vcpu->sigset_active) 3360 return; 3361 3362 sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); 3363 sigemptyset(¤t->real_blocked); 3364 } 3365 3366 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 3367 { 3368 unsigned int old, val, grow, grow_start; 3369 3370 old = val = vcpu->halt_poll_ns; 3371 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3372 grow = READ_ONCE(halt_poll_ns_grow); 3373 if (!grow) 3374 goto out; 3375 3376 val *= grow; 3377 if (val < grow_start) 3378 val = grow_start; 3379 3380 if (val > vcpu->kvm->max_halt_poll_ns) 3381 val = vcpu->kvm->max_halt_poll_ns; 3382 3383 vcpu->halt_poll_ns = val; 3384 out: 3385 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 3386 } 3387 3388 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3389 { 3390 unsigned int old, val, shrink, grow_start; 3391 3392 old = val = vcpu->halt_poll_ns; 3393 shrink = READ_ONCE(halt_poll_ns_shrink); 3394 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3395 if (shrink == 0) 3396 val = 0; 3397 else 3398 val /= shrink; 3399 3400 if (val < grow_start) 3401 val = 0; 3402 3403 vcpu->halt_poll_ns = val; 3404 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 3405 } 3406 3407 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 3408 { 3409 int ret = -EINTR; 3410 int idx = srcu_read_lock(&vcpu->kvm->srcu); 3411 3412 if (kvm_arch_vcpu_runnable(vcpu)) { 3413 kvm_make_request(KVM_REQ_UNHALT, vcpu); 3414 goto out; 3415 } 3416 if (kvm_cpu_has_pending_timer(vcpu)) 3417 goto out; 3418 if (signal_pending(current)) 3419 goto out; 3420 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) 3421 goto out; 3422 3423 ret = 0; 3424 out: 3425 srcu_read_unlock(&vcpu->kvm->srcu, idx); 3426 return ret; 3427 } 3428 3429 /* 3430 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is 3431 * pending. This is mostly used when halting a vCPU, but may also be used 3432 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. 3433 */ 3434 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3435 { 3436 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3437 bool waited = false; 3438 3439 vcpu->stat.generic.blocking = 1; 3440 3441 preempt_disable(); 3442 kvm_arch_vcpu_blocking(vcpu); 3443 prepare_to_rcuwait(wait); 3444 preempt_enable(); 3445 3446 for (;;) { 3447 set_current_state(TASK_INTERRUPTIBLE); 3448 3449 if (kvm_vcpu_check_block(vcpu) < 0) 3450 break; 3451 3452 waited = true; 3453 schedule(); 3454 } 3455 3456 preempt_disable(); 3457 finish_rcuwait(wait); 3458 kvm_arch_vcpu_unblocking(vcpu); 3459 preempt_enable(); 3460 3461 vcpu->stat.generic.blocking = 0; 3462 3463 return waited; 3464 } 3465 3466 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3467 ktime_t end, bool success) 3468 { 3469 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3470 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3471 3472 ++vcpu->stat.generic.halt_attempted_poll; 3473 3474 if (success) { 3475 ++vcpu->stat.generic.halt_successful_poll; 3476 3477 if (!vcpu_valid_wakeup(vcpu)) 3478 ++vcpu->stat.generic.halt_poll_invalid; 3479 3480 stats->halt_poll_success_ns += poll_ns; 3481 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3482 } else { 3483 stats->halt_poll_fail_ns += poll_ns; 3484 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3485 } 3486 } 3487 3488 /* 3489 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt 3490 * polling is enabled, busy wait for a short time before blocking to avoid the 3491 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3492 * is halted. 3493 */ 3494 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3495 { 3496 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3497 bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3498 ktime_t start, cur, poll_end; 3499 bool waited = false; 3500 u64 halt_ns; 3501 3502 start = cur = poll_end = ktime_get(); 3503 if (do_halt_poll) { 3504 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3505 3506 do { 3507 /* 3508 * This sets KVM_REQ_UNHALT if an interrupt 3509 * arrives. 3510 */ 3511 if (kvm_vcpu_check_block(vcpu) < 0) 3512 goto out; 3513 cpu_relax(); 3514 poll_end = cur = ktime_get(); 3515 } while (kvm_vcpu_can_poll(cur, stop)); 3516 } 3517 3518 waited = kvm_vcpu_block(vcpu); 3519 3520 cur = ktime_get(); 3521 if (waited) { 3522 vcpu->stat.generic.halt_wait_ns += 3523 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3524 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3525 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3526 } 3527 out: 3528 /* The total time the vCPU was "halted", including polling time. */ 3529 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3530 3531 /* 3532 * Note, halt-polling is considered successful so long as the vCPU was 3533 * never actually scheduled out, i.e. even if the wake event arrived 3534 * after of the halt-polling loop itself, but before the full wait. 3535 */ 3536 if (do_halt_poll) 3537 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3538 3539 if (halt_poll_allowed) { 3540 if (!vcpu_valid_wakeup(vcpu)) { 3541 shrink_halt_poll_ns(vcpu); 3542 } else if (vcpu->kvm->max_halt_poll_ns) { 3543 if (halt_ns <= vcpu->halt_poll_ns) 3544 ; 3545 /* we had a long block, shrink polling */ 3546 else if (vcpu->halt_poll_ns && 3547 halt_ns > vcpu->kvm->max_halt_poll_ns) 3548 shrink_halt_poll_ns(vcpu); 3549 /* we had a short halt and our poll time is too small */ 3550 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3551 halt_ns < vcpu->kvm->max_halt_poll_ns) 3552 grow_halt_poll_ns(vcpu); 3553 } else { 3554 vcpu->halt_poll_ns = 0; 3555 } 3556 } 3557 3558 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3559 } 3560 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3561 3562 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3563 { 3564 if (__kvm_vcpu_wake_up(vcpu)) { 3565 WRITE_ONCE(vcpu->ready, true); 3566 ++vcpu->stat.generic.halt_wakeup; 3567 return true; 3568 } 3569 3570 return false; 3571 } 3572 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3573 3574 #ifndef CONFIG_S390 3575 /* 3576 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3577 */ 3578 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3579 { 3580 int me, cpu; 3581 3582 if (kvm_vcpu_wake_up(vcpu)) 3583 return; 3584 3585 me = get_cpu(); 3586 /* 3587 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3588 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3589 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3590 * within the vCPU thread itself. 3591 */ 3592 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3593 if (vcpu->mode == IN_GUEST_MODE) 3594 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3595 goto out; 3596 } 3597 3598 /* 3599 * Note, the vCPU could get migrated to a different pCPU at any point 3600 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3601 * IPI to the previous pCPU. But, that's ok because the purpose of the 3602 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3603 * vCPU also requires it to leave IN_GUEST_MODE. 3604 */ 3605 if (kvm_arch_vcpu_should_kick(vcpu)) { 3606 cpu = READ_ONCE(vcpu->cpu); 3607 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3608 smp_send_reschedule(cpu); 3609 } 3610 out: 3611 put_cpu(); 3612 } 3613 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3614 #endif /* !CONFIG_S390 */ 3615 3616 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3617 { 3618 struct pid *pid; 3619 struct task_struct *task = NULL; 3620 int ret = 0; 3621 3622 rcu_read_lock(); 3623 pid = rcu_dereference(target->pid); 3624 if (pid) 3625 task = get_pid_task(pid, PIDTYPE_PID); 3626 rcu_read_unlock(); 3627 if (!task) 3628 return ret; 3629 ret = yield_to(task, 1); 3630 put_task_struct(task); 3631 3632 return ret; 3633 } 3634 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3635 3636 /* 3637 * Helper that checks whether a VCPU is eligible for directed yield. 3638 * Most eligible candidate to yield is decided by following heuristics: 3639 * 3640 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3641 * (preempted lock holder), indicated by @in_spin_loop. 3642 * Set at the beginning and cleared at the end of interception/PLE handler. 3643 * 3644 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3645 * chance last time (mostly it has become eligible now since we have probably 3646 * yielded to lockholder in last iteration. This is done by toggling 3647 * @dy_eligible each time a VCPU checked for eligibility.) 3648 * 3649 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3650 * to preempted lock-holder could result in wrong VCPU selection and CPU 3651 * burning. Giving priority for a potential lock-holder increases lock 3652 * progress. 3653 * 3654 * Since algorithm is based on heuristics, accessing another VCPU data without 3655 * locking does not harm. It may result in trying to yield to same VCPU, fail 3656 * and continue with next VCPU and so on. 3657 */ 3658 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3659 { 3660 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3661 bool eligible; 3662 3663 eligible = !vcpu->spin_loop.in_spin_loop || 3664 vcpu->spin_loop.dy_eligible; 3665 3666 if (vcpu->spin_loop.in_spin_loop) 3667 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3668 3669 return eligible; 3670 #else 3671 return true; 3672 #endif 3673 } 3674 3675 /* 3676 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3677 * a vcpu_load/vcpu_put pair. However, for most architectures 3678 * kvm_arch_vcpu_runnable does not require vcpu_load. 3679 */ 3680 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3681 { 3682 return kvm_arch_vcpu_runnable(vcpu); 3683 } 3684 3685 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3686 { 3687 if (kvm_arch_dy_runnable(vcpu)) 3688 return true; 3689 3690 #ifdef CONFIG_KVM_ASYNC_PF 3691 if (!list_empty_careful(&vcpu->async_pf.done)) 3692 return true; 3693 #endif 3694 3695 return false; 3696 } 3697 3698 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3699 { 3700 return false; 3701 } 3702 3703 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3704 { 3705 struct kvm *kvm = me->kvm; 3706 struct kvm_vcpu *vcpu; 3707 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3708 unsigned long i; 3709 int yielded = 0; 3710 int try = 3; 3711 int pass; 3712 3713 kvm_vcpu_set_in_spin_loop(me, true); 3714 /* 3715 * We boost the priority of a VCPU that is runnable but not 3716 * currently running, because it got preempted by something 3717 * else and called schedule in __vcpu_run. Hopefully that 3718 * VCPU is holding the lock that we need and will release it. 3719 * We approximate round-robin by starting at the last boosted VCPU. 3720 */ 3721 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3722 kvm_for_each_vcpu(i, vcpu, kvm) { 3723 if (!pass && i <= last_boosted_vcpu) { 3724 i = last_boosted_vcpu; 3725 continue; 3726 } else if (pass && i > last_boosted_vcpu) 3727 break; 3728 if (!READ_ONCE(vcpu->ready)) 3729 continue; 3730 if (vcpu == me) 3731 continue; 3732 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3733 continue; 3734 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3735 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3736 !kvm_arch_vcpu_in_kernel(vcpu)) 3737 continue; 3738 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3739 continue; 3740 3741 yielded = kvm_vcpu_yield_to(vcpu); 3742 if (yielded > 0) { 3743 kvm->last_boosted_vcpu = i; 3744 break; 3745 } else if (yielded < 0) { 3746 try--; 3747 if (!try) 3748 break; 3749 } 3750 } 3751 } 3752 kvm_vcpu_set_in_spin_loop(me, false); 3753 3754 /* Ensure vcpu is not eligible during next spinloop */ 3755 kvm_vcpu_set_dy_eligible(me, false); 3756 } 3757 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3758 3759 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3760 { 3761 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3762 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3763 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3764 kvm->dirty_ring_size / PAGE_SIZE); 3765 #else 3766 return false; 3767 #endif 3768 } 3769 3770 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3771 { 3772 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3773 struct page *page; 3774 3775 if (vmf->pgoff == 0) 3776 page = virt_to_page(vcpu->run); 3777 #ifdef CONFIG_X86 3778 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3779 page = virt_to_page(vcpu->arch.pio_data); 3780 #endif 3781 #ifdef CONFIG_KVM_MMIO 3782 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3783 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3784 #endif 3785 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3786 page = kvm_dirty_ring_get_page( 3787 &vcpu->dirty_ring, 3788 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3789 else 3790 return kvm_arch_vcpu_fault(vcpu, vmf); 3791 get_page(page); 3792 vmf->page = page; 3793 return 0; 3794 } 3795 3796 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3797 .fault = kvm_vcpu_fault, 3798 }; 3799 3800 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3801 { 3802 struct kvm_vcpu *vcpu = file->private_data; 3803 unsigned long pages = vma_pages(vma); 3804 3805 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3806 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3807 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3808 return -EINVAL; 3809 3810 vma->vm_ops = &kvm_vcpu_vm_ops; 3811 return 0; 3812 } 3813 3814 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3815 { 3816 struct kvm_vcpu *vcpu = filp->private_data; 3817 3818 kvm_put_kvm(vcpu->kvm); 3819 return 0; 3820 } 3821 3822 static const struct file_operations kvm_vcpu_fops = { 3823 .release = kvm_vcpu_release, 3824 .unlocked_ioctl = kvm_vcpu_ioctl, 3825 .mmap = kvm_vcpu_mmap, 3826 .llseek = noop_llseek, 3827 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3828 }; 3829 3830 /* 3831 * Allocates an inode for the vcpu. 3832 */ 3833 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3834 { 3835 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3836 3837 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3838 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3839 } 3840 3841 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3842 static int vcpu_get_pid(void *data, u64 *val) 3843 { 3844 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data; 3845 *val = pid_nr(rcu_access_pointer(vcpu->pid)); 3846 return 0; 3847 } 3848 3849 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n"); 3850 3851 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3852 { 3853 struct dentry *debugfs_dentry; 3854 char dir_name[ITOA_MAX_LEN * 2]; 3855 3856 if (!debugfs_initialized()) 3857 return; 3858 3859 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3860 debugfs_dentry = debugfs_create_dir(dir_name, 3861 vcpu->kvm->debugfs_dentry); 3862 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, 3863 &vcpu_get_pid_fops); 3864 3865 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3866 } 3867 #endif 3868 3869 /* 3870 * Creates some virtual cpus. Good luck creating more than one. 3871 */ 3872 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3873 { 3874 int r; 3875 struct kvm_vcpu *vcpu; 3876 struct page *page; 3877 3878 if (id >= KVM_MAX_VCPU_IDS) 3879 return -EINVAL; 3880 3881 mutex_lock(&kvm->lock); 3882 if (kvm->created_vcpus >= kvm->max_vcpus) { 3883 mutex_unlock(&kvm->lock); 3884 return -EINVAL; 3885 } 3886 3887 r = kvm_arch_vcpu_precreate(kvm, id); 3888 if (r) { 3889 mutex_unlock(&kvm->lock); 3890 return r; 3891 } 3892 3893 kvm->created_vcpus++; 3894 mutex_unlock(&kvm->lock); 3895 3896 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3897 if (!vcpu) { 3898 r = -ENOMEM; 3899 goto vcpu_decrement; 3900 } 3901 3902 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3903 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3904 if (!page) { 3905 r = -ENOMEM; 3906 goto vcpu_free; 3907 } 3908 vcpu->run = page_address(page); 3909 3910 kvm_vcpu_init(vcpu, kvm, id); 3911 3912 r = kvm_arch_vcpu_create(vcpu); 3913 if (r) 3914 goto vcpu_free_run_page; 3915 3916 if (kvm->dirty_ring_size) { 3917 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3918 id, kvm->dirty_ring_size); 3919 if (r) 3920 goto arch_vcpu_destroy; 3921 } 3922 3923 mutex_lock(&kvm->lock); 3924 if (kvm_get_vcpu_by_id(kvm, id)) { 3925 r = -EEXIST; 3926 goto unlock_vcpu_destroy; 3927 } 3928 3929 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3930 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); 3931 BUG_ON(r == -EBUSY); 3932 if (r) 3933 goto unlock_vcpu_destroy; 3934 3935 /* Now it's all set up, let userspace reach it */ 3936 kvm_get_kvm(kvm); 3937 r = create_vcpu_fd(vcpu); 3938 if (r < 0) { 3939 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); 3940 kvm_put_kvm_no_destroy(kvm); 3941 goto unlock_vcpu_destroy; 3942 } 3943 3944 /* 3945 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 3946 * pointer before kvm->online_vcpu's incremented value. 3947 */ 3948 smp_wmb(); 3949 atomic_inc(&kvm->online_vcpus); 3950 3951 mutex_unlock(&kvm->lock); 3952 kvm_arch_vcpu_postcreate(vcpu); 3953 kvm_create_vcpu_debugfs(vcpu); 3954 return r; 3955 3956 unlock_vcpu_destroy: 3957 mutex_unlock(&kvm->lock); 3958 kvm_dirty_ring_free(&vcpu->dirty_ring); 3959 arch_vcpu_destroy: 3960 kvm_arch_vcpu_destroy(vcpu); 3961 vcpu_free_run_page: 3962 free_page((unsigned long)vcpu->run); 3963 vcpu_free: 3964 kmem_cache_free(kvm_vcpu_cache, vcpu); 3965 vcpu_decrement: 3966 mutex_lock(&kvm->lock); 3967 kvm->created_vcpus--; 3968 mutex_unlock(&kvm->lock); 3969 return r; 3970 } 3971 3972 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3973 { 3974 if (sigset) { 3975 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3976 vcpu->sigset_active = 1; 3977 vcpu->sigset = *sigset; 3978 } else 3979 vcpu->sigset_active = 0; 3980 return 0; 3981 } 3982 3983 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 3984 size_t size, loff_t *offset) 3985 { 3986 struct kvm_vcpu *vcpu = file->private_data; 3987 3988 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 3989 &kvm_vcpu_stats_desc[0], &vcpu->stat, 3990 sizeof(vcpu->stat), user_buffer, size, offset); 3991 } 3992 3993 static const struct file_operations kvm_vcpu_stats_fops = { 3994 .read = kvm_vcpu_stats_read, 3995 .llseek = noop_llseek, 3996 }; 3997 3998 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 3999 { 4000 int fd; 4001 struct file *file; 4002 char name[15 + ITOA_MAX_LEN + 1]; 4003 4004 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 4005 4006 fd = get_unused_fd_flags(O_CLOEXEC); 4007 if (fd < 0) 4008 return fd; 4009 4010 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 4011 if (IS_ERR(file)) { 4012 put_unused_fd(fd); 4013 return PTR_ERR(file); 4014 } 4015 file->f_mode |= FMODE_PREAD; 4016 fd_install(fd, file); 4017 4018 return fd; 4019 } 4020 4021 static long kvm_vcpu_ioctl(struct file *filp, 4022 unsigned int ioctl, unsigned long arg) 4023 { 4024 struct kvm_vcpu *vcpu = filp->private_data; 4025 void __user *argp = (void __user *)arg; 4026 int r; 4027 struct kvm_fpu *fpu = NULL; 4028 struct kvm_sregs *kvm_sregs = NULL; 4029 4030 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4031 return -EIO; 4032 4033 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 4034 return -EINVAL; 4035 4036 /* 4037 * Some architectures have vcpu ioctls that are asynchronous to vcpu 4038 * execution; mutex_lock() would break them. 4039 */ 4040 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 4041 if (r != -ENOIOCTLCMD) 4042 return r; 4043 4044 if (mutex_lock_killable(&vcpu->mutex)) 4045 return -EINTR; 4046 switch (ioctl) { 4047 case KVM_RUN: { 4048 struct pid *oldpid; 4049 r = -EINVAL; 4050 if (arg) 4051 goto out; 4052 oldpid = rcu_access_pointer(vcpu->pid); 4053 if (unlikely(oldpid != task_pid(current))) { 4054 /* The thread running this VCPU changed. */ 4055 struct pid *newpid; 4056 4057 r = kvm_arch_vcpu_run_pid_change(vcpu); 4058 if (r) 4059 break; 4060 4061 newpid = get_task_pid(current, PIDTYPE_PID); 4062 rcu_assign_pointer(vcpu->pid, newpid); 4063 if (oldpid) 4064 synchronize_rcu(); 4065 put_pid(oldpid); 4066 } 4067 r = kvm_arch_vcpu_ioctl_run(vcpu); 4068 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 4069 break; 4070 } 4071 case KVM_GET_REGS: { 4072 struct kvm_regs *kvm_regs; 4073 4074 r = -ENOMEM; 4075 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 4076 if (!kvm_regs) 4077 goto out; 4078 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 4079 if (r) 4080 goto out_free1; 4081 r = -EFAULT; 4082 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 4083 goto out_free1; 4084 r = 0; 4085 out_free1: 4086 kfree(kvm_regs); 4087 break; 4088 } 4089 case KVM_SET_REGS: { 4090 struct kvm_regs *kvm_regs; 4091 4092 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 4093 if (IS_ERR(kvm_regs)) { 4094 r = PTR_ERR(kvm_regs); 4095 goto out; 4096 } 4097 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 4098 kfree(kvm_regs); 4099 break; 4100 } 4101 case KVM_GET_SREGS: { 4102 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 4103 GFP_KERNEL_ACCOUNT); 4104 r = -ENOMEM; 4105 if (!kvm_sregs) 4106 goto out; 4107 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 4108 if (r) 4109 goto out; 4110 r = -EFAULT; 4111 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 4112 goto out; 4113 r = 0; 4114 break; 4115 } 4116 case KVM_SET_SREGS: { 4117 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 4118 if (IS_ERR(kvm_sregs)) { 4119 r = PTR_ERR(kvm_sregs); 4120 kvm_sregs = NULL; 4121 goto out; 4122 } 4123 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 4124 break; 4125 } 4126 case KVM_GET_MP_STATE: { 4127 struct kvm_mp_state mp_state; 4128 4129 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 4130 if (r) 4131 goto out; 4132 r = -EFAULT; 4133 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 4134 goto out; 4135 r = 0; 4136 break; 4137 } 4138 case KVM_SET_MP_STATE: { 4139 struct kvm_mp_state mp_state; 4140 4141 r = -EFAULT; 4142 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 4143 goto out; 4144 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 4145 break; 4146 } 4147 case KVM_TRANSLATE: { 4148 struct kvm_translation tr; 4149 4150 r = -EFAULT; 4151 if (copy_from_user(&tr, argp, sizeof(tr))) 4152 goto out; 4153 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4154 if (r) 4155 goto out; 4156 r = -EFAULT; 4157 if (copy_to_user(argp, &tr, sizeof(tr))) 4158 goto out; 4159 r = 0; 4160 break; 4161 } 4162 case KVM_SET_GUEST_DEBUG: { 4163 struct kvm_guest_debug dbg; 4164 4165 r = -EFAULT; 4166 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4167 goto out; 4168 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4169 break; 4170 } 4171 case KVM_SET_SIGNAL_MASK: { 4172 struct kvm_signal_mask __user *sigmask_arg = argp; 4173 struct kvm_signal_mask kvm_sigmask; 4174 sigset_t sigset, *p; 4175 4176 p = NULL; 4177 if (argp) { 4178 r = -EFAULT; 4179 if (copy_from_user(&kvm_sigmask, argp, 4180 sizeof(kvm_sigmask))) 4181 goto out; 4182 r = -EINVAL; 4183 if (kvm_sigmask.len != sizeof(sigset)) 4184 goto out; 4185 r = -EFAULT; 4186 if (copy_from_user(&sigset, sigmask_arg->sigset, 4187 sizeof(sigset))) 4188 goto out; 4189 p = &sigset; 4190 } 4191 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4192 break; 4193 } 4194 case KVM_GET_FPU: { 4195 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4196 r = -ENOMEM; 4197 if (!fpu) 4198 goto out; 4199 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4200 if (r) 4201 goto out; 4202 r = -EFAULT; 4203 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4204 goto out; 4205 r = 0; 4206 break; 4207 } 4208 case KVM_SET_FPU: { 4209 fpu = memdup_user(argp, sizeof(*fpu)); 4210 if (IS_ERR(fpu)) { 4211 r = PTR_ERR(fpu); 4212 fpu = NULL; 4213 goto out; 4214 } 4215 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4216 break; 4217 } 4218 case KVM_GET_STATS_FD: { 4219 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4220 break; 4221 } 4222 default: 4223 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4224 } 4225 out: 4226 mutex_unlock(&vcpu->mutex); 4227 kfree(fpu); 4228 kfree(kvm_sregs); 4229 return r; 4230 } 4231 4232 #ifdef CONFIG_KVM_COMPAT 4233 static long kvm_vcpu_compat_ioctl(struct file *filp, 4234 unsigned int ioctl, unsigned long arg) 4235 { 4236 struct kvm_vcpu *vcpu = filp->private_data; 4237 void __user *argp = compat_ptr(arg); 4238 int r; 4239 4240 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4241 return -EIO; 4242 4243 switch (ioctl) { 4244 case KVM_SET_SIGNAL_MASK: { 4245 struct kvm_signal_mask __user *sigmask_arg = argp; 4246 struct kvm_signal_mask kvm_sigmask; 4247 sigset_t sigset; 4248 4249 if (argp) { 4250 r = -EFAULT; 4251 if (copy_from_user(&kvm_sigmask, argp, 4252 sizeof(kvm_sigmask))) 4253 goto out; 4254 r = -EINVAL; 4255 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4256 goto out; 4257 r = -EFAULT; 4258 if (get_compat_sigset(&sigset, 4259 (compat_sigset_t __user *)sigmask_arg->sigset)) 4260 goto out; 4261 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4262 } else 4263 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4264 break; 4265 } 4266 default: 4267 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4268 } 4269 4270 out: 4271 return r; 4272 } 4273 #endif 4274 4275 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4276 { 4277 struct kvm_device *dev = filp->private_data; 4278 4279 if (dev->ops->mmap) 4280 return dev->ops->mmap(dev, vma); 4281 4282 return -ENODEV; 4283 } 4284 4285 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4286 int (*accessor)(struct kvm_device *dev, 4287 struct kvm_device_attr *attr), 4288 unsigned long arg) 4289 { 4290 struct kvm_device_attr attr; 4291 4292 if (!accessor) 4293 return -EPERM; 4294 4295 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4296 return -EFAULT; 4297 4298 return accessor(dev, &attr); 4299 } 4300 4301 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4302 unsigned long arg) 4303 { 4304 struct kvm_device *dev = filp->private_data; 4305 4306 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4307 return -EIO; 4308 4309 switch (ioctl) { 4310 case KVM_SET_DEVICE_ATTR: 4311 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4312 case KVM_GET_DEVICE_ATTR: 4313 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4314 case KVM_HAS_DEVICE_ATTR: 4315 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4316 default: 4317 if (dev->ops->ioctl) 4318 return dev->ops->ioctl(dev, ioctl, arg); 4319 4320 return -ENOTTY; 4321 } 4322 } 4323 4324 static int kvm_device_release(struct inode *inode, struct file *filp) 4325 { 4326 struct kvm_device *dev = filp->private_data; 4327 struct kvm *kvm = dev->kvm; 4328 4329 if (dev->ops->release) { 4330 mutex_lock(&kvm->lock); 4331 list_del(&dev->vm_node); 4332 dev->ops->release(dev); 4333 mutex_unlock(&kvm->lock); 4334 } 4335 4336 kvm_put_kvm(kvm); 4337 return 0; 4338 } 4339 4340 static const struct file_operations kvm_device_fops = { 4341 .unlocked_ioctl = kvm_device_ioctl, 4342 .release = kvm_device_release, 4343 KVM_COMPAT(kvm_device_ioctl), 4344 .mmap = kvm_device_mmap, 4345 }; 4346 4347 struct kvm_device *kvm_device_from_filp(struct file *filp) 4348 { 4349 if (filp->f_op != &kvm_device_fops) 4350 return NULL; 4351 4352 return filp->private_data; 4353 } 4354 4355 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4356 #ifdef CONFIG_KVM_MPIC 4357 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4358 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4359 #endif 4360 }; 4361 4362 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4363 { 4364 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4365 return -ENOSPC; 4366 4367 if (kvm_device_ops_table[type] != NULL) 4368 return -EEXIST; 4369 4370 kvm_device_ops_table[type] = ops; 4371 return 0; 4372 } 4373 4374 void kvm_unregister_device_ops(u32 type) 4375 { 4376 if (kvm_device_ops_table[type] != NULL) 4377 kvm_device_ops_table[type] = NULL; 4378 } 4379 4380 static int kvm_ioctl_create_device(struct kvm *kvm, 4381 struct kvm_create_device *cd) 4382 { 4383 const struct kvm_device_ops *ops; 4384 struct kvm_device *dev; 4385 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4386 int type; 4387 int ret; 4388 4389 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4390 return -ENODEV; 4391 4392 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4393 ops = kvm_device_ops_table[type]; 4394 if (ops == NULL) 4395 return -ENODEV; 4396 4397 if (test) 4398 return 0; 4399 4400 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4401 if (!dev) 4402 return -ENOMEM; 4403 4404 dev->ops = ops; 4405 dev->kvm = kvm; 4406 4407 mutex_lock(&kvm->lock); 4408 ret = ops->create(dev, type); 4409 if (ret < 0) { 4410 mutex_unlock(&kvm->lock); 4411 kfree(dev); 4412 return ret; 4413 } 4414 list_add(&dev->vm_node, &kvm->devices); 4415 mutex_unlock(&kvm->lock); 4416 4417 if (ops->init) 4418 ops->init(dev); 4419 4420 kvm_get_kvm(kvm); 4421 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4422 if (ret < 0) { 4423 kvm_put_kvm_no_destroy(kvm); 4424 mutex_lock(&kvm->lock); 4425 list_del(&dev->vm_node); 4426 if (ops->release) 4427 ops->release(dev); 4428 mutex_unlock(&kvm->lock); 4429 if (ops->destroy) 4430 ops->destroy(dev); 4431 return ret; 4432 } 4433 4434 cd->fd = ret; 4435 return 0; 4436 } 4437 4438 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4439 { 4440 switch (arg) { 4441 case KVM_CAP_USER_MEMORY: 4442 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4443 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4444 case KVM_CAP_INTERNAL_ERROR_DATA: 4445 #ifdef CONFIG_HAVE_KVM_MSI 4446 case KVM_CAP_SIGNAL_MSI: 4447 #endif 4448 #ifdef CONFIG_HAVE_KVM_IRQFD 4449 case KVM_CAP_IRQFD: 4450 case KVM_CAP_IRQFD_RESAMPLE: 4451 #endif 4452 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4453 case KVM_CAP_CHECK_EXTENSION_VM: 4454 case KVM_CAP_ENABLE_CAP_VM: 4455 case KVM_CAP_HALT_POLL: 4456 return 1; 4457 #ifdef CONFIG_KVM_MMIO 4458 case KVM_CAP_COALESCED_MMIO: 4459 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4460 case KVM_CAP_COALESCED_PIO: 4461 return 1; 4462 #endif 4463 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4464 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4465 return KVM_DIRTY_LOG_MANUAL_CAPS; 4466 #endif 4467 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4468 case KVM_CAP_IRQ_ROUTING: 4469 return KVM_MAX_IRQ_ROUTES; 4470 #endif 4471 #if KVM_ADDRESS_SPACE_NUM > 1 4472 case KVM_CAP_MULTI_ADDRESS_SPACE: 4473 return KVM_ADDRESS_SPACE_NUM; 4474 #endif 4475 case KVM_CAP_NR_MEMSLOTS: 4476 return KVM_USER_MEM_SLOTS; 4477 case KVM_CAP_DIRTY_LOG_RING: 4478 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 4479 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4480 #else 4481 return 0; 4482 #endif 4483 case KVM_CAP_BINARY_STATS_FD: 4484 case KVM_CAP_SYSTEM_EVENT_DATA: 4485 return 1; 4486 default: 4487 break; 4488 } 4489 return kvm_vm_ioctl_check_extension(kvm, arg); 4490 } 4491 4492 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4493 { 4494 int r; 4495 4496 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4497 return -EINVAL; 4498 4499 /* the size should be power of 2 */ 4500 if (!size || (size & (size - 1))) 4501 return -EINVAL; 4502 4503 /* Should be bigger to keep the reserved entries, or a page */ 4504 if (size < kvm_dirty_ring_get_rsvd_entries() * 4505 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4506 return -EINVAL; 4507 4508 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4509 sizeof(struct kvm_dirty_gfn)) 4510 return -E2BIG; 4511 4512 /* We only allow it to set once */ 4513 if (kvm->dirty_ring_size) 4514 return -EINVAL; 4515 4516 mutex_lock(&kvm->lock); 4517 4518 if (kvm->created_vcpus) { 4519 /* We don't allow to change this value after vcpu created */ 4520 r = -EINVAL; 4521 } else { 4522 kvm->dirty_ring_size = size; 4523 r = 0; 4524 } 4525 4526 mutex_unlock(&kvm->lock); 4527 return r; 4528 } 4529 4530 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4531 { 4532 unsigned long i; 4533 struct kvm_vcpu *vcpu; 4534 int cleared = 0; 4535 4536 if (!kvm->dirty_ring_size) 4537 return -EINVAL; 4538 4539 mutex_lock(&kvm->slots_lock); 4540 4541 kvm_for_each_vcpu(i, vcpu, kvm) 4542 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4543 4544 mutex_unlock(&kvm->slots_lock); 4545 4546 if (cleared) 4547 kvm_flush_remote_tlbs(kvm); 4548 4549 return cleared; 4550 } 4551 4552 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4553 struct kvm_enable_cap *cap) 4554 { 4555 return -EINVAL; 4556 } 4557 4558 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4559 struct kvm_enable_cap *cap) 4560 { 4561 switch (cap->cap) { 4562 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4563 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4564 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4565 4566 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4567 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4568 4569 if (cap->flags || (cap->args[0] & ~allowed_options)) 4570 return -EINVAL; 4571 kvm->manual_dirty_log_protect = cap->args[0]; 4572 return 0; 4573 } 4574 #endif 4575 case KVM_CAP_HALT_POLL: { 4576 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4577 return -EINVAL; 4578 4579 kvm->max_halt_poll_ns = cap->args[0]; 4580 return 0; 4581 } 4582 case KVM_CAP_DIRTY_LOG_RING: 4583 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4584 default: 4585 return kvm_vm_ioctl_enable_cap(kvm, cap); 4586 } 4587 } 4588 4589 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4590 size_t size, loff_t *offset) 4591 { 4592 struct kvm *kvm = file->private_data; 4593 4594 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4595 &kvm_vm_stats_desc[0], &kvm->stat, 4596 sizeof(kvm->stat), user_buffer, size, offset); 4597 } 4598 4599 static const struct file_operations kvm_vm_stats_fops = { 4600 .read = kvm_vm_stats_read, 4601 .llseek = noop_llseek, 4602 }; 4603 4604 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4605 { 4606 int fd; 4607 struct file *file; 4608 4609 fd = get_unused_fd_flags(O_CLOEXEC); 4610 if (fd < 0) 4611 return fd; 4612 4613 file = anon_inode_getfile("kvm-vm-stats", 4614 &kvm_vm_stats_fops, kvm, O_RDONLY); 4615 if (IS_ERR(file)) { 4616 put_unused_fd(fd); 4617 return PTR_ERR(file); 4618 } 4619 file->f_mode |= FMODE_PREAD; 4620 fd_install(fd, file); 4621 4622 return fd; 4623 } 4624 4625 static long kvm_vm_ioctl(struct file *filp, 4626 unsigned int ioctl, unsigned long arg) 4627 { 4628 struct kvm *kvm = filp->private_data; 4629 void __user *argp = (void __user *)arg; 4630 int r; 4631 4632 if (kvm->mm != current->mm || kvm->vm_dead) 4633 return -EIO; 4634 switch (ioctl) { 4635 case KVM_CREATE_VCPU: 4636 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4637 break; 4638 case KVM_ENABLE_CAP: { 4639 struct kvm_enable_cap cap; 4640 4641 r = -EFAULT; 4642 if (copy_from_user(&cap, argp, sizeof(cap))) 4643 goto out; 4644 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4645 break; 4646 } 4647 case KVM_SET_USER_MEMORY_REGION: { 4648 struct kvm_userspace_memory_region kvm_userspace_mem; 4649 4650 r = -EFAULT; 4651 if (copy_from_user(&kvm_userspace_mem, argp, 4652 sizeof(kvm_userspace_mem))) 4653 goto out; 4654 4655 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4656 break; 4657 } 4658 case KVM_GET_DIRTY_LOG: { 4659 struct kvm_dirty_log log; 4660 4661 r = -EFAULT; 4662 if (copy_from_user(&log, argp, sizeof(log))) 4663 goto out; 4664 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4665 break; 4666 } 4667 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4668 case KVM_CLEAR_DIRTY_LOG: { 4669 struct kvm_clear_dirty_log log; 4670 4671 r = -EFAULT; 4672 if (copy_from_user(&log, argp, sizeof(log))) 4673 goto out; 4674 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4675 break; 4676 } 4677 #endif 4678 #ifdef CONFIG_KVM_MMIO 4679 case KVM_REGISTER_COALESCED_MMIO: { 4680 struct kvm_coalesced_mmio_zone zone; 4681 4682 r = -EFAULT; 4683 if (copy_from_user(&zone, argp, sizeof(zone))) 4684 goto out; 4685 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4686 break; 4687 } 4688 case KVM_UNREGISTER_COALESCED_MMIO: { 4689 struct kvm_coalesced_mmio_zone zone; 4690 4691 r = -EFAULT; 4692 if (copy_from_user(&zone, argp, sizeof(zone))) 4693 goto out; 4694 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4695 break; 4696 } 4697 #endif 4698 case KVM_IRQFD: { 4699 struct kvm_irqfd data; 4700 4701 r = -EFAULT; 4702 if (copy_from_user(&data, argp, sizeof(data))) 4703 goto out; 4704 r = kvm_irqfd(kvm, &data); 4705 break; 4706 } 4707 case KVM_IOEVENTFD: { 4708 struct kvm_ioeventfd data; 4709 4710 r = -EFAULT; 4711 if (copy_from_user(&data, argp, sizeof(data))) 4712 goto out; 4713 r = kvm_ioeventfd(kvm, &data); 4714 break; 4715 } 4716 #ifdef CONFIG_HAVE_KVM_MSI 4717 case KVM_SIGNAL_MSI: { 4718 struct kvm_msi msi; 4719 4720 r = -EFAULT; 4721 if (copy_from_user(&msi, argp, sizeof(msi))) 4722 goto out; 4723 r = kvm_send_userspace_msi(kvm, &msi); 4724 break; 4725 } 4726 #endif 4727 #ifdef __KVM_HAVE_IRQ_LINE 4728 case KVM_IRQ_LINE_STATUS: 4729 case KVM_IRQ_LINE: { 4730 struct kvm_irq_level irq_event; 4731 4732 r = -EFAULT; 4733 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4734 goto out; 4735 4736 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4737 ioctl == KVM_IRQ_LINE_STATUS); 4738 if (r) 4739 goto out; 4740 4741 r = -EFAULT; 4742 if (ioctl == KVM_IRQ_LINE_STATUS) { 4743 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4744 goto out; 4745 } 4746 4747 r = 0; 4748 break; 4749 } 4750 #endif 4751 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4752 case KVM_SET_GSI_ROUTING: { 4753 struct kvm_irq_routing routing; 4754 struct kvm_irq_routing __user *urouting; 4755 struct kvm_irq_routing_entry *entries = NULL; 4756 4757 r = -EFAULT; 4758 if (copy_from_user(&routing, argp, sizeof(routing))) 4759 goto out; 4760 r = -EINVAL; 4761 if (!kvm_arch_can_set_irq_routing(kvm)) 4762 goto out; 4763 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4764 goto out; 4765 if (routing.flags) 4766 goto out; 4767 if (routing.nr) { 4768 urouting = argp; 4769 entries = vmemdup_user(urouting->entries, 4770 array_size(sizeof(*entries), 4771 routing.nr)); 4772 if (IS_ERR(entries)) { 4773 r = PTR_ERR(entries); 4774 goto out; 4775 } 4776 } 4777 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4778 routing.flags); 4779 kvfree(entries); 4780 break; 4781 } 4782 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4783 case KVM_CREATE_DEVICE: { 4784 struct kvm_create_device cd; 4785 4786 r = -EFAULT; 4787 if (copy_from_user(&cd, argp, sizeof(cd))) 4788 goto out; 4789 4790 r = kvm_ioctl_create_device(kvm, &cd); 4791 if (r) 4792 goto out; 4793 4794 r = -EFAULT; 4795 if (copy_to_user(argp, &cd, sizeof(cd))) 4796 goto out; 4797 4798 r = 0; 4799 break; 4800 } 4801 case KVM_CHECK_EXTENSION: 4802 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4803 break; 4804 case KVM_RESET_DIRTY_RINGS: 4805 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4806 break; 4807 case KVM_GET_STATS_FD: 4808 r = kvm_vm_ioctl_get_stats_fd(kvm); 4809 break; 4810 default: 4811 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4812 } 4813 out: 4814 return r; 4815 } 4816 4817 #ifdef CONFIG_KVM_COMPAT 4818 struct compat_kvm_dirty_log { 4819 __u32 slot; 4820 __u32 padding1; 4821 union { 4822 compat_uptr_t dirty_bitmap; /* one bit per page */ 4823 __u64 padding2; 4824 }; 4825 }; 4826 4827 struct compat_kvm_clear_dirty_log { 4828 __u32 slot; 4829 __u32 num_pages; 4830 __u64 first_page; 4831 union { 4832 compat_uptr_t dirty_bitmap; /* one bit per page */ 4833 __u64 padding2; 4834 }; 4835 }; 4836 4837 static long kvm_vm_compat_ioctl(struct file *filp, 4838 unsigned int ioctl, unsigned long arg) 4839 { 4840 struct kvm *kvm = filp->private_data; 4841 int r; 4842 4843 if (kvm->mm != current->mm || kvm->vm_dead) 4844 return -EIO; 4845 switch (ioctl) { 4846 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4847 case KVM_CLEAR_DIRTY_LOG: { 4848 struct compat_kvm_clear_dirty_log compat_log; 4849 struct kvm_clear_dirty_log log; 4850 4851 if (copy_from_user(&compat_log, (void __user *)arg, 4852 sizeof(compat_log))) 4853 return -EFAULT; 4854 log.slot = compat_log.slot; 4855 log.num_pages = compat_log.num_pages; 4856 log.first_page = compat_log.first_page; 4857 log.padding2 = compat_log.padding2; 4858 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4859 4860 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4861 break; 4862 } 4863 #endif 4864 case KVM_GET_DIRTY_LOG: { 4865 struct compat_kvm_dirty_log compat_log; 4866 struct kvm_dirty_log log; 4867 4868 if (copy_from_user(&compat_log, (void __user *)arg, 4869 sizeof(compat_log))) 4870 return -EFAULT; 4871 log.slot = compat_log.slot; 4872 log.padding1 = compat_log.padding1; 4873 log.padding2 = compat_log.padding2; 4874 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4875 4876 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4877 break; 4878 } 4879 default: 4880 r = kvm_vm_ioctl(filp, ioctl, arg); 4881 } 4882 return r; 4883 } 4884 #endif 4885 4886 static const struct file_operations kvm_vm_fops = { 4887 .release = kvm_vm_release, 4888 .unlocked_ioctl = kvm_vm_ioctl, 4889 .llseek = noop_llseek, 4890 KVM_COMPAT(kvm_vm_compat_ioctl), 4891 }; 4892 4893 bool file_is_kvm(struct file *file) 4894 { 4895 return file && file->f_op == &kvm_vm_fops; 4896 } 4897 EXPORT_SYMBOL_GPL(file_is_kvm); 4898 4899 static int kvm_dev_ioctl_create_vm(unsigned long type) 4900 { 4901 char fdname[ITOA_MAX_LEN + 1]; 4902 int r, fd; 4903 struct kvm *kvm; 4904 struct file *file; 4905 4906 fd = get_unused_fd_flags(O_CLOEXEC); 4907 if (fd < 0) 4908 return fd; 4909 4910 snprintf(fdname, sizeof(fdname), "%d", fd); 4911 4912 kvm = kvm_create_vm(type, fdname); 4913 if (IS_ERR(kvm)) { 4914 r = PTR_ERR(kvm); 4915 goto put_fd; 4916 } 4917 4918 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4919 if (IS_ERR(file)) { 4920 r = PTR_ERR(file); 4921 goto put_kvm; 4922 } 4923 4924 /* 4925 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4926 * already set, with ->release() being kvm_vm_release(). In error 4927 * cases it will be called by the final fput(file) and will take 4928 * care of doing kvm_put_kvm(kvm). 4929 */ 4930 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4931 4932 fd_install(fd, file); 4933 return fd; 4934 4935 put_kvm: 4936 kvm_put_kvm(kvm); 4937 put_fd: 4938 put_unused_fd(fd); 4939 return r; 4940 } 4941 4942 static long kvm_dev_ioctl(struct file *filp, 4943 unsigned int ioctl, unsigned long arg) 4944 { 4945 long r = -EINVAL; 4946 4947 switch (ioctl) { 4948 case KVM_GET_API_VERSION: 4949 if (arg) 4950 goto out; 4951 r = KVM_API_VERSION; 4952 break; 4953 case KVM_CREATE_VM: 4954 r = kvm_dev_ioctl_create_vm(arg); 4955 break; 4956 case KVM_CHECK_EXTENSION: 4957 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4958 break; 4959 case KVM_GET_VCPU_MMAP_SIZE: 4960 if (arg) 4961 goto out; 4962 r = PAGE_SIZE; /* struct kvm_run */ 4963 #ifdef CONFIG_X86 4964 r += PAGE_SIZE; /* pio data page */ 4965 #endif 4966 #ifdef CONFIG_KVM_MMIO 4967 r += PAGE_SIZE; /* coalesced mmio ring page */ 4968 #endif 4969 break; 4970 case KVM_TRACE_ENABLE: 4971 case KVM_TRACE_PAUSE: 4972 case KVM_TRACE_DISABLE: 4973 r = -EOPNOTSUPP; 4974 break; 4975 default: 4976 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4977 } 4978 out: 4979 return r; 4980 } 4981 4982 static struct file_operations kvm_chardev_ops = { 4983 .unlocked_ioctl = kvm_dev_ioctl, 4984 .llseek = noop_llseek, 4985 KVM_COMPAT(kvm_dev_ioctl), 4986 }; 4987 4988 static struct miscdevice kvm_dev = { 4989 KVM_MINOR, 4990 "kvm", 4991 &kvm_chardev_ops, 4992 }; 4993 4994 static void hardware_enable_nolock(void *junk) 4995 { 4996 int cpu = raw_smp_processor_id(); 4997 int r; 4998 4999 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 5000 return; 5001 5002 cpumask_set_cpu(cpu, cpus_hardware_enabled); 5003 5004 r = kvm_arch_hardware_enable(); 5005 5006 if (r) { 5007 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 5008 atomic_inc(&hardware_enable_failed); 5009 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 5010 } 5011 } 5012 5013 static int kvm_starting_cpu(unsigned int cpu) 5014 { 5015 raw_spin_lock(&kvm_count_lock); 5016 if (kvm_usage_count) 5017 hardware_enable_nolock(NULL); 5018 raw_spin_unlock(&kvm_count_lock); 5019 return 0; 5020 } 5021 5022 static void hardware_disable_nolock(void *junk) 5023 { 5024 int cpu = raw_smp_processor_id(); 5025 5026 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 5027 return; 5028 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 5029 kvm_arch_hardware_disable(); 5030 } 5031 5032 static int kvm_dying_cpu(unsigned int cpu) 5033 { 5034 raw_spin_lock(&kvm_count_lock); 5035 if (kvm_usage_count) 5036 hardware_disable_nolock(NULL); 5037 raw_spin_unlock(&kvm_count_lock); 5038 return 0; 5039 } 5040 5041 static void hardware_disable_all_nolock(void) 5042 { 5043 BUG_ON(!kvm_usage_count); 5044 5045 kvm_usage_count--; 5046 if (!kvm_usage_count) 5047 on_each_cpu(hardware_disable_nolock, NULL, 1); 5048 } 5049 5050 static void hardware_disable_all(void) 5051 { 5052 raw_spin_lock(&kvm_count_lock); 5053 hardware_disable_all_nolock(); 5054 raw_spin_unlock(&kvm_count_lock); 5055 } 5056 5057 static int hardware_enable_all(void) 5058 { 5059 int r = 0; 5060 5061 raw_spin_lock(&kvm_count_lock); 5062 5063 kvm_usage_count++; 5064 if (kvm_usage_count == 1) { 5065 atomic_set(&hardware_enable_failed, 0); 5066 on_each_cpu(hardware_enable_nolock, NULL, 1); 5067 5068 if (atomic_read(&hardware_enable_failed)) { 5069 hardware_disable_all_nolock(); 5070 r = -EBUSY; 5071 } 5072 } 5073 5074 raw_spin_unlock(&kvm_count_lock); 5075 5076 return r; 5077 } 5078 5079 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 5080 void *v) 5081 { 5082 /* 5083 * Some (well, at least mine) BIOSes hang on reboot if 5084 * in vmx root mode. 5085 * 5086 * And Intel TXT required VMX off for all cpu when system shutdown. 5087 */ 5088 pr_info("kvm: exiting hardware virtualization\n"); 5089 kvm_rebooting = true; 5090 on_each_cpu(hardware_disable_nolock, NULL, 1); 5091 return NOTIFY_OK; 5092 } 5093 5094 static struct notifier_block kvm_reboot_notifier = { 5095 .notifier_call = kvm_reboot, 5096 .priority = 0, 5097 }; 5098 5099 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 5100 { 5101 int i; 5102 5103 for (i = 0; i < bus->dev_count; i++) { 5104 struct kvm_io_device *pos = bus->range[i].dev; 5105 5106 kvm_iodevice_destructor(pos); 5107 } 5108 kfree(bus); 5109 } 5110 5111 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 5112 const struct kvm_io_range *r2) 5113 { 5114 gpa_t addr1 = r1->addr; 5115 gpa_t addr2 = r2->addr; 5116 5117 if (addr1 < addr2) 5118 return -1; 5119 5120 /* If r2->len == 0, match the exact address. If r2->len != 0, 5121 * accept any overlapping write. Any order is acceptable for 5122 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 5123 * we process all of them. 5124 */ 5125 if (r2->len) { 5126 addr1 += r1->len; 5127 addr2 += r2->len; 5128 } 5129 5130 if (addr1 > addr2) 5131 return 1; 5132 5133 return 0; 5134 } 5135 5136 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 5137 { 5138 return kvm_io_bus_cmp(p1, p2); 5139 } 5140 5141 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 5142 gpa_t addr, int len) 5143 { 5144 struct kvm_io_range *range, key; 5145 int off; 5146 5147 key = (struct kvm_io_range) { 5148 .addr = addr, 5149 .len = len, 5150 }; 5151 5152 range = bsearch(&key, bus->range, bus->dev_count, 5153 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5154 if (range == NULL) 5155 return -ENOENT; 5156 5157 off = range - bus->range; 5158 5159 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5160 off--; 5161 5162 return off; 5163 } 5164 5165 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5166 struct kvm_io_range *range, const void *val) 5167 { 5168 int idx; 5169 5170 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5171 if (idx < 0) 5172 return -EOPNOTSUPP; 5173 5174 while (idx < bus->dev_count && 5175 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5176 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5177 range->len, val)) 5178 return idx; 5179 idx++; 5180 } 5181 5182 return -EOPNOTSUPP; 5183 } 5184 5185 /* kvm_io_bus_write - called under kvm->slots_lock */ 5186 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5187 int len, const void *val) 5188 { 5189 struct kvm_io_bus *bus; 5190 struct kvm_io_range range; 5191 int r; 5192 5193 range = (struct kvm_io_range) { 5194 .addr = addr, 5195 .len = len, 5196 }; 5197 5198 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5199 if (!bus) 5200 return -ENOMEM; 5201 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5202 return r < 0 ? r : 0; 5203 } 5204 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5205 5206 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5207 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5208 gpa_t addr, int len, const void *val, long cookie) 5209 { 5210 struct kvm_io_bus *bus; 5211 struct kvm_io_range range; 5212 5213 range = (struct kvm_io_range) { 5214 .addr = addr, 5215 .len = len, 5216 }; 5217 5218 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5219 if (!bus) 5220 return -ENOMEM; 5221 5222 /* First try the device referenced by cookie. */ 5223 if ((cookie >= 0) && (cookie < bus->dev_count) && 5224 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5225 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5226 val)) 5227 return cookie; 5228 5229 /* 5230 * cookie contained garbage; fall back to search and return the 5231 * correct cookie value. 5232 */ 5233 return __kvm_io_bus_write(vcpu, bus, &range, val); 5234 } 5235 5236 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5237 struct kvm_io_range *range, void *val) 5238 { 5239 int idx; 5240 5241 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5242 if (idx < 0) 5243 return -EOPNOTSUPP; 5244 5245 while (idx < bus->dev_count && 5246 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5247 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5248 range->len, val)) 5249 return idx; 5250 idx++; 5251 } 5252 5253 return -EOPNOTSUPP; 5254 } 5255 5256 /* kvm_io_bus_read - called under kvm->slots_lock */ 5257 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5258 int len, void *val) 5259 { 5260 struct kvm_io_bus *bus; 5261 struct kvm_io_range range; 5262 int r; 5263 5264 range = (struct kvm_io_range) { 5265 .addr = addr, 5266 .len = len, 5267 }; 5268 5269 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5270 if (!bus) 5271 return -ENOMEM; 5272 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5273 return r < 0 ? r : 0; 5274 } 5275 5276 /* Caller must hold slots_lock. */ 5277 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5278 int len, struct kvm_io_device *dev) 5279 { 5280 int i; 5281 struct kvm_io_bus *new_bus, *bus; 5282 struct kvm_io_range range; 5283 5284 bus = kvm_get_bus(kvm, bus_idx); 5285 if (!bus) 5286 return -ENOMEM; 5287 5288 /* exclude ioeventfd which is limited by maximum fd */ 5289 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5290 return -ENOSPC; 5291 5292 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5293 GFP_KERNEL_ACCOUNT); 5294 if (!new_bus) 5295 return -ENOMEM; 5296 5297 range = (struct kvm_io_range) { 5298 .addr = addr, 5299 .len = len, 5300 .dev = dev, 5301 }; 5302 5303 for (i = 0; i < bus->dev_count; i++) 5304 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5305 break; 5306 5307 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5308 new_bus->dev_count++; 5309 new_bus->range[i] = range; 5310 memcpy(new_bus->range + i + 1, bus->range + i, 5311 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5312 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5313 synchronize_srcu_expedited(&kvm->srcu); 5314 kfree(bus); 5315 5316 return 0; 5317 } 5318 5319 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5320 struct kvm_io_device *dev) 5321 { 5322 int i, j; 5323 struct kvm_io_bus *new_bus, *bus; 5324 5325 lockdep_assert_held(&kvm->slots_lock); 5326 5327 bus = kvm_get_bus(kvm, bus_idx); 5328 if (!bus) 5329 return 0; 5330 5331 for (i = 0; i < bus->dev_count; i++) { 5332 if (bus->range[i].dev == dev) { 5333 break; 5334 } 5335 } 5336 5337 if (i == bus->dev_count) 5338 return 0; 5339 5340 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5341 GFP_KERNEL_ACCOUNT); 5342 if (new_bus) { 5343 memcpy(new_bus, bus, struct_size(bus, range, i)); 5344 new_bus->dev_count--; 5345 memcpy(new_bus->range + i, bus->range + i + 1, 5346 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5347 } 5348 5349 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5350 synchronize_srcu_expedited(&kvm->srcu); 5351 5352 /* Destroy the old bus _after_ installing the (null) bus. */ 5353 if (!new_bus) { 5354 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5355 for (j = 0; j < bus->dev_count; j++) { 5356 if (j == i) 5357 continue; 5358 kvm_iodevice_destructor(bus->range[j].dev); 5359 } 5360 } 5361 5362 kfree(bus); 5363 return new_bus ? 0 : -ENOMEM; 5364 } 5365 5366 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5367 gpa_t addr) 5368 { 5369 struct kvm_io_bus *bus; 5370 int dev_idx, srcu_idx; 5371 struct kvm_io_device *iodev = NULL; 5372 5373 srcu_idx = srcu_read_lock(&kvm->srcu); 5374 5375 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5376 if (!bus) 5377 goto out_unlock; 5378 5379 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5380 if (dev_idx < 0) 5381 goto out_unlock; 5382 5383 iodev = bus->range[dev_idx].dev; 5384 5385 out_unlock: 5386 srcu_read_unlock(&kvm->srcu, srcu_idx); 5387 5388 return iodev; 5389 } 5390 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5391 5392 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5393 int (*get)(void *, u64 *), int (*set)(void *, u64), 5394 const char *fmt) 5395 { 5396 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5397 inode->i_private; 5398 5399 /* 5400 * The debugfs files are a reference to the kvm struct which 5401 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe 5402 * avoids the race between open and the removal of the debugfs directory. 5403 */ 5404 if (!kvm_get_kvm_safe(stat_data->kvm)) 5405 return -ENOENT; 5406 5407 if (simple_attr_open(inode, file, get, 5408 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5409 ? set : NULL, 5410 fmt)) { 5411 kvm_put_kvm(stat_data->kvm); 5412 return -ENOMEM; 5413 } 5414 5415 return 0; 5416 } 5417 5418 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5419 { 5420 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5421 inode->i_private; 5422 5423 simple_attr_release(inode, file); 5424 kvm_put_kvm(stat_data->kvm); 5425 5426 return 0; 5427 } 5428 5429 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5430 { 5431 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5432 5433 return 0; 5434 } 5435 5436 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5437 { 5438 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5439 5440 return 0; 5441 } 5442 5443 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5444 { 5445 unsigned long i; 5446 struct kvm_vcpu *vcpu; 5447 5448 *val = 0; 5449 5450 kvm_for_each_vcpu(i, vcpu, kvm) 5451 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5452 5453 return 0; 5454 } 5455 5456 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5457 { 5458 unsigned long i; 5459 struct kvm_vcpu *vcpu; 5460 5461 kvm_for_each_vcpu(i, vcpu, kvm) 5462 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5463 5464 return 0; 5465 } 5466 5467 static int kvm_stat_data_get(void *data, u64 *val) 5468 { 5469 int r = -EFAULT; 5470 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5471 5472 switch (stat_data->kind) { 5473 case KVM_STAT_VM: 5474 r = kvm_get_stat_per_vm(stat_data->kvm, 5475 stat_data->desc->desc.offset, val); 5476 break; 5477 case KVM_STAT_VCPU: 5478 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5479 stat_data->desc->desc.offset, val); 5480 break; 5481 } 5482 5483 return r; 5484 } 5485 5486 static int kvm_stat_data_clear(void *data, u64 val) 5487 { 5488 int r = -EFAULT; 5489 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5490 5491 if (val) 5492 return -EINVAL; 5493 5494 switch (stat_data->kind) { 5495 case KVM_STAT_VM: 5496 r = kvm_clear_stat_per_vm(stat_data->kvm, 5497 stat_data->desc->desc.offset); 5498 break; 5499 case KVM_STAT_VCPU: 5500 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5501 stat_data->desc->desc.offset); 5502 break; 5503 } 5504 5505 return r; 5506 } 5507 5508 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5509 { 5510 __simple_attr_check_format("%llu\n", 0ull); 5511 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5512 kvm_stat_data_clear, "%llu\n"); 5513 } 5514 5515 static const struct file_operations stat_fops_per_vm = { 5516 .owner = THIS_MODULE, 5517 .open = kvm_stat_data_open, 5518 .release = kvm_debugfs_release, 5519 .read = simple_attr_read, 5520 .write = simple_attr_write, 5521 .llseek = no_llseek, 5522 }; 5523 5524 static int vm_stat_get(void *_offset, u64 *val) 5525 { 5526 unsigned offset = (long)_offset; 5527 struct kvm *kvm; 5528 u64 tmp_val; 5529 5530 *val = 0; 5531 mutex_lock(&kvm_lock); 5532 list_for_each_entry(kvm, &vm_list, vm_list) { 5533 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5534 *val += tmp_val; 5535 } 5536 mutex_unlock(&kvm_lock); 5537 return 0; 5538 } 5539 5540 static int vm_stat_clear(void *_offset, u64 val) 5541 { 5542 unsigned offset = (long)_offset; 5543 struct kvm *kvm; 5544 5545 if (val) 5546 return -EINVAL; 5547 5548 mutex_lock(&kvm_lock); 5549 list_for_each_entry(kvm, &vm_list, vm_list) { 5550 kvm_clear_stat_per_vm(kvm, offset); 5551 } 5552 mutex_unlock(&kvm_lock); 5553 5554 return 0; 5555 } 5556 5557 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5558 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5559 5560 static int vcpu_stat_get(void *_offset, u64 *val) 5561 { 5562 unsigned offset = (long)_offset; 5563 struct kvm *kvm; 5564 u64 tmp_val; 5565 5566 *val = 0; 5567 mutex_lock(&kvm_lock); 5568 list_for_each_entry(kvm, &vm_list, vm_list) { 5569 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5570 *val += tmp_val; 5571 } 5572 mutex_unlock(&kvm_lock); 5573 return 0; 5574 } 5575 5576 static int vcpu_stat_clear(void *_offset, u64 val) 5577 { 5578 unsigned offset = (long)_offset; 5579 struct kvm *kvm; 5580 5581 if (val) 5582 return -EINVAL; 5583 5584 mutex_lock(&kvm_lock); 5585 list_for_each_entry(kvm, &vm_list, vm_list) { 5586 kvm_clear_stat_per_vcpu(kvm, offset); 5587 } 5588 mutex_unlock(&kvm_lock); 5589 5590 return 0; 5591 } 5592 5593 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5594 "%llu\n"); 5595 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5596 5597 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5598 { 5599 struct kobj_uevent_env *env; 5600 unsigned long long created, active; 5601 5602 if (!kvm_dev.this_device || !kvm) 5603 return; 5604 5605 mutex_lock(&kvm_lock); 5606 if (type == KVM_EVENT_CREATE_VM) { 5607 kvm_createvm_count++; 5608 kvm_active_vms++; 5609 } else if (type == KVM_EVENT_DESTROY_VM) { 5610 kvm_active_vms--; 5611 } 5612 created = kvm_createvm_count; 5613 active = kvm_active_vms; 5614 mutex_unlock(&kvm_lock); 5615 5616 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5617 if (!env) 5618 return; 5619 5620 add_uevent_var(env, "CREATED=%llu", created); 5621 add_uevent_var(env, "COUNT=%llu", active); 5622 5623 if (type == KVM_EVENT_CREATE_VM) { 5624 add_uevent_var(env, "EVENT=create"); 5625 kvm->userspace_pid = task_pid_nr(current); 5626 } else if (type == KVM_EVENT_DESTROY_VM) { 5627 add_uevent_var(env, "EVENT=destroy"); 5628 } 5629 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5630 5631 if (!IS_ERR(kvm->debugfs_dentry)) { 5632 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5633 5634 if (p) { 5635 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5636 if (!IS_ERR(tmp)) 5637 add_uevent_var(env, "STATS_PATH=%s", tmp); 5638 kfree(p); 5639 } 5640 } 5641 /* no need for checks, since we are adding at most only 5 keys */ 5642 env->envp[env->envp_idx++] = NULL; 5643 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5644 kfree(env); 5645 } 5646 5647 static void kvm_init_debug(void) 5648 { 5649 const struct file_operations *fops; 5650 const struct _kvm_stats_desc *pdesc; 5651 int i; 5652 5653 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5654 5655 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5656 pdesc = &kvm_vm_stats_desc[i]; 5657 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5658 fops = &vm_stat_fops; 5659 else 5660 fops = &vm_stat_readonly_fops; 5661 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5662 kvm_debugfs_dir, 5663 (void *)(long)pdesc->desc.offset, fops); 5664 } 5665 5666 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5667 pdesc = &kvm_vcpu_stats_desc[i]; 5668 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5669 fops = &vcpu_stat_fops; 5670 else 5671 fops = &vcpu_stat_readonly_fops; 5672 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5673 kvm_debugfs_dir, 5674 (void *)(long)pdesc->desc.offset, fops); 5675 } 5676 } 5677 5678 static int kvm_suspend(void) 5679 { 5680 if (kvm_usage_count) 5681 hardware_disable_nolock(NULL); 5682 return 0; 5683 } 5684 5685 static void kvm_resume(void) 5686 { 5687 if (kvm_usage_count) { 5688 lockdep_assert_not_held(&kvm_count_lock); 5689 hardware_enable_nolock(NULL); 5690 } 5691 } 5692 5693 static struct syscore_ops kvm_syscore_ops = { 5694 .suspend = kvm_suspend, 5695 .resume = kvm_resume, 5696 }; 5697 5698 static inline 5699 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5700 { 5701 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5702 } 5703 5704 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5705 { 5706 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5707 5708 WRITE_ONCE(vcpu->preempted, false); 5709 WRITE_ONCE(vcpu->ready, false); 5710 5711 __this_cpu_write(kvm_running_vcpu, vcpu); 5712 kvm_arch_sched_in(vcpu, cpu); 5713 kvm_arch_vcpu_load(vcpu, cpu); 5714 } 5715 5716 static void kvm_sched_out(struct preempt_notifier *pn, 5717 struct task_struct *next) 5718 { 5719 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5720 5721 if (current->on_rq) { 5722 WRITE_ONCE(vcpu->preempted, true); 5723 WRITE_ONCE(vcpu->ready, true); 5724 } 5725 kvm_arch_vcpu_put(vcpu); 5726 __this_cpu_write(kvm_running_vcpu, NULL); 5727 } 5728 5729 /** 5730 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5731 * 5732 * We can disable preemption locally around accessing the per-CPU variable, 5733 * and use the resolved vcpu pointer after enabling preemption again, 5734 * because even if the current thread is migrated to another CPU, reading 5735 * the per-CPU value later will give us the same value as we update the 5736 * per-CPU variable in the preempt notifier handlers. 5737 */ 5738 struct kvm_vcpu *kvm_get_running_vcpu(void) 5739 { 5740 struct kvm_vcpu *vcpu; 5741 5742 preempt_disable(); 5743 vcpu = __this_cpu_read(kvm_running_vcpu); 5744 preempt_enable(); 5745 5746 return vcpu; 5747 } 5748 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5749 5750 /** 5751 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5752 */ 5753 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5754 { 5755 return &kvm_running_vcpu; 5756 } 5757 5758 #ifdef CONFIG_GUEST_PERF_EVENTS 5759 static unsigned int kvm_guest_state(void) 5760 { 5761 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5762 unsigned int state; 5763 5764 if (!kvm_arch_pmi_in_guest(vcpu)) 5765 return 0; 5766 5767 state = PERF_GUEST_ACTIVE; 5768 if (!kvm_arch_vcpu_in_kernel(vcpu)) 5769 state |= PERF_GUEST_USER; 5770 5771 return state; 5772 } 5773 5774 static unsigned long kvm_guest_get_ip(void) 5775 { 5776 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5777 5778 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */ 5779 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) 5780 return 0; 5781 5782 return kvm_arch_vcpu_get_ip(vcpu); 5783 } 5784 5785 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5786 .state = kvm_guest_state, 5787 .get_ip = kvm_guest_get_ip, 5788 .handle_intel_pt_intr = NULL, 5789 }; 5790 5791 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)) 5792 { 5793 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler; 5794 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5795 } 5796 void kvm_unregister_perf_callbacks(void) 5797 { 5798 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5799 } 5800 #endif 5801 5802 struct kvm_cpu_compat_check { 5803 void *opaque; 5804 int *ret; 5805 }; 5806 5807 static void check_processor_compat(void *data) 5808 { 5809 struct kvm_cpu_compat_check *c = data; 5810 5811 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5812 } 5813 5814 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5815 struct module *module) 5816 { 5817 struct kvm_cpu_compat_check c; 5818 int r; 5819 int cpu; 5820 5821 r = kvm_arch_init(opaque); 5822 if (r) 5823 goto out_fail; 5824 5825 /* 5826 * kvm_arch_init makes sure there's at most one caller 5827 * for architectures that support multiple implementations, 5828 * like intel and amd on x86. 5829 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5830 * conflicts in case kvm is already setup for another implementation. 5831 */ 5832 r = kvm_irqfd_init(); 5833 if (r) 5834 goto out_irqfd; 5835 5836 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5837 r = -ENOMEM; 5838 goto out_free_0; 5839 } 5840 5841 r = kvm_arch_hardware_setup(opaque); 5842 if (r < 0) 5843 goto out_free_1; 5844 5845 c.ret = &r; 5846 c.opaque = opaque; 5847 for_each_online_cpu(cpu) { 5848 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5849 if (r < 0) 5850 goto out_free_2; 5851 } 5852 5853 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5854 kvm_starting_cpu, kvm_dying_cpu); 5855 if (r) 5856 goto out_free_2; 5857 register_reboot_notifier(&kvm_reboot_notifier); 5858 5859 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 5860 if (!vcpu_align) 5861 vcpu_align = __alignof__(struct kvm_vcpu); 5862 kvm_vcpu_cache = 5863 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5864 SLAB_ACCOUNT, 5865 offsetof(struct kvm_vcpu, arch), 5866 offsetofend(struct kvm_vcpu, stats_id) 5867 - offsetof(struct kvm_vcpu, arch), 5868 NULL); 5869 if (!kvm_vcpu_cache) { 5870 r = -ENOMEM; 5871 goto out_free_3; 5872 } 5873 5874 for_each_possible_cpu(cpu) { 5875 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 5876 GFP_KERNEL, cpu_to_node(cpu))) { 5877 r = -ENOMEM; 5878 goto out_free_4; 5879 } 5880 } 5881 5882 r = kvm_async_pf_init(); 5883 if (r) 5884 goto out_free_5; 5885 5886 kvm_chardev_ops.owner = module; 5887 5888 r = misc_register(&kvm_dev); 5889 if (r) { 5890 pr_err("kvm: misc device register failed\n"); 5891 goto out_unreg; 5892 } 5893 5894 register_syscore_ops(&kvm_syscore_ops); 5895 5896 kvm_preempt_ops.sched_in = kvm_sched_in; 5897 kvm_preempt_ops.sched_out = kvm_sched_out; 5898 5899 kvm_init_debug(); 5900 5901 r = kvm_vfio_ops_init(); 5902 WARN_ON(r); 5903 5904 return 0; 5905 5906 out_unreg: 5907 kvm_async_pf_deinit(); 5908 out_free_5: 5909 for_each_possible_cpu(cpu) 5910 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5911 out_free_4: 5912 kmem_cache_destroy(kvm_vcpu_cache); 5913 out_free_3: 5914 unregister_reboot_notifier(&kvm_reboot_notifier); 5915 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5916 out_free_2: 5917 kvm_arch_hardware_unsetup(); 5918 out_free_1: 5919 free_cpumask_var(cpus_hardware_enabled); 5920 out_free_0: 5921 kvm_irqfd_exit(); 5922 out_irqfd: 5923 kvm_arch_exit(); 5924 out_fail: 5925 return r; 5926 } 5927 EXPORT_SYMBOL_GPL(kvm_init); 5928 5929 void kvm_exit(void) 5930 { 5931 int cpu; 5932 5933 debugfs_remove_recursive(kvm_debugfs_dir); 5934 misc_deregister(&kvm_dev); 5935 for_each_possible_cpu(cpu) 5936 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5937 kmem_cache_destroy(kvm_vcpu_cache); 5938 kvm_async_pf_deinit(); 5939 unregister_syscore_ops(&kvm_syscore_ops); 5940 unregister_reboot_notifier(&kvm_reboot_notifier); 5941 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5942 on_each_cpu(hardware_disable_nolock, NULL, 1); 5943 kvm_arch_hardware_unsetup(); 5944 kvm_arch_exit(); 5945 kvm_irqfd_exit(); 5946 free_cpumask_var(cpus_hardware_enabled); 5947 kvm_vfio_ops_exit(); 5948 } 5949 EXPORT_SYMBOL_GPL(kvm_exit); 5950 5951 struct kvm_vm_worker_thread_context { 5952 struct kvm *kvm; 5953 struct task_struct *parent; 5954 struct completion init_done; 5955 kvm_vm_thread_fn_t thread_fn; 5956 uintptr_t data; 5957 int err; 5958 }; 5959 5960 static int kvm_vm_worker_thread(void *context) 5961 { 5962 /* 5963 * The init_context is allocated on the stack of the parent thread, so 5964 * we have to locally copy anything that is needed beyond initialization 5965 */ 5966 struct kvm_vm_worker_thread_context *init_context = context; 5967 struct task_struct *parent; 5968 struct kvm *kvm = init_context->kvm; 5969 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5970 uintptr_t data = init_context->data; 5971 int err; 5972 5973 err = kthread_park(current); 5974 /* kthread_park(current) is never supposed to return an error */ 5975 WARN_ON(err != 0); 5976 if (err) 5977 goto init_complete; 5978 5979 err = cgroup_attach_task_all(init_context->parent, current); 5980 if (err) { 5981 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5982 __func__, err); 5983 goto init_complete; 5984 } 5985 5986 set_user_nice(current, task_nice(init_context->parent)); 5987 5988 init_complete: 5989 init_context->err = err; 5990 complete(&init_context->init_done); 5991 init_context = NULL; 5992 5993 if (err) 5994 goto out; 5995 5996 /* Wait to be woken up by the spawner before proceeding. */ 5997 kthread_parkme(); 5998 5999 if (!kthread_should_stop()) 6000 err = thread_fn(kvm, data); 6001 6002 out: 6003 /* 6004 * Move kthread back to its original cgroup to prevent it lingering in 6005 * the cgroup of the VM process, after the latter finishes its 6006 * execution. 6007 * 6008 * kthread_stop() waits on the 'exited' completion condition which is 6009 * set in exit_mm(), via mm_release(), in do_exit(). However, the 6010 * kthread is removed from the cgroup in the cgroup_exit() which is 6011 * called after the exit_mm(). This causes the kthread_stop() to return 6012 * before the kthread actually quits the cgroup. 6013 */ 6014 rcu_read_lock(); 6015 parent = rcu_dereference(current->real_parent); 6016 get_task_struct(parent); 6017 rcu_read_unlock(); 6018 cgroup_attach_task_all(parent, current); 6019 put_task_struct(parent); 6020 6021 return err; 6022 } 6023 6024 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 6025 uintptr_t data, const char *name, 6026 struct task_struct **thread_ptr) 6027 { 6028 struct kvm_vm_worker_thread_context init_context = {}; 6029 struct task_struct *thread; 6030 6031 *thread_ptr = NULL; 6032 init_context.kvm = kvm; 6033 init_context.parent = current; 6034 init_context.thread_fn = thread_fn; 6035 init_context.data = data; 6036 init_completion(&init_context.init_done); 6037 6038 thread = kthread_run(kvm_vm_worker_thread, &init_context, 6039 "%s-%d", name, task_pid_nr(current)); 6040 if (IS_ERR(thread)) 6041 return PTR_ERR(thread); 6042 6043 /* kthread_run is never supposed to return NULL */ 6044 WARN_ON(thread == NULL); 6045 6046 wait_for_completion(&init_context.init_done); 6047 6048 if (!init_context.err) 6049 *thread_ptr = thread; 6050 6051 return init_context.err; 6052 } 6053