1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 #include <linux/suspend.h> 55 56 #include <asm/processor.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 60 #include "coalesced_mmio.h" 61 #include "async_pf.h" 62 #include "mmu_lock.h" 63 #include "vfio.h" 64 65 #define CREATE_TRACE_POINTS 66 #include <trace/events/kvm.h> 67 68 #include <linux/kvm_dirty_ring.h> 69 70 /* Worst case buffer size needed for holding an integer. */ 71 #define ITOA_MAX_LEN 12 72 73 MODULE_AUTHOR("Qumranet"); 74 MODULE_LICENSE("GPL"); 75 76 /* Architectures should define their poll value according to the halt latency */ 77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 78 module_param(halt_poll_ns, uint, 0644); 79 EXPORT_SYMBOL_GPL(halt_poll_ns); 80 81 /* Default doubles per-vcpu halt_poll_ns. */ 82 unsigned int halt_poll_ns_grow = 2; 83 module_param(halt_poll_ns_grow, uint, 0644); 84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 85 86 /* The start value to grow halt_poll_ns from */ 87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 88 module_param(halt_poll_ns_grow_start, uint, 0644); 89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 90 91 /* Default resets per-vcpu halt_poll_ns . 
*/ 92 unsigned int halt_poll_ns_shrink; 93 module_param(halt_poll_ns_shrink, uint, 0644); 94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 95 96 /* 97 * Ordering of locks: 98 * 99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 100 */ 101 102 DEFINE_MUTEX(kvm_lock); 103 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 104 LIST_HEAD(vm_list); 105 106 static cpumask_var_t cpus_hardware_enabled; 107 static int kvm_usage_count; 108 static atomic_t hardware_enable_failed; 109 110 static struct kmem_cache *kvm_vcpu_cache; 111 112 static __read_mostly struct preempt_ops kvm_preempt_ops; 113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 114 115 struct dentry *kvm_debugfs_dir; 116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 117 118 static const struct file_operations stat_fops_per_vm; 119 120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 121 unsigned long arg); 122 #ifdef CONFIG_KVM_COMPAT 123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 124 unsigned long arg); 125 #define KVM_COMPAT(c) .compat_ioctl = (c) 126 #else 127 /* 128 * For architectures that don't implement a compat infrastructure, 129 * adopt a double line of defense: 130 * - Prevent a compat task from opening /dev/kvm 131 * - If the open has been done by a 64bit task, and the KVM fd 132 * passed to a compat task, let the ioctls fail. 133 */ 134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 135 unsigned long arg) { return -EINVAL; } 136 137 static int kvm_no_compat_open(struct inode *inode, struct file *file) 138 { 139 return is_compat_task() ? -ENODEV : 0; 140 } 141 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 142 .open = kvm_no_compat_open 143 #endif 144 static int hardware_enable_all(void); 145 static void hardware_disable_all(void); 146 147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 148 149 __visible bool kvm_rebooting; 150 EXPORT_SYMBOL_GPL(kvm_rebooting); 151 152 #define KVM_EVENT_CREATE_VM 0 153 #define KVM_EVENT_DESTROY_VM 1 154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 155 static unsigned long long kvm_createvm_count; 156 static unsigned long long kvm_active_vms; 157 158 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); 159 160 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 161 unsigned long start, unsigned long end) 162 { 163 } 164 165 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) 166 { 167 /* 168 * The metadata used by is_zone_device_page() to determine whether or 169 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 170 * the device has been pinned, e.g. by get_user_pages(). WARN if the 171 * page_count() is zero to help detect bad usage of this helper. 172 */ 173 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) 174 return false; 175 176 return is_zone_device_page(pfn_to_page(pfn)); 177 } 178 179 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 180 { 181 /* 182 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 183 * perspective they are "normal" pages, albeit with slightly different 184 * usage rules. 
185 */ 186 if (pfn_valid(pfn)) 187 return PageReserved(pfn_to_page(pfn)) && 188 !is_zero_pfn(pfn) && 189 !kvm_is_zone_device_pfn(pfn); 190 191 return true; 192 } 193 194 /* 195 * Switches to specified vcpu, until a matching vcpu_put() 196 */ 197 void vcpu_load(struct kvm_vcpu *vcpu) 198 { 199 int cpu = get_cpu(); 200 201 __this_cpu_write(kvm_running_vcpu, vcpu); 202 preempt_notifier_register(&vcpu->preempt_notifier); 203 kvm_arch_vcpu_load(vcpu, cpu); 204 put_cpu(); 205 } 206 EXPORT_SYMBOL_GPL(vcpu_load); 207 208 void vcpu_put(struct kvm_vcpu *vcpu) 209 { 210 preempt_disable(); 211 kvm_arch_vcpu_put(vcpu); 212 preempt_notifier_unregister(&vcpu->preempt_notifier); 213 __this_cpu_write(kvm_running_vcpu, NULL); 214 preempt_enable(); 215 } 216 EXPORT_SYMBOL_GPL(vcpu_put); 217 218 /* TODO: merge with kvm_arch_vcpu_should_kick */ 219 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 220 { 221 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 222 223 /* 224 * We need to wait for the VCPU to reenable interrupts and get out of 225 * READING_SHADOW_PAGE_TABLES mode. 226 */ 227 if (req & KVM_REQUEST_WAIT) 228 return mode != OUTSIDE_GUEST_MODE; 229 230 /* 231 * Need to kick a running VCPU, but otherwise there is nothing to do. 232 */ 233 return mode == IN_GUEST_MODE; 234 } 235 236 static void ack_flush(void *_completed) 237 { 238 } 239 240 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) 241 { 242 if (cpumask_empty(cpus)) 243 return false; 244 245 smp_call_function_many(cpus, ack_flush, NULL, wait); 246 return true; 247 } 248 249 static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, 250 unsigned int req, struct cpumask *tmp, 251 int current_cpu) 252 { 253 int cpu; 254 255 kvm_make_request(req, vcpu); 256 257 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 258 return; 259 260 /* 261 * Note, the vCPU could get migrated to a different pCPU at any point 262 * after kvm_request_needs_ipi(), which could result in sending an IPI 263 * to the previous pCPU. But, that's OK because the purpose of the IPI 264 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is 265 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES 266 * after this point is also OK, as the requirement is only that KVM wait 267 * for vCPUs that were reading SPTEs _before_ any changes were 268 * finalized. See kvm_vcpu_kick() for more details on handling requests. 
269 */ 270 if (kvm_request_needs_ipi(vcpu, req)) { 271 cpu = READ_ONCE(vcpu->cpu); 272 if (cpu != -1 && cpu != current_cpu) 273 __cpumask_set_cpu(cpu, tmp); 274 } 275 } 276 277 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 278 unsigned long *vcpu_bitmap) 279 { 280 struct kvm_vcpu *vcpu; 281 struct cpumask *cpus; 282 int i, me; 283 bool called; 284 285 me = get_cpu(); 286 287 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 288 cpumask_clear(cpus); 289 290 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 291 vcpu = kvm_get_vcpu(kvm, i); 292 if (!vcpu) 293 continue; 294 kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); 295 } 296 297 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 298 put_cpu(); 299 300 return called; 301 } 302 303 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 304 struct kvm_vcpu *except) 305 { 306 struct kvm_vcpu *vcpu; 307 struct cpumask *cpus; 308 unsigned long i; 309 bool called; 310 int me; 311 312 me = get_cpu(); 313 314 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 315 cpumask_clear(cpus); 316 317 kvm_for_each_vcpu(i, vcpu, kvm) { 318 if (vcpu == except) 319 continue; 320 kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); 321 } 322 323 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 324 put_cpu(); 325 326 return called; 327 } 328 329 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 330 { 331 return kvm_make_all_cpus_request_except(kvm, req, NULL); 332 } 333 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 334 335 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 336 void kvm_flush_remote_tlbs(struct kvm *kvm) 337 { 338 ++kvm->stat.generic.remote_tlb_flush_requests; 339 340 /* 341 * We want to publish modifications to the page tables before reading 342 * mode. Pairs with a memory barrier in arch-specific code. 343 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 344 * and smp_mb in walk_shadow_page_lockless_begin/end. 345 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 346 * 347 * There is already an smp_mb__after_atomic() before 348 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 349 * barrier here. 350 */ 351 if (!kvm_arch_flush_remote_tlb(kvm) 352 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 353 ++kvm->stat.generic.remote_tlb_flush; 354 } 355 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 356 #endif 357 358 void kvm_reload_remote_mmus(struct kvm *kvm) 359 { 360 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 361 } 362 363 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 364 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 365 gfp_t gfp_flags) 366 { 367 gfp_flags |= mc->gfp_zero; 368 369 if (mc->kmem_cache) 370 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 371 else 372 return (void *)__get_free_page(gfp_flags); 373 } 374 375 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 376 { 377 void *obj; 378 379 if (mc->nobjs >= min) 380 return 0; 381 while (mc->nobjs < ARRAY_SIZE(mc->objects)) { 382 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); 383 if (!obj) 384 return mc->nobjs >= min ? 
0 : -ENOMEM; 385 mc->objects[mc->nobjs++] = obj; 386 } 387 return 0; 388 } 389 390 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 391 { 392 return mc->nobjs; 393 } 394 395 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 396 { 397 while (mc->nobjs) { 398 if (mc->kmem_cache) 399 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 400 else 401 free_page((unsigned long)mc->objects[--mc->nobjs]); 402 } 403 } 404 405 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 406 { 407 void *p; 408 409 if (WARN_ON(!mc->nobjs)) 410 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 411 else 412 p = mc->objects[--mc->nobjs]; 413 BUG_ON(!p); 414 return p; 415 } 416 #endif 417 418 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 419 { 420 mutex_init(&vcpu->mutex); 421 vcpu->cpu = -1; 422 vcpu->kvm = kvm; 423 vcpu->vcpu_id = id; 424 vcpu->pid = NULL; 425 #ifndef __KVM_HAVE_ARCH_WQP 426 rcuwait_init(&vcpu->wait); 427 #endif 428 kvm_async_pf_vcpu_init(vcpu); 429 430 vcpu->pre_pcpu = -1; 431 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 432 433 kvm_vcpu_set_in_spin_loop(vcpu, false); 434 kvm_vcpu_set_dy_eligible(vcpu, false); 435 vcpu->preempted = false; 436 vcpu->ready = false; 437 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 438 vcpu->last_used_slot = NULL; 439 } 440 441 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 442 { 443 kvm_dirty_ring_free(&vcpu->dirty_ring); 444 kvm_arch_vcpu_destroy(vcpu); 445 446 /* 447 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 448 * the vcpu->pid pointer, and at destruction time all file descriptors 449 * are already gone. 450 */ 451 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 452 453 free_page((unsigned long)vcpu->run); 454 kmem_cache_free(kvm_vcpu_cache, vcpu); 455 } 456 457 void kvm_destroy_vcpus(struct kvm *kvm) 458 { 459 unsigned long i; 460 struct kvm_vcpu *vcpu; 461 462 kvm_for_each_vcpu(i, vcpu, kvm) { 463 kvm_vcpu_destroy(vcpu); 464 xa_erase(&kvm->vcpu_array, i); 465 } 466 467 atomic_set(&kvm->online_vcpus, 0); 468 } 469 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 470 471 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 472 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 473 { 474 return container_of(mn, struct kvm, mmu_notifier); 475 } 476 477 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 478 struct mm_struct *mm, 479 unsigned long start, unsigned long end) 480 { 481 struct kvm *kvm = mmu_notifier_to_kvm(mn); 482 int idx; 483 484 idx = srcu_read_lock(&kvm->srcu); 485 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 486 srcu_read_unlock(&kvm->srcu, idx); 487 } 488 489 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 490 491 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 492 unsigned long end); 493 494 struct kvm_hva_range { 495 unsigned long start; 496 unsigned long end; 497 pte_t pte; 498 hva_handler_t handler; 499 on_lock_fn_t on_lock; 500 bool flush_on_ret; 501 bool may_block; 502 }; 503 504 /* 505 * Use a dedicated stub instead of NULL to indicate that there is no callback 506 * function/handler. The compiler technically can't guarantee that a real 507 * function will have a non-zero address, and so it will generate code to 508 * check for !NULL, whereas comparing against a stub will be elided at compile 509 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 
510 */ 511 static void kvm_null_fn(void) 512 { 513 514 } 515 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 516 517 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 518 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 519 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 520 node; \ 521 node = interval_tree_iter_next(node, start, last)) \ 522 523 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 524 const struct kvm_hva_range *range) 525 { 526 bool ret = false, locked = false; 527 struct kvm_gfn_range gfn_range; 528 struct kvm_memory_slot *slot; 529 struct kvm_memslots *slots; 530 int i, idx; 531 532 if (WARN_ON_ONCE(range->end <= range->start)) 533 return 0; 534 535 /* A null handler is allowed if and only if on_lock() is provided. */ 536 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 537 IS_KVM_NULL_FN(range->handler))) 538 return 0; 539 540 idx = srcu_read_lock(&kvm->srcu); 541 542 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 543 struct interval_tree_node *node; 544 545 slots = __kvm_memslots(kvm, i); 546 kvm_for_each_memslot_in_hva_range(node, slots, 547 range->start, range->end - 1) { 548 unsigned long hva_start, hva_end; 549 550 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); 551 hva_start = max(range->start, slot->userspace_addr); 552 hva_end = min(range->end, slot->userspace_addr + 553 (slot->npages << PAGE_SHIFT)); 554 555 /* 556 * To optimize for the likely case where the address 557 * range is covered by zero or one memslots, don't 558 * bother making these conditional (to avoid writes on 559 * the second or later invocation of the handler). 560 */ 561 gfn_range.pte = range->pte; 562 gfn_range.may_block = range->may_block; 563 564 /* 565 * {gfn(page) | page intersects with [hva_start, hva_end)} = 566 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 567 */ 568 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 569 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 570 gfn_range.slot = slot; 571 572 if (!locked) { 573 locked = true; 574 KVM_MMU_LOCK(kvm); 575 if (!IS_KVM_NULL_FN(range->on_lock)) 576 range->on_lock(kvm, range->start, range->end); 577 if (IS_KVM_NULL_FN(range->handler)) 578 break; 579 } 580 ret |= range->handler(kvm, &gfn_range); 581 } 582 } 583 584 if (range->flush_on_ret && ret) 585 kvm_flush_remote_tlbs(kvm); 586 587 if (locked) 588 KVM_MMU_UNLOCK(kvm); 589 590 srcu_read_unlock(&kvm->srcu, idx); 591 592 /* The notifiers are averse to booleans. 
:-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start = start,
		.end = end,
		.pte = pte,
		.handler = handler,
		.on_lock = (void *)kvm_null_fn,
		.flush_on_ret = true,
		.may_block = false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							  unsigned long start,
							  unsigned long end,
							  hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start = start,
		.end = end,
		.pte = __pte(0),
		.handler = handler,
		.on_lock = (void *)kvm_null_fn,
		.flush_on_ret = false,
		.may_block = false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary. Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns. Keep things simple and just find the minimal range
		 * which includes the current and new ranges. As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
678 */ 679 kvm->mmu_notifier_range_start = 680 min(kvm->mmu_notifier_range_start, start); 681 kvm->mmu_notifier_range_end = 682 max(kvm->mmu_notifier_range_end, end); 683 } 684 } 685 686 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 687 const struct mmu_notifier_range *range) 688 { 689 struct kvm *kvm = mmu_notifier_to_kvm(mn); 690 const struct kvm_hva_range hva_range = { 691 .start = range->start, 692 .end = range->end, 693 .pte = __pte(0), 694 .handler = kvm_unmap_gfn_range, 695 .on_lock = kvm_inc_notifier_count, 696 .flush_on_ret = true, 697 .may_block = mmu_notifier_range_blockable(range), 698 }; 699 700 trace_kvm_unmap_hva_range(range->start, range->end); 701 702 /* 703 * Prevent memslot modification between range_start() and range_end() 704 * so that conditionally locking provides the same result in both 705 * functions. Without that guarantee, the mmu_notifier_count 706 * adjustments will be imbalanced. 707 * 708 * Pairs with the decrement in range_end(). 709 */ 710 spin_lock(&kvm->mn_invalidate_lock); 711 kvm->mn_active_invalidate_count++; 712 spin_unlock(&kvm->mn_invalidate_lock); 713 714 __kvm_handle_hva_range(kvm, &hva_range); 715 716 return 0; 717 } 718 719 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, 720 unsigned long end) 721 { 722 /* 723 * This sequence increase will notify the kvm page fault that 724 * the page that is going to be mapped in the spte could have 725 * been freed. 726 */ 727 kvm->mmu_notifier_seq++; 728 smp_wmb(); 729 /* 730 * The above sequence increase must be visible before the 731 * below count decrease, which is ensured by the smp_wmb above 732 * in conjunction with the smp_rmb in mmu_notifier_retry(). 733 */ 734 kvm->mmu_notifier_count--; 735 } 736 737 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 738 const struct mmu_notifier_range *range) 739 { 740 struct kvm *kvm = mmu_notifier_to_kvm(mn); 741 const struct kvm_hva_range hva_range = { 742 .start = range->start, 743 .end = range->end, 744 .pte = __pte(0), 745 .handler = (void *)kvm_null_fn, 746 .on_lock = kvm_dec_notifier_count, 747 .flush_on_ret = false, 748 .may_block = mmu_notifier_range_blockable(range), 749 }; 750 bool wake; 751 752 __kvm_handle_hva_range(kvm, &hva_range); 753 754 /* Pairs with the increment in range_start(). */ 755 spin_lock(&kvm->mn_invalidate_lock); 756 wake = (--kvm->mn_active_invalidate_count == 0); 757 spin_unlock(&kvm->mn_invalidate_lock); 758 759 /* 760 * There can only be one waiter, since the wait happens under 761 * slots_lock. 762 */ 763 if (wake) 764 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 765 766 BUG_ON(kvm->mmu_notifier_count < 0); 767 } 768 769 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 770 struct mm_struct *mm, 771 unsigned long start, 772 unsigned long end) 773 { 774 trace_kvm_age_hva(start, end); 775 776 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); 777 } 778 779 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 780 struct mm_struct *mm, 781 unsigned long start, 782 unsigned long end) 783 { 784 trace_kvm_age_hva(start, end); 785 786 /* 787 * Even though we do not flush TLB, this will still adversely 788 * affect performance on pre-Haswell Intel EPT, where there is 789 * no EPT Access Bit to clear so that we have to tear down EPT 790 * tables instead. If we find this unacceptable, we can always 791 * add a parameter to kvm_age_hva so that it effectively doesn't 792 * do anything on clear_young. 
793 * 794 * Also note that currently we never issue secondary TLB flushes 795 * from clear_young, leaving this job up to the regular system 796 * cadence. If we find this inaccurate, we might come up with a 797 * more sophisticated heuristic later. 798 */ 799 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 800 } 801 802 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 803 struct mm_struct *mm, 804 unsigned long address) 805 { 806 trace_kvm_test_age_hva(address); 807 808 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 809 kvm_test_age_gfn); 810 } 811 812 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 813 struct mm_struct *mm) 814 { 815 struct kvm *kvm = mmu_notifier_to_kvm(mn); 816 int idx; 817 818 idx = srcu_read_lock(&kvm->srcu); 819 kvm_arch_flush_shadow_all(kvm); 820 srcu_read_unlock(&kvm->srcu, idx); 821 } 822 823 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 824 .invalidate_range = kvm_mmu_notifier_invalidate_range, 825 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 826 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 827 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 828 .clear_young = kvm_mmu_notifier_clear_young, 829 .test_young = kvm_mmu_notifier_test_young, 830 .change_pte = kvm_mmu_notifier_change_pte, 831 .release = kvm_mmu_notifier_release, 832 }; 833 834 static int kvm_init_mmu_notifier(struct kvm *kvm) 835 { 836 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 837 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 838 } 839 840 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 841 842 static int kvm_init_mmu_notifier(struct kvm *kvm) 843 { 844 return 0; 845 } 846 847 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 848 849 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 850 static int kvm_pm_notifier_call(struct notifier_block *bl, 851 unsigned long state, 852 void *unused) 853 { 854 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 855 856 return kvm_arch_pm_notifier(kvm, state); 857 } 858 859 static void kvm_init_pm_notifier(struct kvm *kvm) 860 { 861 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 862 /* Suspend KVM before we suspend ftrace, RCU, etc. 
*/ 863 kvm->pm_notifier.priority = INT_MAX; 864 register_pm_notifier(&kvm->pm_notifier); 865 } 866 867 static void kvm_destroy_pm_notifier(struct kvm *kvm) 868 { 869 unregister_pm_notifier(&kvm->pm_notifier); 870 } 871 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 872 static void kvm_init_pm_notifier(struct kvm *kvm) 873 { 874 } 875 876 static void kvm_destroy_pm_notifier(struct kvm *kvm) 877 { 878 } 879 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 880 881 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 882 { 883 if (!memslot->dirty_bitmap) 884 return; 885 886 kvfree(memslot->dirty_bitmap); 887 memslot->dirty_bitmap = NULL; 888 } 889 890 /* This does not remove the slot from struct kvm_memslots data structures */ 891 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 892 { 893 kvm_destroy_dirty_bitmap(slot); 894 895 kvm_arch_free_memslot(kvm, slot); 896 897 kfree(slot); 898 } 899 900 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 901 { 902 struct hlist_node *idnode; 903 struct kvm_memory_slot *memslot; 904 int bkt; 905 906 /* 907 * The same memslot objects live in both active and inactive sets, 908 * arbitrarily free using index '1' so the second invocation of this 909 * function isn't operating over a structure with dangling pointers 910 * (even though this function isn't actually touching them). 911 */ 912 if (!slots->node_idx) 913 return; 914 915 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) 916 kvm_free_memslot(kvm, memslot); 917 } 918 919 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 920 { 921 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 922 case KVM_STATS_TYPE_INSTANT: 923 return 0444; 924 case KVM_STATS_TYPE_CUMULATIVE: 925 case KVM_STATS_TYPE_PEAK: 926 default: 927 return 0644; 928 } 929 } 930 931 932 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 933 { 934 int i; 935 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 936 kvm_vcpu_stats_header.num_desc; 937 938 if (!kvm->debugfs_dentry) 939 return; 940 941 debugfs_remove_recursive(kvm->debugfs_dentry); 942 943 if (kvm->debugfs_stat_data) { 944 for (i = 0; i < kvm_debugfs_num_entries; i++) 945 kfree(kvm->debugfs_stat_data[i]); 946 kfree(kvm->debugfs_stat_data); 947 } 948 } 949 950 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 951 { 952 static DEFINE_MUTEX(kvm_debugfs_lock); 953 struct dentry *dent; 954 char dir_name[ITOA_MAX_LEN * 2]; 955 struct kvm_stat_data *stat_data; 956 const struct _kvm_stats_desc *pdesc; 957 int i, ret; 958 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 959 kvm_vcpu_stats_header.num_desc; 960 961 if (!debugfs_initialized()) 962 return 0; 963 964 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 965 mutex_lock(&kvm_debugfs_lock); 966 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 967 if (dent) { 968 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 969 dput(dent); 970 mutex_unlock(&kvm_debugfs_lock); 971 return 0; 972 } 973 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 974 mutex_unlock(&kvm_debugfs_lock); 975 if (IS_ERR(dent)) 976 return 0; 977 978 kvm->debugfs_dentry = dent; 979 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 980 sizeof(*kvm->debugfs_stat_data), 981 GFP_KERNEL_ACCOUNT); 982 if (!kvm->debugfs_stat_data) 983 return -ENOMEM; 984 985 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 986 pdesc = &kvm_vm_stats_desc[i]; 987 stat_data = kzalloc(sizeof(*stat_data), 
					GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	ret = kvm_arch_create_vm_debugfs(kvm);
	if (ret) {
		kvm_destroy_vm_debugfs(kvm);
		return ret;
	}

	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs is created. When called, kvm->debugfs_dentry
 * should already be set up, so we can create arch-specific debugfs entries
 * under it. Cleanup is done automatically and recursively in
 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r = -ENOMEM;
	int i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);

	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space.
*/ 1095 slots->generation = i; 1096 } 1097 1098 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); 1099 } 1100 1101 for (i = 0; i < KVM_NR_BUSES; i++) { 1102 rcu_assign_pointer(kvm->buses[i], 1103 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 1104 if (!kvm->buses[i]) 1105 goto out_err_no_arch_destroy_vm; 1106 } 1107 1108 kvm->max_halt_poll_ns = halt_poll_ns; 1109 1110 r = kvm_arch_init_vm(kvm, type); 1111 if (r) 1112 goto out_err_no_arch_destroy_vm; 1113 1114 r = hardware_enable_all(); 1115 if (r) 1116 goto out_err_no_disable; 1117 1118 #ifdef CONFIG_HAVE_KVM_IRQFD 1119 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 1120 #endif 1121 1122 r = kvm_init_mmu_notifier(kvm); 1123 if (r) 1124 goto out_err_no_mmu_notifier; 1125 1126 r = kvm_arch_post_init_vm(kvm); 1127 if (r) 1128 goto out_err; 1129 1130 mutex_lock(&kvm_lock); 1131 list_add(&kvm->vm_list, &vm_list); 1132 mutex_unlock(&kvm_lock); 1133 1134 preempt_notifier_inc(); 1135 kvm_init_pm_notifier(kvm); 1136 1137 return kvm; 1138 1139 out_err: 1140 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1141 if (kvm->mmu_notifier.ops) 1142 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1143 #endif 1144 out_err_no_mmu_notifier: 1145 hardware_disable_all(); 1146 out_err_no_disable: 1147 kvm_arch_destroy_vm(kvm); 1148 out_err_no_arch_destroy_vm: 1149 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1150 for (i = 0; i < KVM_NR_BUSES; i++) 1151 kfree(kvm_get_bus(kvm, i)); 1152 cleanup_srcu_struct(&kvm->irq_srcu); 1153 out_err_no_irq_srcu: 1154 cleanup_srcu_struct(&kvm->srcu); 1155 out_err_no_srcu: 1156 kvm_arch_free_vm(kvm); 1157 mmdrop(current->mm); 1158 return ERR_PTR(r); 1159 } 1160 1161 static void kvm_destroy_devices(struct kvm *kvm) 1162 { 1163 struct kvm_device *dev, *tmp; 1164 1165 /* 1166 * We do not need to take the kvm->lock here, because nobody else 1167 * has a reference to the struct kvm at this point and therefore 1168 * cannot access the devices list anyhow. 1169 */ 1170 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 1171 list_del(&dev->vm_node); 1172 dev->ops->destroy(dev); 1173 } 1174 } 1175 1176 static void kvm_destroy_vm(struct kvm *kvm) 1177 { 1178 int i; 1179 struct mm_struct *mm = kvm->mm; 1180 1181 kvm_destroy_pm_notifier(kvm); 1182 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 1183 kvm_destroy_vm_debugfs(kvm); 1184 kvm_arch_sync_events(kvm); 1185 mutex_lock(&kvm_lock); 1186 list_del(&kvm->vm_list); 1187 mutex_unlock(&kvm_lock); 1188 kvm_arch_pre_destroy_vm(kvm); 1189 1190 kvm_free_irq_routing(kvm); 1191 for (i = 0; i < KVM_NR_BUSES; i++) { 1192 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1193 1194 if (bus) 1195 kvm_io_bus_destroy(bus); 1196 kvm->buses[i] = NULL; 1197 } 1198 kvm_coalesced_mmio_free(kvm); 1199 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1200 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1201 /* 1202 * At this point, pending calls to invalidate_range_start() 1203 * have completed but no more MMU notifiers will run, so 1204 * mn_active_invalidate_count may remain unbalanced. 1205 * No threads can be waiting in install_new_memslots as the 1206 * last reference on KVM has been dropped, but freeing 1207 * memslots would deadlock without this manual intervention. 
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not being destroyed; this is a safe version of
 * kvm_get_kvm(). Return true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner. In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL. This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
1301 */ 1302 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, 1303 struct kvm_memory_slot *b) 1304 { 1305 if (WARN_ON_ONCE(!a && !b)) 1306 return 0; 1307 1308 if (!a) 1309 return b->as_id; 1310 if (!b) 1311 return a->as_id; 1312 1313 WARN_ON_ONCE(a->as_id != b->as_id); 1314 return a->as_id; 1315 } 1316 1317 static void kvm_insert_gfn_node(struct kvm_memslots *slots, 1318 struct kvm_memory_slot *slot) 1319 { 1320 struct rb_root *gfn_tree = &slots->gfn_tree; 1321 struct rb_node **node, *parent; 1322 int idx = slots->node_idx; 1323 1324 parent = NULL; 1325 for (node = &gfn_tree->rb_node; *node; ) { 1326 struct kvm_memory_slot *tmp; 1327 1328 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); 1329 parent = *node; 1330 if (slot->base_gfn < tmp->base_gfn) 1331 node = &(*node)->rb_left; 1332 else if (slot->base_gfn > tmp->base_gfn) 1333 node = &(*node)->rb_right; 1334 else 1335 BUG(); 1336 } 1337 1338 rb_link_node(&slot->gfn_node[idx], parent, node); 1339 rb_insert_color(&slot->gfn_node[idx], gfn_tree); 1340 } 1341 1342 static void kvm_erase_gfn_node(struct kvm_memslots *slots, 1343 struct kvm_memory_slot *slot) 1344 { 1345 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); 1346 } 1347 1348 static void kvm_replace_gfn_node(struct kvm_memslots *slots, 1349 struct kvm_memory_slot *old, 1350 struct kvm_memory_slot *new) 1351 { 1352 int idx = slots->node_idx; 1353 1354 WARN_ON_ONCE(old->base_gfn != new->base_gfn); 1355 1356 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], 1357 &slots->gfn_tree); 1358 } 1359 1360 /* 1361 * Replace @old with @new in the inactive memslots. 1362 * 1363 * With NULL @old this simply adds @new. 1364 * With NULL @new this simply removes @old. 1365 * 1366 * If @new is non-NULL its hva_node[slots_idx] range has to be set 1367 * appropriately. 1368 */ 1369 static void kvm_replace_memslot(struct kvm *kvm, 1370 struct kvm_memory_slot *old, 1371 struct kvm_memory_slot *new) 1372 { 1373 int as_id = kvm_memslots_get_as_id(old, new); 1374 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1375 int idx = slots->node_idx; 1376 1377 if (old) { 1378 hash_del(&old->id_node[idx]); 1379 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); 1380 1381 if ((long)old == atomic_long_read(&slots->last_used_slot)) 1382 atomic_long_set(&slots->last_used_slot, (long)new); 1383 1384 if (!new) { 1385 kvm_erase_gfn_node(slots, old); 1386 return; 1387 } 1388 } 1389 1390 /* 1391 * Initialize @new's hva range. Do this even when replacing an @old 1392 * slot, kvm_copy_memslot() deliberately does not touch node data. 1393 */ 1394 new->hva_node[idx].start = new->userspace_addr; 1395 new->hva_node[idx].last = new->userspace_addr + 1396 (new->npages << PAGE_SHIFT) - 1; 1397 1398 /* 1399 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), 1400 * hva_node needs to be swapped with remove+insert even though hva can't 1401 * change when replacing an existing slot. 1402 */ 1403 hash_add(slots->id_hash, &new->id_node[idx], new->id); 1404 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); 1405 1406 /* 1407 * If the memslot gfn is unchanged, rb_replace_node() can be used to 1408 * switch the node in the gfn tree instead of removing the old and 1409 * inserting the new as two separate operations. Replacement is a 1410 * single O(1) operation versus two O(log(n)) operations for 1411 * remove+insert. 
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot. Must be released before the SRCU
	 * synchronization below in order to avoid deadlock with another
	 * thread acquiring the slots_arch_lock in an SRCU critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces. This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces. We do not
	 * need a global counter for that; instead, the generation space is
	 * evenly split across address spaces. For example, with two address
	 * spaces, address space 0 will use generations 0, 2, 4, ... while
	 * address space 1 will use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int r;

	/*
	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
	 * will be freed on "commit". If logging is enabled in both old and
	 * new, reuse the existing bitmap. If logging is enabled only in the
	 * new and KVM isn't using a ring buffer, allocate and initialize a
	 * new bitmap.
	 */
	if (change != KVM_MR_DELETE) {
		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
			new->dirty_bitmap = NULL;
		else if (old && old->dirty_bitmap)
			new->dirty_bitmap = old->dirty_bitmap;
		else if (!kvm->dirty_ring_size) {
			r = kvm_alloc_dirty_bitmap(new);
			if (r)
				return r;

			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
				bitmap_set(new->dirty_bitmap, 0, new->npages);
		}
	}

	r = kvm_arch_prepare_memory_region(kvm, old, new, change);

	/* Free the bitmap on failure if it was allocated above. */
	if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
		kvm_destroy_dirty_bitmap(new);

	return r;
}

static void kvm_commit_memory_region(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	/*
	 * Update the total number of memslot pages before calling the arch
	 * hook so that architectures can consume the result directly.
	 */
	if (change == KVM_MR_DELETE)
		kvm->nr_memslot_pages -= old->npages;
	else if (change == KVM_MR_CREATE)
		kvm->nr_memslot_pages += new->npages;

	kvm_arch_commit_memory_region(kvm, old, new, change);

	switch (change) {
	case KVM_MR_CREATE:
		/* Nothing more to do. */
		break;
	case KVM_MR_DELETE:
		/* Free the old memslot and all its metadata. */
		kvm_free_memslot(kvm, old);
		break;
	case KVM_MR_MOVE:
	case KVM_MR_FLAGS_ONLY:
		/*
		 * Free the dirty bitmap as needed; the below check encompasses
		 * both the flags and whether a ring buffer is being used.
		 */
		if (old->dirty_bitmap && !new->dirty_bitmap)
			kvm_destroy_dirty_bitmap(old);

		/*
		 * The final quirk. Free the detached, old slot, but only its
		 * memory, not any metadata. Metadata, including arch specific
		 * data, may be reused by @new.
		 */
		kfree(old);
		break;
	default:
		BUG();
	}
}

/*
 * Activate @new, which must be installed in the inactive slots by the caller,
 * by swapping the active slots and then propagating @new to @old once @old is
 * unreachable and can be safely modified.
 *
 * With NULL @old this simply adds @new to @active (while swapping the sets).
 * With NULL @new this simply removes @old from @active and frees it
 * (while also swapping the sets).
 */
static void kvm_activate_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);

	kvm_swap_active_memslots(kvm, as_id);

	/* Propagate the new memslot to the now inactive memslots. */
	kvm_replace_memslot(kvm, old, new);
}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{
	dest->base_gfn = src->base_gfn;
	dest->npages = src->npages;
	dest->dirty_bitmap = src->dirty_bitmap;
	dest->arch = src->arch;
	dest->userspace_addr = src->userspace_addr;
	dest->flags = src->flags;
	dest->id = src->id;
	dest->as_id = src->as_id;
}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID.
As with all memslot modifications, 1617 * this must be done on an unreachable slot to avoid modifying the 1618 * current slot in the active tree. 1619 */ 1620 kvm_copy_memslot(invalid_slot, old); 1621 invalid_slot->flags |= KVM_MEMSLOT_INVALID; 1622 kvm_replace_memslot(kvm, old, invalid_slot); 1623 1624 /* 1625 * Activate the slot that is now marked INVALID, but don't propagate 1626 * the slot to the now inactive slots. The slot is either going to be 1627 * deleted or recreated as a new slot. 1628 */ 1629 kvm_swap_active_memslots(kvm, old->as_id); 1630 1631 /* 1632 * From this point no new shadow pages pointing to a deleted, or moved, 1633 * memslot will be created. Validation of sp->gfn happens in: 1634 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1635 * - kvm_is_visible_gfn (mmu_check_root) 1636 */ 1637 kvm_arch_flush_shadow_memslot(kvm, old); 1638 1639 /* Was released by kvm_swap_active_memslots, reacquire. */ 1640 mutex_lock(&kvm->slots_arch_lock); 1641 1642 /* 1643 * Copy the arch-specific field of the newly-installed slot back to the 1644 * old slot as the arch data could have changed between releasing 1645 * slots_arch_lock in install_new_memslots() and re-acquiring the lock 1646 * above. Writers are required to retrieve memslots *after* acquiring 1647 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. 1648 */ 1649 old->arch = invalid_slot->arch; 1650 } 1651 1652 static void kvm_create_memslot(struct kvm *kvm, 1653 struct kvm_memory_slot *new) 1654 { 1655 /* Add the new memslot to the inactive set and activate. */ 1656 kvm_replace_memslot(kvm, NULL, new); 1657 kvm_activate_memslot(kvm, NULL, new); 1658 } 1659 1660 static void kvm_delete_memslot(struct kvm *kvm, 1661 struct kvm_memory_slot *old, 1662 struct kvm_memory_slot *invalid_slot) 1663 { 1664 /* 1665 * Remove the old memslot (in the inactive memslots) by passing NULL as 1666 * the "new" slot, and for the invalid version in the active slots. 1667 */ 1668 kvm_replace_memslot(kvm, old, NULL); 1669 kvm_activate_memslot(kvm, invalid_slot, NULL); 1670 } 1671 1672 static void kvm_move_memslot(struct kvm *kvm, 1673 struct kvm_memory_slot *old, 1674 struct kvm_memory_slot *new, 1675 struct kvm_memory_slot *invalid_slot) 1676 { 1677 /* 1678 * Replace the old memslot in the inactive slots, and then swap slots 1679 * and replace the current INVALID with the new as well. 1680 */ 1681 kvm_replace_memslot(kvm, old, new); 1682 kvm_activate_memslot(kvm, invalid_slot, new); 1683 } 1684 1685 static void kvm_update_flags_memslot(struct kvm *kvm, 1686 struct kvm_memory_slot *old, 1687 struct kvm_memory_slot *new) 1688 { 1689 /* 1690 * Similar to the MOVE case, but the slot doesn't need to be zapped as 1691 * an intermediate step. Instead, the old memslot is simply replaced 1692 * with a new, updated copy in both memslot sets. 1693 */ 1694 kvm_replace_memslot(kvm, old, new); 1695 kvm_activate_memslot(kvm, old, new); 1696 } 1697 1698 static int kvm_set_memslot(struct kvm *kvm, 1699 struct kvm_memory_slot *old, 1700 struct kvm_memory_slot *new, 1701 enum kvm_mr_change change) 1702 { 1703 struct kvm_memory_slot *invalid_slot; 1704 int r; 1705 1706 /* 1707 * Released in kvm_swap_active_memslots. 1708 * 1709 * Must be held from before the current memslots are copied until 1710 * after the new memslots are installed with rcu_assign_pointer, 1711 * then released before the synchronize srcu in kvm_swap_active_memslots. 
	 *
	 * When modifying memslots outside of the slots_lock, must be held
	 * before reading the pointer to the current memslots until after all
	 * changes to those memslots are complete.
	 *
	 * These rules ensure that installing new memslots does not lose
	 * changes made to the previous memslots.
	 */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Invalidate the old slot if it's being deleted or moved. This is
	 * done prior to actually deleting/moving the memslot to allow vCPUs to
	 * continue running by ensuring there are no mappings or shadow pages
	 * for the memslot when it is deleted/moved. Without pre-invalidation
	 * (and without a lock), a window would exist between effecting the
	 * delete/move and committing the changes in arch code where KVM or a
	 * guest could access a non-existent memslot.
	 *
	 * Modifications are done on a temporary, unreachable slot. The old
	 * slot needs to be preserved in case a later step fails and the
	 * invalidation needs to be reverted.
	 */
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
		if (!invalid_slot) {
			mutex_unlock(&kvm->slots_arch_lock);
			return -ENOMEM;
		}
		kvm_invalidate_memslot(kvm, old, invalid_slot);
	}

	r = kvm_prepare_memory_region(kvm, old, new, change);
	if (r) {
		/*
		 * For DELETE/MOVE, revert the above INVALID change. No
		 * modifications required since the original slot was preserved
		 * in the inactive slots. Changing the active memslots also
		 * releases slots_arch_lock.
		 */
		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
			kvm_activate_memslot(kvm, invalid_slot, old);
			kfree(invalid_slot);
		} else {
			mutex_unlock(&kvm->slots_arch_lock);
		}
		return r;
	}

	/*
	 * For DELETE and MOVE, the invalid slot is now active as the INVALID
	 * version of the old slot. MOVE is particularly special as it reuses
	 * the old slot and returns a copy of the old slot (in invalid_slot).
	 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
	 * old slot is detached but otherwise preserved.
	 */
	if (change == KVM_MR_CREATE)
		kvm_create_memslot(kvm, new);
	else if (change == KVM_MR_DELETE)
		kvm_delete_memslot(kvm, old, invalid_slot);
	else if (change == KVM_MR_MOVE)
		kvm_move_memslot(kvm, old, new, invalid_slot);
	else if (change == KVM_MR_FLAGS_ONLY)
		kvm_update_flags_memslot(kvm, old, new);
	else
		BUG();

	/* Free the temporary INVALID slot used for DELETE and MOVE. */
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
		kfree(invalid_slot);

	/*
	 * No need to refresh new->arch, changes after dropping slots_arch_lock
	 * will directly hit the final, active memslot. Architectures are
	 * responsible for knowing that new->arch may be stale.
	 */
	kvm_commit_memory_region(kvm, old, new, change);

	return 0;
}

static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
				      gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		if (iter.slot->id != id)
			return true;
	}

	return false;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
1809 * 1810 * Discontiguous memory is allowed, mostly for framebuffers. 1811 * 1812 * Must be called holding kvm->slots_lock for write. 1813 */ 1814 int __kvm_set_memory_region(struct kvm *kvm, 1815 const struct kvm_userspace_memory_region *mem) 1816 { 1817 struct kvm_memory_slot *old, *new; 1818 struct kvm_memslots *slots; 1819 enum kvm_mr_change change; 1820 unsigned long npages; 1821 gfn_t base_gfn; 1822 int as_id, id; 1823 int r; 1824 1825 r = check_memory_region_flags(mem); 1826 if (r) 1827 return r; 1828 1829 as_id = mem->slot >> 16; 1830 id = (u16)mem->slot; 1831 1832 /* General sanity checks */ 1833 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1834 (mem->memory_size != (unsigned long)mem->memory_size)) 1835 return -EINVAL; 1836 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1837 return -EINVAL; 1838 /* We can read the guest memory with __xxx_user() later on. */ 1839 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1840 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1841 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1842 mem->memory_size)) 1843 return -EINVAL; 1844 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1845 return -EINVAL; 1846 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1847 return -EINVAL; 1848 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1849 return -EINVAL; 1850 1851 slots = __kvm_memslots(kvm, as_id); 1852 1853 /* 1854 * Note, the old memslot (and the pointer itself!) may be invalidated 1855 * and/or destroyed by kvm_set_memslot(). 1856 */ 1857 old = id_to_memslot(slots, id); 1858 1859 if (!mem->memory_size) { 1860 if (!old || !old->npages) 1861 return -EINVAL; 1862 1863 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1864 return -EIO; 1865 1866 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1867 } 1868 1869 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1870 npages = (mem->memory_size >> PAGE_SHIFT); 1871 1872 if (!old || !old->npages) { 1873 change = KVM_MR_CREATE; 1874 1875 /* 1876 * To simplify KVM internals, the total number of pages across 1877 * all memslots must fit in an unsigned long. 1878 */ 1879 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 1880 return -EINVAL; 1881 } else { /* Modify an existing slot. */ 1882 if ((mem->userspace_addr != old->userspace_addr) || 1883 (npages != old->npages) || 1884 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 1885 return -EINVAL; 1886 1887 if (base_gfn != old->base_gfn) 1888 change = KVM_MR_MOVE; 1889 else if (mem->flags != old->flags) 1890 change = KVM_MR_FLAGS_ONLY; 1891 else /* Nothing to change. */ 1892 return 0; 1893 } 1894 1895 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 1896 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 1897 return -EEXIST; 1898 1899 /* Allocate a slot that will persist in the memslot. 
*/ 1900 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 1901 if (!new) 1902 return -ENOMEM; 1903 1904 new->as_id = as_id; 1905 new->id = id; 1906 new->base_gfn = base_gfn; 1907 new->npages = npages; 1908 new->flags = mem->flags; 1909 new->userspace_addr = mem->userspace_addr; 1910 1911 r = kvm_set_memslot(kvm, old, new, change); 1912 if (r) 1913 kfree(new); 1914 return r; 1915 } 1916 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1917 1918 int kvm_set_memory_region(struct kvm *kvm, 1919 const struct kvm_userspace_memory_region *mem) 1920 { 1921 int r; 1922 1923 mutex_lock(&kvm->slots_lock); 1924 r = __kvm_set_memory_region(kvm, mem); 1925 mutex_unlock(&kvm->slots_lock); 1926 return r; 1927 } 1928 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1929 1930 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1931 struct kvm_userspace_memory_region *mem) 1932 { 1933 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1934 return -EINVAL; 1935 1936 return kvm_set_memory_region(kvm, mem); 1937 } 1938 1939 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1940 /** 1941 * kvm_get_dirty_log - get a snapshot of dirty pages 1942 * @kvm: pointer to kvm instance 1943 * @log: slot id and address to which we copy the log 1944 * @is_dirty: set to '1' if any dirty pages were found 1945 * @memslot: set to the associated memslot, always valid on success 1946 */ 1947 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 1948 int *is_dirty, struct kvm_memory_slot **memslot) 1949 { 1950 struct kvm_memslots *slots; 1951 int i, as_id, id; 1952 unsigned long n; 1953 unsigned long any = 0; 1954 1955 /* Dirty ring tracking is exclusive to dirty log tracking */ 1956 if (kvm->dirty_ring_size) 1957 return -ENXIO; 1958 1959 *memslot = NULL; 1960 *is_dirty = 0; 1961 1962 as_id = log->slot >> 16; 1963 id = (u16)log->slot; 1964 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1965 return -EINVAL; 1966 1967 slots = __kvm_memslots(kvm, as_id); 1968 *memslot = id_to_memslot(slots, id); 1969 if (!(*memslot) || !(*memslot)->dirty_bitmap) 1970 return -ENOENT; 1971 1972 kvm_arch_sync_dirty_log(kvm, *memslot); 1973 1974 n = kvm_dirty_bitmap_bytes(*memslot); 1975 1976 for (i = 0; !any && i < n/sizeof(long); ++i) 1977 any = (*memslot)->dirty_bitmap[i]; 1978 1979 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 1980 return -EFAULT; 1981 1982 if (any) 1983 *is_dirty = 1; 1984 return 0; 1985 } 1986 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1987 1988 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 1989 /** 1990 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 1991 * and reenable dirty page tracking for the corresponding pages. 1992 * @kvm: pointer to kvm instance 1993 * @log: slot id and address to which we copy the log 1994 * 1995 * We need to keep it in mind that VCPU threads can write to the bitmap 1996 * concurrently. So, to avoid losing track of dirty pages we keep the 1997 * following order: 1998 * 1999 * 1. Take a snapshot of the bit and clear it if needed. 2000 * 2. Write protect the corresponding page. 2001 * 3. Copy the snapshot to the userspace. 2002 * 4. Upon return caller flushes TLB's if needed. 2003 * 2004 * Between 2 and 4, the guest may write to the page using the remaining TLB 2005 * entry. This is not a problem because the page is reported dirty using 2006 * the snapshot taken before and step 4 ensures that writes done after 2007 * exiting to userspace will be logged for the next call. 
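 *
 * For orientation only, a sketch of the matching userspace side (hypothetical
 * VMM snippet, not part of KVM; vm_fd, slot_id and bitmap are made up). The
 * VMM walks the returned bitmap, copies out the pages it marks and repeats;
 * pages re-dirtied after the snapshot show up in the next iteration, exactly
 * as steps 1-4 above guarantee:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);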
2008 * 2009 */ 2010 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 2011 { 2012 struct kvm_memslots *slots; 2013 struct kvm_memory_slot *memslot; 2014 int i, as_id, id; 2015 unsigned long n; 2016 unsigned long *dirty_bitmap; 2017 unsigned long *dirty_bitmap_buffer; 2018 bool flush; 2019 2020 /* Dirty ring tracking is exclusive to dirty log tracking */ 2021 if (kvm->dirty_ring_size) 2022 return -ENXIO; 2023 2024 as_id = log->slot >> 16; 2025 id = (u16)log->slot; 2026 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2027 return -EINVAL; 2028 2029 slots = __kvm_memslots(kvm, as_id); 2030 memslot = id_to_memslot(slots, id); 2031 if (!memslot || !memslot->dirty_bitmap) 2032 return -ENOENT; 2033 2034 dirty_bitmap = memslot->dirty_bitmap; 2035 2036 kvm_arch_sync_dirty_log(kvm, memslot); 2037 2038 n = kvm_dirty_bitmap_bytes(memslot); 2039 flush = false; 2040 if (kvm->manual_dirty_log_protect) { 2041 /* 2042 * Unlike kvm_get_dirty_log, we always return false in *flush, 2043 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 2044 * is some code duplication between this function and 2045 * kvm_get_dirty_log, but hopefully all architecture 2046 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log 2047 * can be eliminated. 2048 */ 2049 dirty_bitmap_buffer = dirty_bitmap; 2050 } else { 2051 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2052 memset(dirty_bitmap_buffer, 0, n); 2053 2054 KVM_MMU_LOCK(kvm); 2055 for (i = 0; i < n / sizeof(long); i++) { 2056 unsigned long mask; 2057 gfn_t offset; 2058 2059 if (!dirty_bitmap[i]) 2060 continue; 2061 2062 flush = true; 2063 mask = xchg(&dirty_bitmap[i], 0); 2064 dirty_bitmap_buffer[i] = mask; 2065 2066 offset = i * BITS_PER_LONG; 2067 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2068 offset, mask); 2069 } 2070 KVM_MMU_UNLOCK(kvm); 2071 } 2072 2073 if (flush) 2074 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2075 2076 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2077 return -EFAULT; 2078 return 0; 2079 } 2080 2081 2082 /** 2083 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2084 * @kvm: kvm instance 2085 * @log: slot id and address to which we copy the log 2086 * 2087 * Steps 1-4 below provide general overview of dirty page logging. See 2088 * kvm_get_dirty_log_protect() function description for additional details. 2089 * 2090 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 2091 * always flush the TLB (step 4) even if previous step failed and the dirty 2092 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 2093 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 2094 * writes will be marked dirty for next log read. 2095 * 2096 * 1. Take a snapshot of the bit and clear it if needed. 2097 * 2. Write protect the corresponding page. 2098 * 3. Copy the snapshot to the userspace. 2099 * 4. Flush TLB's if needed. 2100 */ 2101 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2102 struct kvm_dirty_log *log) 2103 { 2104 int r; 2105 2106 mutex_lock(&kvm->slots_lock); 2107 2108 r = kvm_get_dirty_log_protect(kvm, log); 2109 2110 mutex_unlock(&kvm->slots_lock); 2111 return r; 2112 } 2113 2114 /** 2115 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2116 * and reenable dirty page tracking for the corresponding pages. 
2117 * @kvm: pointer to kvm instance 2118 * @log: slot id and address from which to fetch the bitmap of dirty pages 2119 */ 2120 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2121 struct kvm_clear_dirty_log *log) 2122 { 2123 struct kvm_memslots *slots; 2124 struct kvm_memory_slot *memslot; 2125 int as_id, id; 2126 gfn_t offset; 2127 unsigned long i, n; 2128 unsigned long *dirty_bitmap; 2129 unsigned long *dirty_bitmap_buffer; 2130 bool flush; 2131 2132 /* Dirty ring tracking is exclusive to dirty log tracking */ 2133 if (kvm->dirty_ring_size) 2134 return -ENXIO; 2135 2136 as_id = log->slot >> 16; 2137 id = (u16)log->slot; 2138 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2139 return -EINVAL; 2140 2141 if (log->first_page & 63) 2142 return -EINVAL; 2143 2144 slots = __kvm_memslots(kvm, as_id); 2145 memslot = id_to_memslot(slots, id); 2146 if (!memslot || !memslot->dirty_bitmap) 2147 return -ENOENT; 2148 2149 dirty_bitmap = memslot->dirty_bitmap; 2150 2151 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2152 2153 if (log->first_page > memslot->npages || 2154 log->num_pages > memslot->npages - log->first_page || 2155 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2156 return -EINVAL; 2157 2158 kvm_arch_sync_dirty_log(kvm, memslot); 2159 2160 flush = false; 2161 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2162 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2163 return -EFAULT; 2164 2165 KVM_MMU_LOCK(kvm); 2166 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2167 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2168 i++, offset += BITS_PER_LONG) { 2169 unsigned long mask = *dirty_bitmap_buffer++; 2170 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2171 if (!mask) 2172 continue; 2173 2174 mask &= atomic_long_fetch_andnot(mask, p); 2175 2176 /* 2177 * mask contains the bits that really have been cleared. This 2178 * never includes any bits beyond the length of the memslot (if 2179 * the length is not aligned to 64 pages), therefore it is not 2180 * a problem if userspace sets them in log->dirty_bitmap. 2181 */ 2182 if (mask) { 2183 flush = true; 2184 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2185 offset, mask); 2186 } 2187 } 2188 KVM_MMU_UNLOCK(kvm); 2189 2190 if (flush) 2191 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2192 2193 return 0; 2194 } 2195 2196 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2197 struct kvm_clear_dirty_log *log) 2198 { 2199 int r; 2200 2201 mutex_lock(&kvm->slots_lock); 2202 2203 r = kvm_clear_dirty_log_protect(kvm, log); 2204 2205 mutex_unlock(&kvm->slots_lock); 2206 return r; 2207 } 2208 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2209 2210 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2211 { 2212 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2213 } 2214 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2215 2216 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2217 { 2218 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2219 u64 gen = slots->generation; 2220 struct kvm_memory_slot *slot; 2221 2222 /* 2223 * This also protects against using a memslot from a different address space, 2224 * since different address spaces have different generation numbers. 
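 *
 * A hedged illustration of the fast path this enables: back-to-back
 * translations such as
 *
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn + 1);
 *
 * normally resolve the second call from vcpu->last_used_slot and skip the
 * full search_memslots() walk below (assuming both gfns fall in one slot).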
2225 */ 2226 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2227 vcpu->last_used_slot = NULL; 2228 vcpu->last_used_slot_gen = gen; 2229 } 2230 2231 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2232 if (slot) 2233 return slot; 2234 2235 /* 2236 * Fall back to searching all memslots. We purposely use 2237 * search_memslots() instead of __gfn_to_memslot() to avoid 2238 * thrashing the VM-wide last_used_slot in kvm_memslots. 2239 */ 2240 slot = search_memslots(slots, gfn, false); 2241 if (slot) { 2242 vcpu->last_used_slot = slot; 2243 return slot; 2244 } 2245 2246 return NULL; 2247 } 2248 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); 2249 2250 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2251 { 2252 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2253 2254 return kvm_is_visible_memslot(memslot); 2255 } 2256 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2257 2258 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2259 { 2260 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2261 2262 return kvm_is_visible_memslot(memslot); 2263 } 2264 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2265 2266 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2267 { 2268 struct vm_area_struct *vma; 2269 unsigned long addr, size; 2270 2271 size = PAGE_SIZE; 2272 2273 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2274 if (kvm_is_error_hva(addr)) 2275 return PAGE_SIZE; 2276 2277 mmap_read_lock(current->mm); 2278 vma = find_vma(current->mm, addr); 2279 if (!vma) 2280 goto out; 2281 2282 size = vma_kernel_pagesize(vma); 2283 2284 out: 2285 mmap_read_unlock(current->mm); 2286 2287 return size; 2288 } 2289 2290 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2291 { 2292 return slot->flags & KVM_MEM_READONLY; 2293 } 2294 2295 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2296 gfn_t *nr_pages, bool write) 2297 { 2298 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2299 return KVM_HVA_ERR_BAD; 2300 2301 if (memslot_is_readonly(slot) && write) 2302 return KVM_HVA_ERR_RO_BAD; 2303 2304 if (nr_pages) 2305 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2306 2307 return __gfn_to_hva_memslot(slot, gfn); 2308 } 2309 2310 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2311 gfn_t *nr_pages) 2312 { 2313 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2314 } 2315 2316 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2317 gfn_t gfn) 2318 { 2319 return gfn_to_hva_many(slot, gfn, NULL); 2320 } 2321 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2322 2323 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2324 { 2325 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2326 } 2327 EXPORT_SYMBOL_GPL(gfn_to_hva); 2328 2329 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2330 { 2331 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2332 } 2333 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2334 2335 /* 2336 * Return the hva of a @gfn and the R/W attribute if possible. 
2337 * 2338 * @slot: the kvm_memory_slot which contains @gfn 2339 * @gfn: the gfn to be translated 2340 * @writable: used to return the read/write attribute of the @slot if the hva 2341 * is valid and @writable is not NULL 2342 */ 2343 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2344 gfn_t gfn, bool *writable) 2345 { 2346 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2347 2348 if (!kvm_is_error_hva(hva) && writable) 2349 *writable = !memslot_is_readonly(slot); 2350 2351 return hva; 2352 } 2353 2354 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2355 { 2356 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2357 2358 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2359 } 2360 2361 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2362 { 2363 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2364 2365 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2366 } 2367 2368 static inline int check_user_page_hwpoison(unsigned long addr) 2369 { 2370 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2371 2372 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2373 return rc == -EHWPOISON; 2374 } 2375 2376 /* 2377 * The fast path to get the writable pfn which will be stored in @pfn, 2378 * true indicates success, otherwise false is returned. It's also the 2379 * only part that runs if we can in atomic context. 2380 */ 2381 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2382 bool *writable, kvm_pfn_t *pfn) 2383 { 2384 struct page *page[1]; 2385 2386 /* 2387 * Fast pin a writable pfn only if it is a write fault request 2388 * or the caller allows to map a writable pfn for a read fault 2389 * request. 2390 */ 2391 if (!(write_fault || writable)) 2392 return false; 2393 2394 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2395 *pfn = page_to_pfn(page[0]); 2396 2397 if (writable) 2398 *writable = true; 2399 return true; 2400 } 2401 2402 return false; 2403 } 2404 2405 /* 2406 * The slow path to get the pfn of the specified host virtual address, 2407 * 1 indicates success, -errno is returned if error is detected. 
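 *
 * (With @async the pin is attempted with FOLL_NOWAIT, so this can also come
 * back without a page instead of sleeping for I/O; hva_to_pfn() may then set
 * *async so the caller can bring the page in asynchronously.)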
2408 */ 2409 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2410 bool *writable, kvm_pfn_t *pfn) 2411 { 2412 unsigned int flags = FOLL_HWPOISON; 2413 struct page *page; 2414 int npages = 0; 2415 2416 might_sleep(); 2417 2418 if (writable) 2419 *writable = write_fault; 2420 2421 if (write_fault) 2422 flags |= FOLL_WRITE; 2423 if (async) 2424 flags |= FOLL_NOWAIT; 2425 2426 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2427 if (npages != 1) 2428 return npages; 2429 2430 /* map read fault as writable if possible */ 2431 if (unlikely(!write_fault) && writable) { 2432 struct page *wpage; 2433 2434 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2435 *writable = true; 2436 put_page(page); 2437 page = wpage; 2438 } 2439 } 2440 *pfn = page_to_pfn(page); 2441 return npages; 2442 } 2443 2444 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2445 { 2446 if (unlikely(!(vma->vm_flags & VM_READ))) 2447 return false; 2448 2449 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2450 return false; 2451 2452 return true; 2453 } 2454 2455 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2456 { 2457 if (kvm_is_reserved_pfn(pfn)) 2458 return 1; 2459 return get_page_unless_zero(pfn_to_page(pfn)); 2460 } 2461 2462 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2463 unsigned long addr, bool *async, 2464 bool write_fault, bool *writable, 2465 kvm_pfn_t *p_pfn) 2466 { 2467 kvm_pfn_t pfn; 2468 pte_t *ptep; 2469 spinlock_t *ptl; 2470 int r; 2471 2472 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2473 if (r) { 2474 /* 2475 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2476 * not call the fault handler, so do it here. 2477 */ 2478 bool unlocked = false; 2479 r = fixup_user_fault(current->mm, addr, 2480 (write_fault ? FAULT_FLAG_WRITE : 0), 2481 &unlocked); 2482 if (unlocked) 2483 return -EAGAIN; 2484 if (r) 2485 return r; 2486 2487 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2488 if (r) 2489 return r; 2490 } 2491 2492 if (write_fault && !pte_write(*ptep)) { 2493 pfn = KVM_PFN_ERR_RO_FAULT; 2494 goto out; 2495 } 2496 2497 if (writable) 2498 *writable = pte_write(*ptep); 2499 pfn = pte_pfn(*ptep); 2500 2501 /* 2502 * Get a reference here because callers of *hva_to_pfn* and 2503 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2504 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2505 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2506 * simply do nothing for reserved pfns. 2507 * 2508 * Whoever called remap_pfn_range is also going to call e.g. 2509 * unmap_mapping_range before the underlying pages are freed, 2510 * causing a call to our MMU notifier. 2511 * 2512 * Certain IO or PFNMAP mappings can be backed with valid 2513 * struct pages, but be allocated without refcounting e.g., 2514 * tail pages of non-compound higher order allocations, which 2515 * would then underflow the refcount when the caller does the 2516 * required put_page. Don't allow those pages here. 2517 */ 2518 if (!kvm_try_get_pfn(pfn)) 2519 r = -EFAULT; 2520 2521 out: 2522 pte_unmap_unlock(ptep, ptl); 2523 *p_pfn = pfn; 2524 2525 return r; 2526 } 2527 2528 /* 2529 * Pin guest page in memory and return its pfn. 
2530 * @addr: host virtual address which maps memory to the guest 2531 * @atomic: whether this function can sleep 2532 * @async: whether this function need to wait IO complete if the 2533 * host page is not in the memory 2534 * @write_fault: whether we should get a writable host page 2535 * @writable: whether it allows to map a writable host page for !@write_fault 2536 * 2537 * The function will map a writable host page for these two cases: 2538 * 1): @write_fault = true 2539 * 2): @write_fault = false && @writable, @writable will tell the caller 2540 * whether the mapping is writable. 2541 */ 2542 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 2543 bool write_fault, bool *writable) 2544 { 2545 struct vm_area_struct *vma; 2546 kvm_pfn_t pfn = 0; 2547 int npages, r; 2548 2549 /* we can do it either atomically or asynchronously, not both */ 2550 BUG_ON(atomic && async); 2551 2552 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2553 return pfn; 2554 2555 if (atomic) 2556 return KVM_PFN_ERR_FAULT; 2557 2558 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 2559 if (npages == 1) 2560 return pfn; 2561 2562 mmap_read_lock(current->mm); 2563 if (npages == -EHWPOISON || 2564 (!async && check_user_page_hwpoison(addr))) { 2565 pfn = KVM_PFN_ERR_HWPOISON; 2566 goto exit; 2567 } 2568 2569 retry: 2570 vma = vma_lookup(current->mm, addr); 2571 2572 if (vma == NULL) 2573 pfn = KVM_PFN_ERR_FAULT; 2574 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2575 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); 2576 if (r == -EAGAIN) 2577 goto retry; 2578 if (r < 0) 2579 pfn = KVM_PFN_ERR_FAULT; 2580 } else { 2581 if (async && vma_is_valid(vma, write_fault)) 2582 *async = true; 2583 pfn = KVM_PFN_ERR_FAULT; 2584 } 2585 exit: 2586 mmap_read_unlock(current->mm); 2587 return pfn; 2588 } 2589 2590 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 2591 bool atomic, bool *async, bool write_fault, 2592 bool *writable, hva_t *hva) 2593 { 2594 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2595 2596 if (hva) 2597 *hva = addr; 2598 2599 if (addr == KVM_HVA_ERR_RO_BAD) { 2600 if (writable) 2601 *writable = false; 2602 return KVM_PFN_ERR_RO_FAULT; 2603 } 2604 2605 if (kvm_is_error_hva(addr)) { 2606 if (writable) 2607 *writable = false; 2608 return KVM_PFN_NOSLOT; 2609 } 2610 2611 /* Do not map writable pfn in the readonly memslot. 
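 *
 * Summarizing the combinations handled here and above: a write fault on a
 * read-only memslot fails with KVM_PFN_ERR_RO_FAULT (reported via
 * KVM_HVA_ERR_RO_BAD above); a read fault on a read-only memslot still yields
 * a pfn, but *writable is forced to false and no writable mapping is
 * attempted; for writable memslots, @write_fault and @writable behave as
 * documented above hva_to_pfn().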
*/ 2612 if (writable && memslot_is_readonly(slot)) { 2613 *writable = false; 2614 writable = NULL; 2615 } 2616 2617 return hva_to_pfn(addr, atomic, async, write_fault, 2618 writable); 2619 } 2620 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2621 2622 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2623 bool *writable) 2624 { 2625 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2626 write_fault, writable, NULL); 2627 } 2628 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2629 2630 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) 2631 { 2632 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2633 } 2634 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2635 2636 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) 2637 { 2638 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2639 } 2640 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2641 2642 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2643 { 2644 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2645 } 2646 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2647 2648 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2649 { 2650 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2651 } 2652 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2653 2654 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2655 { 2656 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2657 } 2658 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2659 2660 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2661 struct page **pages, int nr_pages) 2662 { 2663 unsigned long addr; 2664 gfn_t entry = 0; 2665 2666 addr = gfn_to_hva_many(slot, gfn, &entry); 2667 if (kvm_is_error_hva(addr)) 2668 return -1; 2669 2670 if (entry < nr_pages) 2671 return 0; 2672 2673 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2674 } 2675 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2676 2677 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2678 { 2679 if (is_error_noslot_pfn(pfn)) 2680 return KVM_ERR_PTR_BAD_PAGE; 2681 2682 if (kvm_is_reserved_pfn(pfn)) { 2683 WARN_ON(1); 2684 return KVM_ERR_PTR_BAD_PAGE; 2685 } 2686 2687 return pfn_to_page(pfn); 2688 } 2689 2690 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2691 { 2692 kvm_pfn_t pfn; 2693 2694 pfn = gfn_to_pfn(kvm, gfn); 2695 2696 return kvm_pfn_to_page(pfn); 2697 } 2698 EXPORT_SYMBOL_GPL(gfn_to_page); 2699 2700 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2701 { 2702 if (pfn == 0) 2703 return; 2704 2705 if (dirty) 2706 kvm_release_pfn_dirty(pfn); 2707 else 2708 kvm_release_pfn_clean(pfn); 2709 } 2710 2711 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2712 { 2713 kvm_pfn_t pfn; 2714 void *hva = NULL; 2715 struct page *page = KVM_UNMAPPED_PAGE; 2716 2717 if (!map) 2718 return -EINVAL; 2719 2720 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2721 if (is_error_noslot_pfn(pfn)) 2722 return -EINVAL; 2723 2724 if (pfn_valid(pfn)) { 2725 page = pfn_to_page(pfn); 2726 hva = kmap(page); 2727 #ifdef CONFIG_HAS_IOMEM 2728 } else { 2729 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2730 #endif 2731 } 2732 2733 if (!hva) 2734 return -EFAULT; 2735 2736 map->page = page; 2737 map->hva = hva; 2738 map->pfn = pfn; 2739 map->gfn = gfn; 2740 2741 return 0; 2742 } 2743 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2744 2745 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2746 { 2747 if (!map) 
2748 return; 2749 2750 if (!map->hva) 2751 return; 2752 2753 if (map->page != KVM_UNMAPPED_PAGE) 2754 kunmap(map->page); 2755 #ifdef CONFIG_HAS_IOMEM 2756 else 2757 memunmap(map->hva); 2758 #endif 2759 2760 if (dirty) 2761 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2762 2763 kvm_release_pfn(map->pfn, dirty); 2764 2765 map->hva = NULL; 2766 map->page = NULL; 2767 } 2768 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2769 2770 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2771 { 2772 kvm_pfn_t pfn; 2773 2774 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2775 2776 return kvm_pfn_to_page(pfn); 2777 } 2778 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2779 2780 void kvm_release_page_clean(struct page *page) 2781 { 2782 WARN_ON(is_error_page(page)); 2783 2784 kvm_release_pfn_clean(page_to_pfn(page)); 2785 } 2786 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2787 2788 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2789 { 2790 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2791 put_page(pfn_to_page(pfn)); 2792 } 2793 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2794 2795 void kvm_release_page_dirty(struct page *page) 2796 { 2797 WARN_ON(is_error_page(page)); 2798 2799 kvm_release_pfn_dirty(page_to_pfn(page)); 2800 } 2801 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2802 2803 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2804 { 2805 kvm_set_pfn_dirty(pfn); 2806 kvm_release_pfn_clean(pfn); 2807 } 2808 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2809 2810 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2811 { 2812 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2813 SetPageDirty(pfn_to_page(pfn)); 2814 } 2815 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2816 2817 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2818 { 2819 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2820 mark_page_accessed(pfn_to_page(pfn)); 2821 } 2822 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2823 2824 static int next_segment(unsigned long len, int offset) 2825 { 2826 if (len > PAGE_SIZE - offset) 2827 return PAGE_SIZE - offset; 2828 else 2829 return len; 2830 } 2831 2832 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2833 void *data, int offset, int len) 2834 { 2835 int r; 2836 unsigned long addr; 2837 2838 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2839 if (kvm_is_error_hva(addr)) 2840 return -EFAULT; 2841 r = __copy_from_user(data, (void __user *)addr + offset, len); 2842 if (r) 2843 return -EFAULT; 2844 return 0; 2845 } 2846 2847 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2848 int len) 2849 { 2850 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2851 2852 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2853 } 2854 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2855 2856 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2857 int offset, int len) 2858 { 2859 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2860 2861 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2862 } 2863 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2864 2865 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2866 { 2867 gfn_t gfn = gpa >> PAGE_SHIFT; 2868 int seg; 2869 int offset = offset_in_page(gpa); 2870 int ret; 2871 2872 while ((seg = next_segment(len, offset)) != 0) { 2873 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2874 if (ret < 0) 2875 return ret; 2876 offset = 0; 2877 len -= seg; 2878 data += seg; 2879 ++gfn; 2880 } 2881 return 0; 2882 } 2883 EXPORT_SYMBOL_GPL(kvm_read_guest); 2884 
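/*
 * Illustrative (hypothetical) use of the gpa-based accessors above, e.g. arch
 * code pulling in a guest structure that may straddle a page boundary; the
 * struct and variable names are made up:
 *
 *	struct demo_desc desc;
 *
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 *
 * kvm_read_guest() splits the access at page granularity via next_segment()
 * and resolves each gfn through gfn_to_memslot(), so the caller does not need
 * to care whether the range crosses page or memslot boundaries; any portion
 * not backed by a memslot fails the call with -EFAULT.
 */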
2885 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2886 { 2887 gfn_t gfn = gpa >> PAGE_SHIFT; 2888 int seg; 2889 int offset = offset_in_page(gpa); 2890 int ret; 2891 2892 while ((seg = next_segment(len, offset)) != 0) { 2893 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2894 if (ret < 0) 2895 return ret; 2896 offset = 0; 2897 len -= seg; 2898 data += seg; 2899 ++gfn; 2900 } 2901 return 0; 2902 } 2903 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2904 2905 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2906 void *data, int offset, unsigned long len) 2907 { 2908 int r; 2909 unsigned long addr; 2910 2911 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2912 if (kvm_is_error_hva(addr)) 2913 return -EFAULT; 2914 pagefault_disable(); 2915 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2916 pagefault_enable(); 2917 if (r) 2918 return -EFAULT; 2919 return 0; 2920 } 2921 2922 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2923 void *data, unsigned long len) 2924 { 2925 gfn_t gfn = gpa >> PAGE_SHIFT; 2926 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2927 int offset = offset_in_page(gpa); 2928 2929 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2930 } 2931 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2932 2933 static int __kvm_write_guest_page(struct kvm *kvm, 2934 struct kvm_memory_slot *memslot, gfn_t gfn, 2935 const void *data, int offset, int len) 2936 { 2937 int r; 2938 unsigned long addr; 2939 2940 addr = gfn_to_hva_memslot(memslot, gfn); 2941 if (kvm_is_error_hva(addr)) 2942 return -EFAULT; 2943 r = __copy_to_user((void __user *)addr + offset, data, len); 2944 if (r) 2945 return -EFAULT; 2946 mark_page_dirty_in_slot(kvm, memslot, gfn); 2947 return 0; 2948 } 2949 2950 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2951 const void *data, int offset, int len) 2952 { 2953 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2954 2955 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2956 } 2957 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2958 2959 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2960 const void *data, int offset, int len) 2961 { 2962 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2963 2964 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2965 } 2966 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2967 2968 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2969 unsigned long len) 2970 { 2971 gfn_t gfn = gpa >> PAGE_SHIFT; 2972 int seg; 2973 int offset = offset_in_page(gpa); 2974 int ret; 2975 2976 while ((seg = next_segment(len, offset)) != 0) { 2977 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2978 if (ret < 0) 2979 return ret; 2980 offset = 0; 2981 len -= seg; 2982 data += seg; 2983 ++gfn; 2984 } 2985 return 0; 2986 } 2987 EXPORT_SYMBOL_GPL(kvm_write_guest); 2988 2989 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2990 unsigned long len) 2991 { 2992 gfn_t gfn = gpa >> PAGE_SHIFT; 2993 int seg; 2994 int offset = offset_in_page(gpa); 2995 int ret; 2996 2997 while ((seg = next_segment(len, offset)) != 0) { 2998 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 2999 if (ret < 0) 3000 return ret; 3001 offset = 0; 3002 len -= seg; 3003 data += seg; 3004 ++gfn; 3005 } 3006 return 0; 3007 } 3008 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3009 3010 static int 
__kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3011 struct gfn_to_hva_cache *ghc, 3012 gpa_t gpa, unsigned long len) 3013 { 3014 int offset = offset_in_page(gpa); 3015 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3016 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3017 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3018 gfn_t nr_pages_avail; 3019 3020 /* Update ghc->generation before performing any error checks. */ 3021 ghc->generation = slots->generation; 3022 3023 if (start_gfn > end_gfn) { 3024 ghc->hva = KVM_HVA_ERR_BAD; 3025 return -EINVAL; 3026 } 3027 3028 /* 3029 * If the requested region crosses two memslots, we still 3030 * verify that the entire region is valid here. 3031 */ 3032 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3033 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3034 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3035 &nr_pages_avail); 3036 if (kvm_is_error_hva(ghc->hva)) 3037 return -EFAULT; 3038 } 3039 3040 /* Use the slow path for cross page reads and writes. */ 3041 if (nr_pages_needed == 1) 3042 ghc->hva += offset; 3043 else 3044 ghc->memslot = NULL; 3045 3046 ghc->gpa = gpa; 3047 ghc->len = len; 3048 return 0; 3049 } 3050 3051 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3052 gpa_t gpa, unsigned long len) 3053 { 3054 struct kvm_memslots *slots = kvm_memslots(kvm); 3055 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3056 } 3057 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3058 3059 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3060 void *data, unsigned int offset, 3061 unsigned long len) 3062 { 3063 struct kvm_memslots *slots = kvm_memslots(kvm); 3064 int r; 3065 gpa_t gpa = ghc->gpa + offset; 3066 3067 if (WARN_ON_ONCE(len + offset > ghc->len)) 3068 return -EINVAL; 3069 3070 if (slots->generation != ghc->generation) { 3071 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3072 return -EFAULT; 3073 } 3074 3075 if (kvm_is_error_hva(ghc->hva)) 3076 return -EFAULT; 3077 3078 if (unlikely(!ghc->memslot)) 3079 return kvm_write_guest(kvm, gpa, data, len); 3080 3081 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3082 if (r) 3083 return -EFAULT; 3084 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3085 3086 return 0; 3087 } 3088 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3089 3090 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3091 void *data, unsigned long len) 3092 { 3093 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3094 } 3095 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3096 3097 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3098 void *data, unsigned int offset, 3099 unsigned long len) 3100 { 3101 struct kvm_memslots *slots = kvm_memslots(kvm); 3102 int r; 3103 gpa_t gpa = ghc->gpa + offset; 3104 3105 if (WARN_ON_ONCE(len + offset > ghc->len)) 3106 return -EINVAL; 3107 3108 if (slots->generation != ghc->generation) { 3109 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3110 return -EFAULT; 3111 } 3112 3113 if (kvm_is_error_hva(ghc->hva)) 3114 return -EFAULT; 3115 3116 if (unlikely(!ghc->memslot)) 3117 return kvm_read_guest(kvm, gpa, data, len); 3118 3119 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3120 if (r) 3121 return -EFAULT; 3122 3123 return 0; 3124 } 3125 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3126 3127 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 
3128 void *data, unsigned long len) 3129 { 3130 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3131 } 3132 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3133 3134 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3135 { 3136 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3137 gfn_t gfn = gpa >> PAGE_SHIFT; 3138 int seg; 3139 int offset = offset_in_page(gpa); 3140 int ret; 3141 3142 while ((seg = next_segment(len, offset)) != 0) { 3143 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 3144 if (ret < 0) 3145 return ret; 3146 offset = 0; 3147 len -= seg; 3148 ++gfn; 3149 } 3150 return 0; 3151 } 3152 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3153 3154 void mark_page_dirty_in_slot(struct kvm *kvm, 3155 const struct kvm_memory_slot *memslot, 3156 gfn_t gfn) 3157 { 3158 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3159 3160 if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) 3161 return; 3162 3163 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3164 unsigned long rel_gfn = gfn - memslot->base_gfn; 3165 u32 slot = (memslot->as_id << 16) | memslot->id; 3166 3167 if (kvm->dirty_ring_size) 3168 kvm_dirty_ring_push(&vcpu->dirty_ring, 3169 slot, rel_gfn); 3170 else 3171 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3172 } 3173 } 3174 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3175 3176 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3177 { 3178 struct kvm_memory_slot *memslot; 3179 3180 memslot = gfn_to_memslot(kvm, gfn); 3181 mark_page_dirty_in_slot(kvm, memslot, gfn); 3182 } 3183 EXPORT_SYMBOL_GPL(mark_page_dirty); 3184 3185 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3186 { 3187 struct kvm_memory_slot *memslot; 3188 3189 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3190 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3191 } 3192 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3193 3194 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3195 { 3196 if (!vcpu->sigset_active) 3197 return; 3198 3199 /* 3200 * This does a lockless modification of ->real_blocked, which is fine 3201 * because, only current can change ->real_blocked and all readers of 3202 * ->real_blocked don't care as long ->real_blocked is always a subset 3203 * of ->blocked. 
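 *
 * (The mask installed here is the one userspace registered with
 * KVM_SET_SIGNAL_MASK; kvm_sigset_deactivate() below undoes the swap and
 * clears ->real_blocked again.)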
3204 */
3205 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3206 }
3207
3208 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3209 {
3210 if (!vcpu->sigset_active)
3211 return;
3212
3213 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3214 sigemptyset(&current->real_blocked);
3215 }
3216
3217 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3218 {
3219 unsigned int old, val, grow, grow_start;
3220
3221 old = val = vcpu->halt_poll_ns;
3222 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3223 grow = READ_ONCE(halt_poll_ns_grow);
3224 if (!grow)
3225 goto out;
3226
3227 val *= grow;
3228 if (val < grow_start)
3229 val = grow_start;
3230
3231 if (val > vcpu->kvm->max_halt_poll_ns)
3232 val = vcpu->kvm->max_halt_poll_ns;
3233
3234 vcpu->halt_poll_ns = val;
3235 out:
3236 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3237 }
3238
3239 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3240 {
3241 unsigned int old, val, shrink, grow_start;
3242
3243 old = val = vcpu->halt_poll_ns;
3244 shrink = READ_ONCE(halt_poll_ns_shrink);
3245 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3246 if (shrink == 0)
3247 val = 0;
3248 else
3249 val /= shrink;
3250
3251 if (val < grow_start)
3252 val = 0;
3253
3254 vcpu->halt_poll_ns = val;
3255 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3256 }
3257
3258 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3259 {
3260 int ret = -EINTR;
3261 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3262
3263 if (kvm_arch_vcpu_runnable(vcpu)) {
3264 kvm_make_request(KVM_REQ_UNHALT, vcpu);
3265 goto out;
3266 }
3267 if (kvm_cpu_has_pending_timer(vcpu))
3268 goto out;
3269 if (signal_pending(current))
3270 goto out;
3271 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3272 goto out;
3273
3274 ret = 0;
3275 out:
3276 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3277 return ret;
3278 }
3279
3280 /*
3281 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3282 * pending. This is mostly used when halting a vCPU, but may also be used
3283 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3284 */
3285 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3286 {
3287 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3288 bool waited = false;
3289
3290 vcpu->stat.generic.blocking = 1;
3291
3292 kvm_arch_vcpu_blocking(vcpu);
3293
3294 prepare_to_rcuwait(wait);
3295 for (;;) {
3296 set_current_state(TASK_INTERRUPTIBLE);
3297
3298 if (kvm_vcpu_check_block(vcpu) < 0)
3299 break;
3300
3301 waited = true;
3302 schedule();
3303 }
3304 finish_rcuwait(wait);
3305
3306 kvm_arch_vcpu_unblocking(vcpu);
3307
3308 vcpu->stat.generic.blocking = 0;
3309
3310 return waited;
3311 }
3312
3313 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3314 ktime_t end, bool success)
3315 {
3316 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3317 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3318
3319 ++vcpu->stat.generic.halt_attempted_poll;
3320
3321 if (success) {
3322 ++vcpu->stat.generic.halt_successful_poll;
3323
3324 if (!vcpu_valid_wakeup(vcpu))
3325 ++vcpu->stat.generic.halt_poll_invalid;
3326
3327 stats->halt_poll_success_ns += poll_ns;
3328 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3329 } else {
3330 stats->halt_poll_fail_ns += poll_ns;
3331 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3332 }
3333 }
3334
3335 /*
3336 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...
If halt 3337 * polling is enabled, busy wait for a short time before blocking to avoid the 3338 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3339 * is halted. 3340 */ 3341 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3342 { 3343 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3344 bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3345 ktime_t start, cur, poll_end; 3346 bool waited = false; 3347 u64 halt_ns; 3348 3349 start = cur = poll_end = ktime_get(); 3350 if (do_halt_poll) { 3351 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3352 3353 do { 3354 /* 3355 * This sets KVM_REQ_UNHALT if an interrupt 3356 * arrives. 3357 */ 3358 if (kvm_vcpu_check_block(vcpu) < 0) 3359 goto out; 3360 cpu_relax(); 3361 poll_end = cur = ktime_get(); 3362 } while (kvm_vcpu_can_poll(cur, stop)); 3363 } 3364 3365 waited = kvm_vcpu_block(vcpu); 3366 3367 cur = ktime_get(); 3368 if (waited) { 3369 vcpu->stat.generic.halt_wait_ns += 3370 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3371 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3372 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3373 } 3374 out: 3375 /* The total time the vCPU was "halted", including polling time. */ 3376 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3377 3378 /* 3379 * Note, halt-polling is considered successful so long as the vCPU was 3380 * never actually scheduled out, i.e. even if the wake event arrived 3381 * after of the halt-polling loop itself, but before the full wait. 3382 */ 3383 if (do_halt_poll) 3384 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3385 3386 if (halt_poll_allowed) { 3387 if (!vcpu_valid_wakeup(vcpu)) { 3388 shrink_halt_poll_ns(vcpu); 3389 } else if (vcpu->kvm->max_halt_poll_ns) { 3390 if (halt_ns <= vcpu->halt_poll_ns) 3391 ; 3392 /* we had a long block, shrink polling */ 3393 else if (vcpu->halt_poll_ns && 3394 halt_ns > vcpu->kvm->max_halt_poll_ns) 3395 shrink_halt_poll_ns(vcpu); 3396 /* we had a short halt and our poll time is too small */ 3397 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3398 halt_ns < vcpu->kvm->max_halt_poll_ns) 3399 grow_halt_poll_ns(vcpu); 3400 } else { 3401 vcpu->halt_poll_ns = 0; 3402 } 3403 } 3404 3405 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3406 } 3407 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3408 3409 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3410 { 3411 if (__kvm_vcpu_wake_up(vcpu)) { 3412 WRITE_ONCE(vcpu->ready, true); 3413 ++vcpu->stat.generic.halt_wakeup; 3414 return true; 3415 } 3416 3417 return false; 3418 } 3419 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3420 3421 #ifndef CONFIG_S390 3422 /* 3423 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3424 */ 3425 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3426 { 3427 int me, cpu; 3428 3429 if (kvm_vcpu_wake_up(vcpu)) 3430 return; 3431 3432 me = get_cpu(); 3433 /* 3434 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3435 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3436 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3437 * within the vCPU thread itself. 3438 */ 3439 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3440 if (vcpu->mode == IN_GUEST_MODE) 3441 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3442 goto out; 3443 } 3444 3445 /* 3446 * Note, the vCPU could get migrated to a different pCPU at any point 3447 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3448 * IPI to the previous pCPU. 
But, that's ok because the purpose of the 3449 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3450 * vCPU also requires it to leave IN_GUEST_MODE. 3451 */ 3452 if (kvm_arch_vcpu_should_kick(vcpu)) { 3453 cpu = READ_ONCE(vcpu->cpu); 3454 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3455 smp_send_reschedule(cpu); 3456 } 3457 out: 3458 put_cpu(); 3459 } 3460 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3461 #endif /* !CONFIG_S390 */ 3462 3463 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3464 { 3465 struct pid *pid; 3466 struct task_struct *task = NULL; 3467 int ret = 0; 3468 3469 rcu_read_lock(); 3470 pid = rcu_dereference(target->pid); 3471 if (pid) 3472 task = get_pid_task(pid, PIDTYPE_PID); 3473 rcu_read_unlock(); 3474 if (!task) 3475 return ret; 3476 ret = yield_to(task, 1); 3477 put_task_struct(task); 3478 3479 return ret; 3480 } 3481 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3482 3483 /* 3484 * Helper that checks whether a VCPU is eligible for directed yield. 3485 * Most eligible candidate to yield is decided by following heuristics: 3486 * 3487 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3488 * (preempted lock holder), indicated by @in_spin_loop. 3489 * Set at the beginning and cleared at the end of interception/PLE handler. 3490 * 3491 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3492 * chance last time (mostly it has become eligible now since we have probably 3493 * yielded to lockholder in last iteration. This is done by toggling 3494 * @dy_eligible each time a VCPU checked for eligibility.) 3495 * 3496 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3497 * to preempted lock-holder could result in wrong VCPU selection and CPU 3498 * burning. Giving priority for a potential lock-holder increases lock 3499 * progress. 3500 * 3501 * Since algorithm is based on heuristics, accessing another VCPU data without 3502 * locking does not harm. It may result in trying to yield to same VCPU, fail 3503 * and continue with next VCPU and so on. 3504 */ 3505 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3506 { 3507 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3508 bool eligible; 3509 3510 eligible = !vcpu->spin_loop.in_spin_loop || 3511 vcpu->spin_loop.dy_eligible; 3512 3513 if (vcpu->spin_loop.in_spin_loop) 3514 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3515 3516 return eligible; 3517 #else 3518 return true; 3519 #endif 3520 } 3521 3522 /* 3523 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3524 * a vcpu_load/vcpu_put pair. However, for most architectures 3525 * kvm_arch_vcpu_runnable does not require vcpu_load. 
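 * (The caller here is the directed-yield path, kvm_vcpu_on_spin(), which
 * polls other vCPUs via vcpu_dy_runnable() without loading them.)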
3526 */ 3527 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3528 { 3529 return kvm_arch_vcpu_runnable(vcpu); 3530 } 3531 3532 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3533 { 3534 if (kvm_arch_dy_runnable(vcpu)) 3535 return true; 3536 3537 #ifdef CONFIG_KVM_ASYNC_PF 3538 if (!list_empty_careful(&vcpu->async_pf.done)) 3539 return true; 3540 #endif 3541 3542 return false; 3543 } 3544 3545 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3546 { 3547 return false; 3548 } 3549 3550 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3551 { 3552 struct kvm *kvm = me->kvm; 3553 struct kvm_vcpu *vcpu; 3554 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3555 unsigned long i; 3556 int yielded = 0; 3557 int try = 3; 3558 int pass; 3559 3560 kvm_vcpu_set_in_spin_loop(me, true); 3561 /* 3562 * We boost the priority of a VCPU that is runnable but not 3563 * currently running, because it got preempted by something 3564 * else and called schedule in __vcpu_run. Hopefully that 3565 * VCPU is holding the lock that we need and will release it. 3566 * We approximate round-robin by starting at the last boosted VCPU. 3567 */ 3568 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3569 kvm_for_each_vcpu(i, vcpu, kvm) { 3570 if (!pass && i <= last_boosted_vcpu) { 3571 i = last_boosted_vcpu; 3572 continue; 3573 } else if (pass && i > last_boosted_vcpu) 3574 break; 3575 if (!READ_ONCE(vcpu->ready)) 3576 continue; 3577 if (vcpu == me) 3578 continue; 3579 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3580 continue; 3581 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3582 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3583 !kvm_arch_vcpu_in_kernel(vcpu)) 3584 continue; 3585 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3586 continue; 3587 3588 yielded = kvm_vcpu_yield_to(vcpu); 3589 if (yielded > 0) { 3590 kvm->last_boosted_vcpu = i; 3591 break; 3592 } else if (yielded < 0) { 3593 try--; 3594 if (!try) 3595 break; 3596 } 3597 } 3598 } 3599 kvm_vcpu_set_in_spin_loop(me, false); 3600 3601 /* Ensure vcpu is not eligible during next spinloop */ 3602 kvm_vcpu_set_dy_eligible(me, false); 3603 } 3604 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3605 3606 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3607 { 3608 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3609 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3610 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3611 kvm->dirty_ring_size / PAGE_SIZE); 3612 #else 3613 return false; 3614 #endif 3615 } 3616 3617 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3618 { 3619 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3620 struct page *page; 3621 3622 if (vmf->pgoff == 0) 3623 page = virt_to_page(vcpu->run); 3624 #ifdef CONFIG_X86 3625 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3626 page = virt_to_page(vcpu->arch.pio_data); 3627 #endif 3628 #ifdef CONFIG_KVM_MMIO 3629 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3630 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3631 #endif 3632 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3633 page = kvm_dirty_ring_get_page( 3634 &vcpu->dirty_ring, 3635 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3636 else 3637 return kvm_arch_vcpu_fault(vcpu, vmf); 3638 get_page(page); 3639 vmf->page = page; 3640 return 0; 3641 } 3642 3643 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3644 .fault = kvm_vcpu_fault, 3645 }; 3646 3647 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3648 { 3649 struct 
kvm_vcpu *vcpu = file->private_data; 3650 unsigned long pages = vma_pages(vma); 3651 3652 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3653 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3654 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3655 return -EINVAL; 3656 3657 vma->vm_ops = &kvm_vcpu_vm_ops; 3658 return 0; 3659 } 3660 3661 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3662 { 3663 struct kvm_vcpu *vcpu = filp->private_data; 3664 3665 kvm_put_kvm(vcpu->kvm); 3666 return 0; 3667 } 3668 3669 static struct file_operations kvm_vcpu_fops = { 3670 .release = kvm_vcpu_release, 3671 .unlocked_ioctl = kvm_vcpu_ioctl, 3672 .mmap = kvm_vcpu_mmap, 3673 .llseek = noop_llseek, 3674 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3675 }; 3676 3677 /* 3678 * Allocates an inode for the vcpu. 3679 */ 3680 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3681 { 3682 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3683 3684 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3685 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3686 } 3687 3688 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3689 { 3690 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3691 struct dentry *debugfs_dentry; 3692 char dir_name[ITOA_MAX_LEN * 2]; 3693 3694 if (!debugfs_initialized()) 3695 return; 3696 3697 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3698 debugfs_dentry = debugfs_create_dir(dir_name, 3699 vcpu->kvm->debugfs_dentry); 3700 3701 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3702 #endif 3703 } 3704 3705 /* 3706 * Creates some virtual cpus. Good luck creating more than one. 3707 */ 3708 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3709 { 3710 int r; 3711 struct kvm_vcpu *vcpu; 3712 struct page *page; 3713 3714 if (id >= KVM_MAX_VCPU_IDS) 3715 return -EINVAL; 3716 3717 mutex_lock(&kvm->lock); 3718 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3719 mutex_unlock(&kvm->lock); 3720 return -EINVAL; 3721 } 3722 3723 kvm->created_vcpus++; 3724 mutex_unlock(&kvm->lock); 3725 3726 r = kvm_arch_vcpu_precreate(kvm, id); 3727 if (r) 3728 goto vcpu_decrement; 3729 3730 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3731 if (!vcpu) { 3732 r = -ENOMEM; 3733 goto vcpu_decrement; 3734 } 3735 3736 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3737 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3738 if (!page) { 3739 r = -ENOMEM; 3740 goto vcpu_free; 3741 } 3742 vcpu->run = page_address(page); 3743 3744 kvm_vcpu_init(vcpu, kvm, id); 3745 3746 r = kvm_arch_vcpu_create(vcpu); 3747 if (r) 3748 goto vcpu_free_run_page; 3749 3750 if (kvm->dirty_ring_size) { 3751 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3752 id, kvm->dirty_ring_size); 3753 if (r) 3754 goto arch_vcpu_destroy; 3755 } 3756 3757 mutex_lock(&kvm->lock); 3758 if (kvm_get_vcpu_by_id(kvm, id)) { 3759 r = -EEXIST; 3760 goto unlock_vcpu_destroy; 3761 } 3762 3763 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3764 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); 3765 BUG_ON(r == -EBUSY); 3766 if (r) 3767 goto unlock_vcpu_destroy; 3768 3769 /* Fill the stats id string for the vcpu */ 3770 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 3771 task_pid_nr(current), id); 3772 3773 /* Now it's all set up, let userspace reach it */ 3774 kvm_get_kvm(kvm); 3775 r = create_vcpu_fd(vcpu); 3776 if (r < 0) { 3777 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); 3778 kvm_put_kvm_no_destroy(kvm); 3779 goto 
unlock_vcpu_destroy; 3780 } 3781 3782 /* 3783 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 3784 * pointer before kvm->online_vcpu's incremented value. 3785 */ 3786 smp_wmb(); 3787 atomic_inc(&kvm->online_vcpus); 3788 3789 mutex_unlock(&kvm->lock); 3790 kvm_arch_vcpu_postcreate(vcpu); 3791 kvm_create_vcpu_debugfs(vcpu); 3792 return r; 3793 3794 unlock_vcpu_destroy: 3795 mutex_unlock(&kvm->lock); 3796 kvm_dirty_ring_free(&vcpu->dirty_ring); 3797 arch_vcpu_destroy: 3798 kvm_arch_vcpu_destroy(vcpu); 3799 vcpu_free_run_page: 3800 free_page((unsigned long)vcpu->run); 3801 vcpu_free: 3802 kmem_cache_free(kvm_vcpu_cache, vcpu); 3803 vcpu_decrement: 3804 mutex_lock(&kvm->lock); 3805 kvm->created_vcpus--; 3806 mutex_unlock(&kvm->lock); 3807 return r; 3808 } 3809 3810 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3811 { 3812 if (sigset) { 3813 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3814 vcpu->sigset_active = 1; 3815 vcpu->sigset = *sigset; 3816 } else 3817 vcpu->sigset_active = 0; 3818 return 0; 3819 } 3820 3821 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 3822 size_t size, loff_t *offset) 3823 { 3824 struct kvm_vcpu *vcpu = file->private_data; 3825 3826 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 3827 &kvm_vcpu_stats_desc[0], &vcpu->stat, 3828 sizeof(vcpu->stat), user_buffer, size, offset); 3829 } 3830 3831 static const struct file_operations kvm_vcpu_stats_fops = { 3832 .read = kvm_vcpu_stats_read, 3833 .llseek = noop_llseek, 3834 }; 3835 3836 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 3837 { 3838 int fd; 3839 struct file *file; 3840 char name[15 + ITOA_MAX_LEN + 1]; 3841 3842 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 3843 3844 fd = get_unused_fd_flags(O_CLOEXEC); 3845 if (fd < 0) 3846 return fd; 3847 3848 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 3849 if (IS_ERR(file)) { 3850 put_unused_fd(fd); 3851 return PTR_ERR(file); 3852 } 3853 file->f_mode |= FMODE_PREAD; 3854 fd_install(fd, file); 3855 3856 return fd; 3857 } 3858 3859 static long kvm_vcpu_ioctl(struct file *filp, 3860 unsigned int ioctl, unsigned long arg) 3861 { 3862 struct kvm_vcpu *vcpu = filp->private_data; 3863 void __user *argp = (void __user *)arg; 3864 int r; 3865 struct kvm_fpu *fpu = NULL; 3866 struct kvm_sregs *kvm_sregs = NULL; 3867 3868 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 3869 return -EIO; 3870 3871 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3872 return -EINVAL; 3873 3874 /* 3875 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3876 * execution; mutex_lock() would break them. 3877 */ 3878 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3879 if (r != -ENOIOCTLCMD) 3880 return r; 3881 3882 if (mutex_lock_killable(&vcpu->mutex)) 3883 return -EINTR; 3884 switch (ioctl) { 3885 case KVM_RUN: { 3886 struct pid *oldpid; 3887 r = -EINVAL; 3888 if (arg) 3889 goto out; 3890 oldpid = rcu_access_pointer(vcpu->pid); 3891 if (unlikely(oldpid != task_pid(current))) { 3892 /* The thread running this VCPU changed. 
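 * Swap in the new pid under RCU: kvm_vcpu_yield_to() dereferences vcpu->pid
 * with rcu_dereference(), so publish the new value with rcu_assign_pointer()
 * and wait a grace period before put_pid() on the old one.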
*/ 3893 struct pid *newpid; 3894 3895 r = kvm_arch_vcpu_run_pid_change(vcpu); 3896 if (r) 3897 break; 3898 3899 newpid = get_task_pid(current, PIDTYPE_PID); 3900 rcu_assign_pointer(vcpu->pid, newpid); 3901 if (oldpid) 3902 synchronize_rcu(); 3903 put_pid(oldpid); 3904 } 3905 r = kvm_arch_vcpu_ioctl_run(vcpu); 3906 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3907 break; 3908 } 3909 case KVM_GET_REGS: { 3910 struct kvm_regs *kvm_regs; 3911 3912 r = -ENOMEM; 3913 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3914 if (!kvm_regs) 3915 goto out; 3916 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3917 if (r) 3918 goto out_free1; 3919 r = -EFAULT; 3920 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3921 goto out_free1; 3922 r = 0; 3923 out_free1: 3924 kfree(kvm_regs); 3925 break; 3926 } 3927 case KVM_SET_REGS: { 3928 struct kvm_regs *kvm_regs; 3929 3930 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3931 if (IS_ERR(kvm_regs)) { 3932 r = PTR_ERR(kvm_regs); 3933 goto out; 3934 } 3935 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3936 kfree(kvm_regs); 3937 break; 3938 } 3939 case KVM_GET_SREGS: { 3940 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3941 GFP_KERNEL_ACCOUNT); 3942 r = -ENOMEM; 3943 if (!kvm_sregs) 3944 goto out; 3945 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3946 if (r) 3947 goto out; 3948 r = -EFAULT; 3949 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3950 goto out; 3951 r = 0; 3952 break; 3953 } 3954 case KVM_SET_SREGS: { 3955 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3956 if (IS_ERR(kvm_sregs)) { 3957 r = PTR_ERR(kvm_sregs); 3958 kvm_sregs = NULL; 3959 goto out; 3960 } 3961 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3962 break; 3963 } 3964 case KVM_GET_MP_STATE: { 3965 struct kvm_mp_state mp_state; 3966 3967 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3968 if (r) 3969 goto out; 3970 r = -EFAULT; 3971 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3972 goto out; 3973 r = 0; 3974 break; 3975 } 3976 case KVM_SET_MP_STATE: { 3977 struct kvm_mp_state mp_state; 3978 3979 r = -EFAULT; 3980 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3981 goto out; 3982 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3983 break; 3984 } 3985 case KVM_TRANSLATE: { 3986 struct kvm_translation tr; 3987 3988 r = -EFAULT; 3989 if (copy_from_user(&tr, argp, sizeof(tr))) 3990 goto out; 3991 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 3992 if (r) 3993 goto out; 3994 r = -EFAULT; 3995 if (copy_to_user(argp, &tr, sizeof(tr))) 3996 goto out; 3997 r = 0; 3998 break; 3999 } 4000 case KVM_SET_GUEST_DEBUG: { 4001 struct kvm_guest_debug dbg; 4002 4003 r = -EFAULT; 4004 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4005 goto out; 4006 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4007 break; 4008 } 4009 case KVM_SET_SIGNAL_MASK: { 4010 struct kvm_signal_mask __user *sigmask_arg = argp; 4011 struct kvm_signal_mask kvm_sigmask; 4012 sigset_t sigset, *p; 4013 4014 p = NULL; 4015 if (argp) { 4016 r = -EFAULT; 4017 if (copy_from_user(&kvm_sigmask, argp, 4018 sizeof(kvm_sigmask))) 4019 goto out; 4020 r = -EINVAL; 4021 if (kvm_sigmask.len != sizeof(sigset)) 4022 goto out; 4023 r = -EFAULT; 4024 if (copy_from_user(&sigset, sigmask_arg->sigset, 4025 sizeof(sigset))) 4026 goto out; 4027 p = &sigset; 4028 } 4029 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4030 break; 4031 } 4032 case KVM_GET_FPU: { 4033 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4034 r = -ENOMEM; 4035 if (!fpu) 4036 goto out; 
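/*
 * The KVM_{GET,SET}_REGS, KVM_{GET,SET}_SREGS and KVM_{GET,SET}_FPU cases
 * in this dispatcher all follow the same pattern: allocate (or memdup) a
 * fixed-size UAPI structure, hand it to the arch backend, and copy it back
 * to userspace.  A minimal userspace counterpart for the register pair
 * might look like the sketch below; "vcpu_fd" is assumed to be a fd
 * returned by KVM_CREATE_VCPU, and the rip field exists only in the x86
 * layout of struct kvm_regs.
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int skip_one_byte(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.rip += 1;		/* x86-specific field, used only as an example */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}
#endif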
4037 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4038 if (r) 4039 goto out; 4040 r = -EFAULT; 4041 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4042 goto out; 4043 r = 0; 4044 break; 4045 } 4046 case KVM_SET_FPU: { 4047 fpu = memdup_user(argp, sizeof(*fpu)); 4048 if (IS_ERR(fpu)) { 4049 r = PTR_ERR(fpu); 4050 fpu = NULL; 4051 goto out; 4052 } 4053 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4054 break; 4055 } 4056 case KVM_GET_STATS_FD: { 4057 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4058 break; 4059 } 4060 default: 4061 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4062 } 4063 out: 4064 mutex_unlock(&vcpu->mutex); 4065 kfree(fpu); 4066 kfree(kvm_sregs); 4067 return r; 4068 } 4069 4070 #ifdef CONFIG_KVM_COMPAT 4071 static long kvm_vcpu_compat_ioctl(struct file *filp, 4072 unsigned int ioctl, unsigned long arg) 4073 { 4074 struct kvm_vcpu *vcpu = filp->private_data; 4075 void __user *argp = compat_ptr(arg); 4076 int r; 4077 4078 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4079 return -EIO; 4080 4081 switch (ioctl) { 4082 case KVM_SET_SIGNAL_MASK: { 4083 struct kvm_signal_mask __user *sigmask_arg = argp; 4084 struct kvm_signal_mask kvm_sigmask; 4085 sigset_t sigset; 4086 4087 if (argp) { 4088 r = -EFAULT; 4089 if (copy_from_user(&kvm_sigmask, argp, 4090 sizeof(kvm_sigmask))) 4091 goto out; 4092 r = -EINVAL; 4093 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4094 goto out; 4095 r = -EFAULT; 4096 if (get_compat_sigset(&sigset, 4097 (compat_sigset_t __user *)sigmask_arg->sigset)) 4098 goto out; 4099 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4100 } else 4101 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4102 break; 4103 } 4104 default: 4105 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4106 } 4107 4108 out: 4109 return r; 4110 } 4111 #endif 4112 4113 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4114 { 4115 struct kvm_device *dev = filp->private_data; 4116 4117 if (dev->ops->mmap) 4118 return dev->ops->mmap(dev, vma); 4119 4120 return -ENODEV; 4121 } 4122 4123 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4124 int (*accessor)(struct kvm_device *dev, 4125 struct kvm_device_attr *attr), 4126 unsigned long arg) 4127 { 4128 struct kvm_device_attr attr; 4129 4130 if (!accessor) 4131 return -EPERM; 4132 4133 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4134 return -EFAULT; 4135 4136 return accessor(dev, &attr); 4137 } 4138 4139 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4140 unsigned long arg) 4141 { 4142 struct kvm_device *dev = filp->private_data; 4143 4144 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4145 return -EIO; 4146 4147 switch (ioctl) { 4148 case KVM_SET_DEVICE_ATTR: 4149 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4150 case KVM_GET_DEVICE_ATTR: 4151 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4152 case KVM_HAS_DEVICE_ATTR: 4153 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4154 default: 4155 if (dev->ops->ioctl) 4156 return dev->ops->ioctl(dev, ioctl, arg); 4157 4158 return -ENOTTY; 4159 } 4160 } 4161 4162 static int kvm_device_release(struct inode *inode, struct file *filp) 4163 { 4164 struct kvm_device *dev = filp->private_data; 4165 struct kvm *kvm = dev->kvm; 4166 4167 if (dev->ops->release) { 4168 mutex_lock(&kvm->lock); 4169 list_del(&dev->vm_node); 4170 dev->ops->release(dev); 4171 mutex_unlock(&kvm->lock); 4172 } 4173 4174 kvm_put_kvm(kvm); 4175 return 0; 4176 } 4177 4178 static const struct file_operations kvm_device_fops = { 4179 
.unlocked_ioctl = kvm_device_ioctl, 4180 .release = kvm_device_release, 4181 KVM_COMPAT(kvm_device_ioctl), 4182 .mmap = kvm_device_mmap, 4183 }; 4184 4185 struct kvm_device *kvm_device_from_filp(struct file *filp) 4186 { 4187 if (filp->f_op != &kvm_device_fops) 4188 return NULL; 4189 4190 return filp->private_data; 4191 } 4192 4193 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4194 #ifdef CONFIG_KVM_MPIC 4195 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4196 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4197 #endif 4198 }; 4199 4200 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4201 { 4202 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4203 return -ENOSPC; 4204 4205 if (kvm_device_ops_table[type] != NULL) 4206 return -EEXIST; 4207 4208 kvm_device_ops_table[type] = ops; 4209 return 0; 4210 } 4211 4212 void kvm_unregister_device_ops(u32 type) 4213 { 4214 if (kvm_device_ops_table[type] != NULL) 4215 kvm_device_ops_table[type] = NULL; 4216 } 4217 4218 static int kvm_ioctl_create_device(struct kvm *kvm, 4219 struct kvm_create_device *cd) 4220 { 4221 const struct kvm_device_ops *ops = NULL; 4222 struct kvm_device *dev; 4223 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4224 int type; 4225 int ret; 4226 4227 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4228 return -ENODEV; 4229 4230 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4231 ops = kvm_device_ops_table[type]; 4232 if (ops == NULL) 4233 return -ENODEV; 4234 4235 if (test) 4236 return 0; 4237 4238 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4239 if (!dev) 4240 return -ENOMEM; 4241 4242 dev->ops = ops; 4243 dev->kvm = kvm; 4244 4245 mutex_lock(&kvm->lock); 4246 ret = ops->create(dev, type); 4247 if (ret < 0) { 4248 mutex_unlock(&kvm->lock); 4249 kfree(dev); 4250 return ret; 4251 } 4252 list_add(&dev->vm_node, &kvm->devices); 4253 mutex_unlock(&kvm->lock); 4254 4255 if (ops->init) 4256 ops->init(dev); 4257 4258 kvm_get_kvm(kvm); 4259 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4260 if (ret < 0) { 4261 kvm_put_kvm_no_destroy(kvm); 4262 mutex_lock(&kvm->lock); 4263 list_del(&dev->vm_node); 4264 mutex_unlock(&kvm->lock); 4265 ops->destroy(dev); 4266 return ret; 4267 } 4268 4269 cd->fd = ret; 4270 return 0; 4271 } 4272 4273 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4274 { 4275 switch (arg) { 4276 case KVM_CAP_USER_MEMORY: 4277 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4278 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4279 case KVM_CAP_INTERNAL_ERROR_DATA: 4280 #ifdef CONFIG_HAVE_KVM_MSI 4281 case KVM_CAP_SIGNAL_MSI: 4282 #endif 4283 #ifdef CONFIG_HAVE_KVM_IRQFD 4284 case KVM_CAP_IRQFD: 4285 case KVM_CAP_IRQFD_RESAMPLE: 4286 #endif 4287 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4288 case KVM_CAP_CHECK_EXTENSION_VM: 4289 case KVM_CAP_ENABLE_CAP_VM: 4290 case KVM_CAP_HALT_POLL: 4291 return 1; 4292 #ifdef CONFIG_KVM_MMIO 4293 case KVM_CAP_COALESCED_MMIO: 4294 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4295 case KVM_CAP_COALESCED_PIO: 4296 return 1; 4297 #endif 4298 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4299 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4300 return KVM_DIRTY_LOG_MANUAL_CAPS; 4301 #endif 4302 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4303 case KVM_CAP_IRQ_ROUTING: 4304 return KVM_MAX_IRQ_ROUTES; 4305 #endif 4306 #if KVM_ADDRESS_SPACE_NUM > 1 4307 case KVM_CAP_MULTI_ADDRESS_SPACE: 4308 return KVM_ADDRESS_SPACE_NUM; 4309 #endif 4310 case KVM_CAP_NR_MEMSLOTS: 4311 return KVM_USER_MEM_SLOTS; 4312 case 
KVM_CAP_DIRTY_LOG_RING: 4313 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 4314 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4315 #else 4316 return 0; 4317 #endif 4318 case KVM_CAP_BINARY_STATS_FD: 4319 return 1; 4320 default: 4321 break; 4322 } 4323 return kvm_vm_ioctl_check_extension(kvm, arg); 4324 } 4325 4326 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4327 { 4328 int r; 4329 4330 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4331 return -EINVAL; 4332 4333 /* the size should be power of 2 */ 4334 if (!size || (size & (size - 1))) 4335 return -EINVAL; 4336 4337 /* Should be bigger to keep the reserved entries, or a page */ 4338 if (size < kvm_dirty_ring_get_rsvd_entries() * 4339 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4340 return -EINVAL; 4341 4342 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4343 sizeof(struct kvm_dirty_gfn)) 4344 return -E2BIG; 4345 4346 /* We only allow it to set once */ 4347 if (kvm->dirty_ring_size) 4348 return -EINVAL; 4349 4350 mutex_lock(&kvm->lock); 4351 4352 if (kvm->created_vcpus) { 4353 /* We don't allow to change this value after vcpu created */ 4354 r = -EINVAL; 4355 } else { 4356 kvm->dirty_ring_size = size; 4357 r = 0; 4358 } 4359 4360 mutex_unlock(&kvm->lock); 4361 return r; 4362 } 4363 4364 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4365 { 4366 unsigned long i; 4367 struct kvm_vcpu *vcpu; 4368 int cleared = 0; 4369 4370 if (!kvm->dirty_ring_size) 4371 return -EINVAL; 4372 4373 mutex_lock(&kvm->slots_lock); 4374 4375 kvm_for_each_vcpu(i, vcpu, kvm) 4376 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4377 4378 mutex_unlock(&kvm->slots_lock); 4379 4380 if (cleared) 4381 kvm_flush_remote_tlbs(kvm); 4382 4383 return cleared; 4384 } 4385 4386 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4387 struct kvm_enable_cap *cap) 4388 { 4389 return -EINVAL; 4390 } 4391 4392 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4393 struct kvm_enable_cap *cap) 4394 { 4395 switch (cap->cap) { 4396 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4397 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4398 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4399 4400 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4401 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4402 4403 if (cap->flags || (cap->args[0] & ~allowed_options)) 4404 return -EINVAL; 4405 kvm->manual_dirty_log_protect = cap->args[0]; 4406 return 0; 4407 } 4408 #endif 4409 case KVM_CAP_HALT_POLL: { 4410 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4411 return -EINVAL; 4412 4413 kvm->max_halt_poll_ns = cap->args[0]; 4414 return 0; 4415 } 4416 case KVM_CAP_DIRTY_LOG_RING: 4417 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4418 default: 4419 return kvm_vm_ioctl_enable_cap(kvm, cap); 4420 } 4421 } 4422 4423 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4424 size_t size, loff_t *offset) 4425 { 4426 struct kvm *kvm = file->private_data; 4427 4428 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4429 &kvm_vm_stats_desc[0], &kvm->stat, 4430 sizeof(kvm->stat), user_buffer, size, offset); 4431 } 4432 4433 static const struct file_operations kvm_vm_stats_fops = { 4434 .read = kvm_vm_stats_read, 4435 .llseek = noop_llseek, 4436 }; 4437 4438 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4439 { 4440 int fd; 4441 struct file *file; 4442 4443 fd = get_unused_fd_flags(O_CLOEXEC); 4444 if (fd < 0) 4445 return fd; 4446 4447 file = 
anon_inode_getfile("kvm-vm-stats", 4448 &kvm_vm_stats_fops, kvm, O_RDONLY); 4449 if (IS_ERR(file)) { 4450 put_unused_fd(fd); 4451 return PTR_ERR(file); 4452 } 4453 file->f_mode |= FMODE_PREAD; 4454 fd_install(fd, file); 4455 4456 return fd; 4457 } 4458 4459 static long kvm_vm_ioctl(struct file *filp, 4460 unsigned int ioctl, unsigned long arg) 4461 { 4462 struct kvm *kvm = filp->private_data; 4463 void __user *argp = (void __user *)arg; 4464 int r; 4465 4466 if (kvm->mm != current->mm || kvm->vm_dead) 4467 return -EIO; 4468 switch (ioctl) { 4469 case KVM_CREATE_VCPU: 4470 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4471 break; 4472 case KVM_ENABLE_CAP: { 4473 struct kvm_enable_cap cap; 4474 4475 r = -EFAULT; 4476 if (copy_from_user(&cap, argp, sizeof(cap))) 4477 goto out; 4478 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4479 break; 4480 } 4481 case KVM_SET_USER_MEMORY_REGION: { 4482 struct kvm_userspace_memory_region kvm_userspace_mem; 4483 4484 r = -EFAULT; 4485 if (copy_from_user(&kvm_userspace_mem, argp, 4486 sizeof(kvm_userspace_mem))) 4487 goto out; 4488 4489 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4490 break; 4491 } 4492 case KVM_GET_DIRTY_LOG: { 4493 struct kvm_dirty_log log; 4494 4495 r = -EFAULT; 4496 if (copy_from_user(&log, argp, sizeof(log))) 4497 goto out; 4498 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4499 break; 4500 } 4501 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4502 case KVM_CLEAR_DIRTY_LOG: { 4503 struct kvm_clear_dirty_log log; 4504 4505 r = -EFAULT; 4506 if (copy_from_user(&log, argp, sizeof(log))) 4507 goto out; 4508 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4509 break; 4510 } 4511 #endif 4512 #ifdef CONFIG_KVM_MMIO 4513 case KVM_REGISTER_COALESCED_MMIO: { 4514 struct kvm_coalesced_mmio_zone zone; 4515 4516 r = -EFAULT; 4517 if (copy_from_user(&zone, argp, sizeof(zone))) 4518 goto out; 4519 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4520 break; 4521 } 4522 case KVM_UNREGISTER_COALESCED_MMIO: { 4523 struct kvm_coalesced_mmio_zone zone; 4524 4525 r = -EFAULT; 4526 if (copy_from_user(&zone, argp, sizeof(zone))) 4527 goto out; 4528 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4529 break; 4530 } 4531 #endif 4532 case KVM_IRQFD: { 4533 struct kvm_irqfd data; 4534 4535 r = -EFAULT; 4536 if (copy_from_user(&data, argp, sizeof(data))) 4537 goto out; 4538 r = kvm_irqfd(kvm, &data); 4539 break; 4540 } 4541 case KVM_IOEVENTFD: { 4542 struct kvm_ioeventfd data; 4543 4544 r = -EFAULT; 4545 if (copy_from_user(&data, argp, sizeof(data))) 4546 goto out; 4547 r = kvm_ioeventfd(kvm, &data); 4548 break; 4549 } 4550 #ifdef CONFIG_HAVE_KVM_MSI 4551 case KVM_SIGNAL_MSI: { 4552 struct kvm_msi msi; 4553 4554 r = -EFAULT; 4555 if (copy_from_user(&msi, argp, sizeof(msi))) 4556 goto out; 4557 r = kvm_send_userspace_msi(kvm, &msi); 4558 break; 4559 } 4560 #endif 4561 #ifdef __KVM_HAVE_IRQ_LINE 4562 case KVM_IRQ_LINE_STATUS: 4563 case KVM_IRQ_LINE: { 4564 struct kvm_irq_level irq_event; 4565 4566 r = -EFAULT; 4567 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4568 goto out; 4569 4570 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4571 ioctl == KVM_IRQ_LINE_STATUS); 4572 if (r) 4573 goto out; 4574 4575 r = -EFAULT; 4576 if (ioctl == KVM_IRQ_LINE_STATUS) { 4577 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4578 goto out; 4579 } 4580 4581 r = 0; 4582 break; 4583 } 4584 #endif 4585 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4586 case KVM_SET_GSI_ROUTING: { 4587 struct kvm_irq_routing routing; 4588 struct kvm_irq_routing __user *urouting; 
4589 struct kvm_irq_routing_entry *entries = NULL; 4590 4591 r = -EFAULT; 4592 if (copy_from_user(&routing, argp, sizeof(routing))) 4593 goto out; 4594 r = -EINVAL; 4595 if (!kvm_arch_can_set_irq_routing(kvm)) 4596 goto out; 4597 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4598 goto out; 4599 if (routing.flags) 4600 goto out; 4601 if (routing.nr) { 4602 urouting = argp; 4603 entries = vmemdup_user(urouting->entries, 4604 array_size(sizeof(*entries), 4605 routing.nr)); 4606 if (IS_ERR(entries)) { 4607 r = PTR_ERR(entries); 4608 goto out; 4609 } 4610 } 4611 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4612 routing.flags); 4613 kvfree(entries); 4614 break; 4615 } 4616 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4617 case KVM_CREATE_DEVICE: { 4618 struct kvm_create_device cd; 4619 4620 r = -EFAULT; 4621 if (copy_from_user(&cd, argp, sizeof(cd))) 4622 goto out; 4623 4624 r = kvm_ioctl_create_device(kvm, &cd); 4625 if (r) 4626 goto out; 4627 4628 r = -EFAULT; 4629 if (copy_to_user(argp, &cd, sizeof(cd))) 4630 goto out; 4631 4632 r = 0; 4633 break; 4634 } 4635 case KVM_CHECK_EXTENSION: 4636 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4637 break; 4638 case KVM_RESET_DIRTY_RINGS: 4639 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4640 break; 4641 case KVM_GET_STATS_FD: 4642 r = kvm_vm_ioctl_get_stats_fd(kvm); 4643 break; 4644 default: 4645 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4646 } 4647 out: 4648 return r; 4649 } 4650 4651 #ifdef CONFIG_KVM_COMPAT 4652 struct compat_kvm_dirty_log { 4653 __u32 slot; 4654 __u32 padding1; 4655 union { 4656 compat_uptr_t dirty_bitmap; /* one bit per page */ 4657 __u64 padding2; 4658 }; 4659 }; 4660 4661 struct compat_kvm_clear_dirty_log { 4662 __u32 slot; 4663 __u32 num_pages; 4664 __u64 first_page; 4665 union { 4666 compat_uptr_t dirty_bitmap; /* one bit per page */ 4667 __u64 padding2; 4668 }; 4669 }; 4670 4671 static long kvm_vm_compat_ioctl(struct file *filp, 4672 unsigned int ioctl, unsigned long arg) 4673 { 4674 struct kvm *kvm = filp->private_data; 4675 int r; 4676 4677 if (kvm->mm != current->mm || kvm->vm_dead) 4678 return -EIO; 4679 switch (ioctl) { 4680 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4681 case KVM_CLEAR_DIRTY_LOG: { 4682 struct compat_kvm_clear_dirty_log compat_log; 4683 struct kvm_clear_dirty_log log; 4684 4685 if (copy_from_user(&compat_log, (void __user *)arg, 4686 sizeof(compat_log))) 4687 return -EFAULT; 4688 log.slot = compat_log.slot; 4689 log.num_pages = compat_log.num_pages; 4690 log.first_page = compat_log.first_page; 4691 log.padding2 = compat_log.padding2; 4692 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4693 4694 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4695 break; 4696 } 4697 #endif 4698 case KVM_GET_DIRTY_LOG: { 4699 struct compat_kvm_dirty_log compat_log; 4700 struct kvm_dirty_log log; 4701 4702 if (copy_from_user(&compat_log, (void __user *)arg, 4703 sizeof(compat_log))) 4704 return -EFAULT; 4705 log.slot = compat_log.slot; 4706 log.padding1 = compat_log.padding1; 4707 log.padding2 = compat_log.padding2; 4708 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4709 4710 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4711 break; 4712 } 4713 default: 4714 r = kvm_vm_ioctl(filp, ioctl, arg); 4715 } 4716 return r; 4717 } 4718 #endif 4719 4720 static struct file_operations kvm_vm_fops = { 4721 .release = kvm_vm_release, 4722 .unlocked_ioctl = kvm_vm_ioctl, 4723 .llseek = noop_llseek, 4724 KVM_COMPAT(kvm_vm_compat_ioctl), 4725 }; 4726 4727 bool file_is_kvm(struct file *file) 4728 { 4729 return file && file->f_op 
== &kvm_vm_fops; 4730 } 4731 EXPORT_SYMBOL_GPL(file_is_kvm); 4732 4733 static int kvm_dev_ioctl_create_vm(unsigned long type) 4734 { 4735 int r; 4736 struct kvm *kvm; 4737 struct file *file; 4738 4739 kvm = kvm_create_vm(type); 4740 if (IS_ERR(kvm)) 4741 return PTR_ERR(kvm); 4742 #ifdef CONFIG_KVM_MMIO 4743 r = kvm_coalesced_mmio_init(kvm); 4744 if (r < 0) 4745 goto put_kvm; 4746 #endif 4747 r = get_unused_fd_flags(O_CLOEXEC); 4748 if (r < 0) 4749 goto put_kvm; 4750 4751 snprintf(kvm->stats_id, sizeof(kvm->stats_id), 4752 "kvm-%d", task_pid_nr(current)); 4753 4754 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4755 if (IS_ERR(file)) { 4756 put_unused_fd(r); 4757 r = PTR_ERR(file); 4758 goto put_kvm; 4759 } 4760 4761 /* 4762 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4763 * already set, with ->release() being kvm_vm_release(). In error 4764 * cases it will be called by the final fput(file) and will take 4765 * care of doing kvm_put_kvm(kvm). 4766 */ 4767 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4768 put_unused_fd(r); 4769 fput(file); 4770 return -ENOMEM; 4771 } 4772 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4773 4774 fd_install(r, file); 4775 return r; 4776 4777 put_kvm: 4778 kvm_put_kvm(kvm); 4779 return r; 4780 } 4781 4782 static long kvm_dev_ioctl(struct file *filp, 4783 unsigned int ioctl, unsigned long arg) 4784 { 4785 long r = -EINVAL; 4786 4787 switch (ioctl) { 4788 case KVM_GET_API_VERSION: 4789 if (arg) 4790 goto out; 4791 r = KVM_API_VERSION; 4792 break; 4793 case KVM_CREATE_VM: 4794 r = kvm_dev_ioctl_create_vm(arg); 4795 break; 4796 case KVM_CHECK_EXTENSION: 4797 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4798 break; 4799 case KVM_GET_VCPU_MMAP_SIZE: 4800 if (arg) 4801 goto out; 4802 r = PAGE_SIZE; /* struct kvm_run */ 4803 #ifdef CONFIG_X86 4804 r += PAGE_SIZE; /* pio data page */ 4805 #endif 4806 #ifdef CONFIG_KVM_MMIO 4807 r += PAGE_SIZE; /* coalesced mmio ring page */ 4808 #endif 4809 break; 4810 case KVM_TRACE_ENABLE: 4811 case KVM_TRACE_PAUSE: 4812 case KVM_TRACE_DISABLE: 4813 r = -EOPNOTSUPP; 4814 break; 4815 default: 4816 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4817 } 4818 out: 4819 return r; 4820 } 4821 4822 static struct file_operations kvm_chardev_ops = { 4823 .unlocked_ioctl = kvm_dev_ioctl, 4824 .llseek = noop_llseek, 4825 KVM_COMPAT(kvm_dev_ioctl), 4826 }; 4827 4828 static struct miscdevice kvm_dev = { 4829 KVM_MINOR, 4830 "kvm", 4831 &kvm_chardev_ops, 4832 }; 4833 4834 static void hardware_enable_nolock(void *junk) 4835 { 4836 int cpu = raw_smp_processor_id(); 4837 int r; 4838 4839 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4840 return; 4841 4842 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4843 4844 r = kvm_arch_hardware_enable(); 4845 4846 if (r) { 4847 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4848 atomic_inc(&hardware_enable_failed); 4849 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4850 } 4851 } 4852 4853 static int kvm_starting_cpu(unsigned int cpu) 4854 { 4855 raw_spin_lock(&kvm_count_lock); 4856 if (kvm_usage_count) 4857 hardware_enable_nolock(NULL); 4858 raw_spin_unlock(&kvm_count_lock); 4859 return 0; 4860 } 4861 4862 static void hardware_disable_nolock(void *junk) 4863 { 4864 int cpu = raw_smp_processor_id(); 4865 4866 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4867 return; 4868 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4869 kvm_arch_hardware_disable(); 4870 } 4871 4872 static int kvm_dying_cpu(unsigned int cpu) 4873 { 4874 
{ raw_spin_lock(&kvm_count_lock); 4875 if (kvm_usage_count) 4876 hardware_disable_nolock(NULL); 4877 raw_spin_unlock(&kvm_count_lock); 4878 return 0; 4879 } 4880 4881 static void hardware_disable_all_nolock(void) 4882 { 4883 BUG_ON(!kvm_usage_count); 4884 4885 kvm_usage_count--; 4886 if (!kvm_usage_count) 4887 on_each_cpu(hardware_disable_nolock, NULL, 1); 4888 } 4889 4890 static void hardware_disable_all(void) 4891 { 4892 raw_spin_lock(&kvm_count_lock); 4893 hardware_disable_all_nolock(); 4894 raw_spin_unlock(&kvm_count_lock); 4895 } 4896 4897 static int hardware_enable_all(void) 4898 { 4899 int r = 0; 4900 4901 raw_spin_lock(&kvm_count_lock); 4902 4903 kvm_usage_count++; 4904 if (kvm_usage_count == 1) { 4905 atomic_set(&hardware_enable_failed, 0); 4906 on_each_cpu(hardware_enable_nolock, NULL, 1); 4907 4908 if (atomic_read(&hardware_enable_failed)) { 4909 hardware_disable_all_nolock(); 4910 r = -EBUSY; 4911 } 4912 } 4913 4914 raw_spin_unlock(&kvm_count_lock); 4915 4916 return r; 4917 } 4918 4919 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4920 void *v) 4921 { 4922 /* 4923 * Some (well, at least mine) BIOSes hang on reboot if 4924 * in vmx root mode. 4925 * 4926 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down. 4927 */ 4928 pr_info("kvm: exiting hardware virtualization\n"); 4929 kvm_rebooting = true; 4930 on_each_cpu(hardware_disable_nolock, NULL, 1); 4931 return NOTIFY_OK; 4932 } 4933 4934 static struct notifier_block kvm_reboot_notifier = { 4935 .notifier_call = kvm_reboot, 4936 .priority = 0, 4937 }; 4938 4939 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4940 { 4941 int i; 4942 4943 for (i = 0; i < bus->dev_count; i++) { 4944 struct kvm_io_device *pos = bus->range[i].dev; 4945 4946 kvm_iodevice_destructor(pos); 4947 } 4948 kfree(bus); 4949 } 4950 4951 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4952 const struct kvm_io_range *r2) 4953 { 4954 gpa_t addr1 = r1->addr; 4955 gpa_t addr2 = r2->addr; 4956 4957 if (addr1 < addr2) 4958 return -1; 4959 4960 /* If r2->len == 0, match the exact address. If r2->len != 0, 4961 * accept any overlapping write. Any order is acceptable for 4962 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4963 * we process all of them.
4964 */ 4965 if (r2->len) { 4966 addr1 += r1->len; 4967 addr2 += r2->len; 4968 } 4969 4970 if (addr1 > addr2) 4971 return 1; 4972 4973 return 0; 4974 } 4975 4976 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4977 { 4978 return kvm_io_bus_cmp(p1, p2); 4979 } 4980 4981 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4982 gpa_t addr, int len) 4983 { 4984 struct kvm_io_range *range, key; 4985 int off; 4986 4987 key = (struct kvm_io_range) { 4988 .addr = addr, 4989 .len = len, 4990 }; 4991 4992 range = bsearch(&key, bus->range, bus->dev_count, 4993 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 4994 if (range == NULL) 4995 return -ENOENT; 4996 4997 off = range - bus->range; 4998 4999 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5000 off--; 5001 5002 return off; 5003 } 5004 5005 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5006 struct kvm_io_range *range, const void *val) 5007 { 5008 int idx; 5009 5010 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5011 if (idx < 0) 5012 return -EOPNOTSUPP; 5013 5014 while (idx < bus->dev_count && 5015 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5016 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5017 range->len, val)) 5018 return idx; 5019 idx++; 5020 } 5021 5022 return -EOPNOTSUPP; 5023 } 5024 5025 /* kvm_io_bus_write - called under kvm->slots_lock */ 5026 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5027 int len, const void *val) 5028 { 5029 struct kvm_io_bus *bus; 5030 struct kvm_io_range range; 5031 int r; 5032 5033 range = (struct kvm_io_range) { 5034 .addr = addr, 5035 .len = len, 5036 }; 5037 5038 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5039 if (!bus) 5040 return -ENOMEM; 5041 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5042 return r < 0 ? r : 0; 5043 } 5044 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5045 5046 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5047 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5048 gpa_t addr, int len, const void *val, long cookie) 5049 { 5050 struct kvm_io_bus *bus; 5051 struct kvm_io_range range; 5052 5053 range = (struct kvm_io_range) { 5054 .addr = addr, 5055 .len = len, 5056 }; 5057 5058 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5059 if (!bus) 5060 return -ENOMEM; 5061 5062 /* First try the device referenced by cookie. */ 5063 if ((cookie >= 0) && (cookie < bus->dev_count) && 5064 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5065 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5066 val)) 5067 return cookie; 5068 5069 /* 5070 * cookie contained garbage; fall back to search and return the 5071 * correct cookie value. 
5072 */ 5073 return __kvm_io_bus_write(vcpu, bus, &range, val); 5074 } 5075 5076 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5077 struct kvm_io_range *range, void *val) 5078 { 5079 int idx; 5080 5081 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5082 if (idx < 0) 5083 return -EOPNOTSUPP; 5084 5085 while (idx < bus->dev_count && 5086 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5087 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5088 range->len, val)) 5089 return idx; 5090 idx++; 5091 } 5092 5093 return -EOPNOTSUPP; 5094 } 5095 5096 /* kvm_io_bus_read - called under kvm->slots_lock */ 5097 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5098 int len, void *val) 5099 { 5100 struct kvm_io_bus *bus; 5101 struct kvm_io_range range; 5102 int r; 5103 5104 range = (struct kvm_io_range) { 5105 .addr = addr, 5106 .len = len, 5107 }; 5108 5109 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5110 if (!bus) 5111 return -ENOMEM; 5112 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5113 return r < 0 ? r : 0; 5114 } 5115 5116 /* Caller must hold slots_lock. */ 5117 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5118 int len, struct kvm_io_device *dev) 5119 { 5120 int i; 5121 struct kvm_io_bus *new_bus, *bus; 5122 struct kvm_io_range range; 5123 5124 bus = kvm_get_bus(kvm, bus_idx); 5125 if (!bus) 5126 return -ENOMEM; 5127 5128 /* exclude ioeventfd which is limited by maximum fd */ 5129 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5130 return -ENOSPC; 5131 5132 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5133 GFP_KERNEL_ACCOUNT); 5134 if (!new_bus) 5135 return -ENOMEM; 5136 5137 range = (struct kvm_io_range) { 5138 .addr = addr, 5139 .len = len, 5140 .dev = dev, 5141 }; 5142 5143 for (i = 0; i < bus->dev_count; i++) 5144 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5145 break; 5146 5147 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5148 new_bus->dev_count++; 5149 new_bus->range[i] = range; 5150 memcpy(new_bus->range + i + 1, bus->range + i, 5151 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5152 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5153 synchronize_srcu_expedited(&kvm->srcu); 5154 kfree(bus); 5155 5156 return 0; 5157 } 5158 5159 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5160 struct kvm_io_device *dev) 5161 { 5162 int i, j; 5163 struct kvm_io_bus *new_bus, *bus; 5164 5165 lockdep_assert_held(&kvm->slots_lock); 5166 5167 bus = kvm_get_bus(kvm, bus_idx); 5168 if (!bus) 5169 return 0; 5170 5171 for (i = 0; i < bus->dev_count; i++) { 5172 if (bus->range[i].dev == dev) { 5173 break; 5174 } 5175 } 5176 5177 if (i == bus->dev_count) 5178 return 0; 5179 5180 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5181 GFP_KERNEL_ACCOUNT); 5182 if (new_bus) { 5183 memcpy(new_bus, bus, struct_size(bus, range, i)); 5184 new_bus->dev_count--; 5185 memcpy(new_bus->range + i, bus->range + i + 1, 5186 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5187 } 5188 5189 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5190 synchronize_srcu_expedited(&kvm->srcu); 5191 5192 /* Destroy the old bus _after_ installing the (null) bus. 
*/ 5193 if (!new_bus) { 5194 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5195 for (j = 0; j < bus->dev_count; j++) { 5196 if (j == i) 5197 continue; 5198 kvm_iodevice_destructor(bus->range[j].dev); 5199 } 5200 } 5201 5202 kfree(bus); 5203 return new_bus ? 0 : -ENOMEM; 5204 } 5205 5206 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5207 gpa_t addr) 5208 { 5209 struct kvm_io_bus *bus; 5210 int dev_idx, srcu_idx; 5211 struct kvm_io_device *iodev = NULL; 5212 5213 srcu_idx = srcu_read_lock(&kvm->srcu); 5214 5215 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5216 if (!bus) 5217 goto out_unlock; 5218 5219 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5220 if (dev_idx < 0) 5221 goto out_unlock; 5222 5223 iodev = bus->range[dev_idx].dev; 5224 5225 out_unlock: 5226 srcu_read_unlock(&kvm->srcu, srcu_idx); 5227 5228 return iodev; 5229 } 5230 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5231 5232 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5233 int (*get)(void *, u64 *), int (*set)(void *, u64), 5234 const char *fmt) 5235 { 5236 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5237 inode->i_private; 5238 5239 /* 5240 * The debugfs files are a reference to the kvm struct which 5241 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe 5242 * avoids the race between open and the removal of the debugfs directory. 5243 */ 5244 if (!kvm_get_kvm_safe(stat_data->kvm)) 5245 return -ENOENT; 5246 5247 if (simple_attr_open(inode, file, get, 5248 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5249 ? set : NULL, 5250 fmt)) { 5251 kvm_put_kvm(stat_data->kvm); 5252 return -ENOMEM; 5253 } 5254 5255 return 0; 5256 } 5257 5258 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5259 { 5260 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5261 inode->i_private; 5262 5263 simple_attr_release(inode, file); 5264 kvm_put_kvm(stat_data->kvm); 5265 5266 return 0; 5267 } 5268 5269 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5270 { 5271 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5272 5273 return 0; 5274 } 5275 5276 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5277 { 5278 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5279 5280 return 0; 5281 } 5282 5283 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5284 { 5285 unsigned long i; 5286 struct kvm_vcpu *vcpu; 5287 5288 *val = 0; 5289 5290 kvm_for_each_vcpu(i, vcpu, kvm) 5291 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5292 5293 return 0; 5294 } 5295 5296 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5297 { 5298 unsigned long i; 5299 struct kvm_vcpu *vcpu; 5300 5301 kvm_for_each_vcpu(i, vcpu, kvm) 5302 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5303 5304 return 0; 5305 } 5306 5307 static int kvm_stat_data_get(void *data, u64 *val) 5308 { 5309 int r = -EFAULT; 5310 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5311 5312 switch (stat_data->kind) { 5313 case KVM_STAT_VM: 5314 r = kvm_get_stat_per_vm(stat_data->kvm, 5315 stat_data->desc->desc.offset, val); 5316 break; 5317 case KVM_STAT_VCPU: 5318 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5319 stat_data->desc->desc.offset, val); 5320 break; 5321 } 5322 5323 return r; 5324 } 5325 5326 static int kvm_stat_data_clear(void *data, u64 val) 5327 { 5328 int r = -EFAULT; 5329 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5330 5331 if (val) 5332 return -EINVAL; 5333 
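/*
 * Every per-VM/per-vCPU debugfs file is backed by a struct kvm_stat_data
 * whose descriptor records a byte offset into struct kvm_vm_stat or
 * struct kvm_vcpu_stat; the helpers above just add that offset to
 * &kvm->stat / &vcpu->stat and read or zero the u64 living there.  A
 * hedged sketch of the per-VM helper in isolation (show_remote_flushes is
 * a made-up caller; generic.remote_tlb_flush is used purely as an example
 * of a field such an offset can name):
 */
#if 0	/* illustrative sketch, not part of this file */
static void show_remote_flushes(struct kvm *kvm)
{
	size_t off = offsetof(struct kvm_vm_stat, generic.remote_tlb_flush);
	u64 flushes;

	kvm_get_stat_per_vm(kvm, off, &flushes);
	pr_info("remote TLB flushes so far: %llu\n", flushes);
}
#endif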
5334 switch (stat_data->kind) { 5335 case KVM_STAT_VM: 5336 r = kvm_clear_stat_per_vm(stat_data->kvm, 5337 stat_data->desc->desc.offset); 5338 break; 5339 case KVM_STAT_VCPU: 5340 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5341 stat_data->desc->desc.offset); 5342 break; 5343 } 5344 5345 return r; 5346 } 5347 5348 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5349 { 5350 __simple_attr_check_format("%llu\n", 0ull); 5351 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5352 kvm_stat_data_clear, "%llu\n"); 5353 } 5354 5355 static const struct file_operations stat_fops_per_vm = { 5356 .owner = THIS_MODULE, 5357 .open = kvm_stat_data_open, 5358 .release = kvm_debugfs_release, 5359 .read = simple_attr_read, 5360 .write = simple_attr_write, 5361 .llseek = no_llseek, 5362 }; 5363 5364 static int vm_stat_get(void *_offset, u64 *val) 5365 { 5366 unsigned offset = (long)_offset; 5367 struct kvm *kvm; 5368 u64 tmp_val; 5369 5370 *val = 0; 5371 mutex_lock(&kvm_lock); 5372 list_for_each_entry(kvm, &vm_list, vm_list) { 5373 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5374 *val += tmp_val; 5375 } 5376 mutex_unlock(&kvm_lock); 5377 return 0; 5378 } 5379 5380 static int vm_stat_clear(void *_offset, u64 val) 5381 { 5382 unsigned offset = (long)_offset; 5383 struct kvm *kvm; 5384 5385 if (val) 5386 return -EINVAL; 5387 5388 mutex_lock(&kvm_lock); 5389 list_for_each_entry(kvm, &vm_list, vm_list) { 5390 kvm_clear_stat_per_vm(kvm, offset); 5391 } 5392 mutex_unlock(&kvm_lock); 5393 5394 return 0; 5395 } 5396 5397 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5398 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5399 5400 static int vcpu_stat_get(void *_offset, u64 *val) 5401 { 5402 unsigned offset = (long)_offset; 5403 struct kvm *kvm; 5404 u64 tmp_val; 5405 5406 *val = 0; 5407 mutex_lock(&kvm_lock); 5408 list_for_each_entry(kvm, &vm_list, vm_list) { 5409 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5410 *val += tmp_val; 5411 } 5412 mutex_unlock(&kvm_lock); 5413 return 0; 5414 } 5415 5416 static int vcpu_stat_clear(void *_offset, u64 val) 5417 { 5418 unsigned offset = (long)_offset; 5419 struct kvm *kvm; 5420 5421 if (val) 5422 return -EINVAL; 5423 5424 mutex_lock(&kvm_lock); 5425 list_for_each_entry(kvm, &vm_list, vm_list) { 5426 kvm_clear_stat_per_vcpu(kvm, offset); 5427 } 5428 mutex_unlock(&kvm_lock); 5429 5430 return 0; 5431 } 5432 5433 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5434 "%llu\n"); 5435 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5436 5437 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5438 { 5439 struct kobj_uevent_env *env; 5440 unsigned long long created, active; 5441 5442 if (!kvm_dev.this_device || !kvm) 5443 return; 5444 5445 mutex_lock(&kvm_lock); 5446 if (type == KVM_EVENT_CREATE_VM) { 5447 kvm_createvm_count++; 5448 kvm_active_vms++; 5449 } else if (type == KVM_EVENT_DESTROY_VM) { 5450 kvm_active_vms--; 5451 } 5452 created = kvm_createvm_count; 5453 active = kvm_active_vms; 5454 mutex_unlock(&kvm_lock); 5455 5456 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5457 if (!env) 5458 return; 5459 5460 add_uevent_var(env, "CREATED=%llu", created); 5461 add_uevent_var(env, "COUNT=%llu", active); 5462 5463 if (type == KVM_EVENT_CREATE_VM) { 5464 add_uevent_var(env, "EVENT=create"); 5465 kvm->userspace_pid = task_pid_nr(current); 5466 } else if (type == KVM_EVENT_DESTROY_VM) { 5467 add_uevent_var(env, 
"EVENT=destroy"); 5468 } 5469 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5470 5471 if (kvm->debugfs_dentry) { 5472 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5473 5474 if (p) { 5475 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5476 if (!IS_ERR(tmp)) 5477 add_uevent_var(env, "STATS_PATH=%s", tmp); 5478 kfree(p); 5479 } 5480 } 5481 /* no need for checks, since we are adding at most only 5 keys */ 5482 env->envp[env->envp_idx++] = NULL; 5483 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5484 kfree(env); 5485 } 5486 5487 static void kvm_init_debug(void) 5488 { 5489 const struct file_operations *fops; 5490 const struct _kvm_stats_desc *pdesc; 5491 int i; 5492 5493 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5494 5495 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5496 pdesc = &kvm_vm_stats_desc[i]; 5497 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5498 fops = &vm_stat_fops; 5499 else 5500 fops = &vm_stat_readonly_fops; 5501 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5502 kvm_debugfs_dir, 5503 (void *)(long)pdesc->desc.offset, fops); 5504 } 5505 5506 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5507 pdesc = &kvm_vcpu_stats_desc[i]; 5508 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5509 fops = &vcpu_stat_fops; 5510 else 5511 fops = &vcpu_stat_readonly_fops; 5512 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5513 kvm_debugfs_dir, 5514 (void *)(long)pdesc->desc.offset, fops); 5515 } 5516 } 5517 5518 static int kvm_suspend(void) 5519 { 5520 if (kvm_usage_count) 5521 hardware_disable_nolock(NULL); 5522 return 0; 5523 } 5524 5525 static void kvm_resume(void) 5526 { 5527 if (kvm_usage_count) { 5528 #ifdef CONFIG_LOCKDEP 5529 WARN_ON(lockdep_is_held(&kvm_count_lock)); 5530 #endif 5531 hardware_enable_nolock(NULL); 5532 } 5533 } 5534 5535 static struct syscore_ops kvm_syscore_ops = { 5536 .suspend = kvm_suspend, 5537 .resume = kvm_resume, 5538 }; 5539 5540 static inline 5541 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5542 { 5543 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5544 } 5545 5546 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5547 { 5548 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5549 5550 WRITE_ONCE(vcpu->preempted, false); 5551 WRITE_ONCE(vcpu->ready, false); 5552 5553 __this_cpu_write(kvm_running_vcpu, vcpu); 5554 kvm_arch_sched_in(vcpu, cpu); 5555 kvm_arch_vcpu_load(vcpu, cpu); 5556 } 5557 5558 static void kvm_sched_out(struct preempt_notifier *pn, 5559 struct task_struct *next) 5560 { 5561 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5562 5563 if (current->on_rq) { 5564 WRITE_ONCE(vcpu->preempted, true); 5565 WRITE_ONCE(vcpu->ready, true); 5566 } 5567 kvm_arch_vcpu_put(vcpu); 5568 __this_cpu_write(kvm_running_vcpu, NULL); 5569 } 5570 5571 /** 5572 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5573 * 5574 * We can disable preemption locally around accessing the per-CPU variable, 5575 * and use the resolved vcpu pointer after enabling preemption again, 5576 * because even if the current thread is migrated to another CPU, reading 5577 * the per-CPU value later will give us the same value as we update the 5578 * per-CPU variable in the preempt notifier handlers. 
5579 */ 5580 struct kvm_vcpu *kvm_get_running_vcpu(void) 5581 { 5582 struct kvm_vcpu *vcpu; 5583 5584 preempt_disable(); 5585 vcpu = __this_cpu_read(kvm_running_vcpu); 5586 preempt_enable(); 5587 5588 return vcpu; 5589 } 5590 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5591 5592 /** 5593 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5594 */ 5595 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5596 { 5597 return &kvm_running_vcpu; 5598 } 5599 5600 struct kvm_cpu_compat_check { 5601 void *opaque; 5602 int *ret; 5603 }; 5604 5605 static void check_processor_compat(void *data) 5606 { 5607 struct kvm_cpu_compat_check *c = data; 5608 5609 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5610 } 5611 5612 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5613 struct module *module) 5614 { 5615 struct kvm_cpu_compat_check c; 5616 int r; 5617 int cpu; 5618 5619 r = kvm_arch_init(opaque); 5620 if (r) 5621 goto out_fail; 5622 5623 /* 5624 * kvm_arch_init makes sure there's at most one caller 5625 * for architectures that support multiple implementations, 5626 * like intel and amd on x86. 5627 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5628 * conflicts in case kvm is already setup for another implementation. 5629 */ 5630 r = kvm_irqfd_init(); 5631 if (r) 5632 goto out_irqfd; 5633 5634 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5635 r = -ENOMEM; 5636 goto out_free_0; 5637 } 5638 5639 r = kvm_arch_hardware_setup(opaque); 5640 if (r < 0) 5641 goto out_free_1; 5642 5643 c.ret = &r; 5644 c.opaque = opaque; 5645 for_each_online_cpu(cpu) { 5646 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5647 if (r < 0) 5648 goto out_free_2; 5649 } 5650 5651 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5652 kvm_starting_cpu, kvm_dying_cpu); 5653 if (r) 5654 goto out_free_2; 5655 register_reboot_notifier(&kvm_reboot_notifier); 5656 5657 /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ 5658 if (!vcpu_align) 5659 vcpu_align = __alignof__(struct kvm_vcpu); 5660 kvm_vcpu_cache = 5661 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5662 SLAB_ACCOUNT, 5663 offsetof(struct kvm_vcpu, arch), 5664 offsetofend(struct kvm_vcpu, stats_id) 5665 - offsetof(struct kvm_vcpu, arch), 5666 NULL); 5667 if (!kvm_vcpu_cache) { 5668 r = -ENOMEM; 5669 goto out_free_3; 5670 } 5671 5672 for_each_possible_cpu(cpu) { 5673 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 5674 GFP_KERNEL, cpu_to_node(cpu))) { 5675 r = -ENOMEM; 5676 goto out_free_4; 5677 } 5678 } 5679 5680 r = kvm_async_pf_init(); 5681 if (r) 5682 goto out_free_5; 5683 5684 kvm_chardev_ops.owner = module; 5685 kvm_vm_fops.owner = module; 5686 kvm_vcpu_fops.owner = module; 5687 5688 r = misc_register(&kvm_dev); 5689 if (r) { 5690 pr_err("kvm: misc device register failed\n"); 5691 goto out_unreg; 5692 } 5693 5694 register_syscore_ops(&kvm_syscore_ops); 5695 5696 kvm_preempt_ops.sched_in = kvm_sched_in; 5697 kvm_preempt_ops.sched_out = kvm_sched_out; 5698 5699 kvm_init_debug(); 5700 5701 r = kvm_vfio_ops_init(); 5702 WARN_ON(r); 5703 5704 return 0; 5705 5706 out_unreg: 5707 kvm_async_pf_deinit(); 5708 out_free_5: 5709 for_each_possible_cpu(cpu) 5710 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5711 out_free_4: 5712 kmem_cache_destroy(kvm_vcpu_cache); 5713 out_free_3: 5714 unregister_reboot_notifier(&kvm_reboot_notifier); 5715 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5716 out_free_2: 5717 kvm_arch_hardware_unsetup(); 5718 out_free_1: 5719 free_cpumask_var(cpus_hardware_enabled); 5720 out_free_0: 5721 kvm_irqfd_exit(); 5722 out_irqfd: 5723 kvm_arch_exit(); 5724 out_fail: 5725 return r; 5726 } 5727 EXPORT_SYMBOL_GPL(kvm_init); 5728 5729 void kvm_exit(void) 5730 { 5731 int cpu; 5732 5733 debugfs_remove_recursive(kvm_debugfs_dir); 5734 misc_deregister(&kvm_dev); 5735 for_each_possible_cpu(cpu) 5736 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5737 kmem_cache_destroy(kvm_vcpu_cache); 5738 kvm_async_pf_deinit(); 5739 unregister_syscore_ops(&kvm_syscore_ops); 5740 unregister_reboot_notifier(&kvm_reboot_notifier); 5741 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5742 on_each_cpu(hardware_disable_nolock, NULL, 1); 5743 kvm_arch_hardware_unsetup(); 5744 kvm_arch_exit(); 5745 kvm_irqfd_exit(); 5746 free_cpumask_var(cpus_hardware_enabled); 5747 kvm_vfio_ops_exit(); 5748 } 5749 EXPORT_SYMBOL_GPL(kvm_exit); 5750 5751 struct kvm_vm_worker_thread_context { 5752 struct kvm *kvm; 5753 struct task_struct *parent; 5754 struct completion init_done; 5755 kvm_vm_thread_fn_t thread_fn; 5756 uintptr_t data; 5757 int err; 5758 }; 5759 5760 static int kvm_vm_worker_thread(void *context) 5761 { 5762 /* 5763 * The init_context is allocated on the stack of the parent thread, so 5764 * we have to locally copy anything that is needed beyond initialization 5765 */ 5766 struct kvm_vm_worker_thread_context *init_context = context; 5767 struct kvm *kvm = init_context->kvm; 5768 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5769 uintptr_t data = init_context->data; 5770 int err; 5771 5772 err = kthread_park(current); 5773 /* kthread_park(current) is never supposed to return an error */ 5774 WARN_ON(err != 0); 5775 if (err) 5776 goto init_complete; 5777 5778 err = cgroup_attach_task_all(init_context->parent, current); 5779 if (err) { 5780 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5781 __func__, err); 5782 goto init_complete; 5783 } 5784 5785 set_user_nice(current, task_nice(init_context->parent)); 5786 
5787 init_complete: 5788 init_context->err = err; 5789 complete(&init_context->init_done); 5790 init_context = NULL; 5791 5792 if (err) 5793 return err; 5794 5795 /* Wait to be woken up by the spawner before proceeding. */ 5796 kthread_parkme(); 5797 5798 if (!kthread_should_stop()) 5799 err = thread_fn(kvm, data); 5800 5801 return err; 5802 } 5803 5804 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5805 uintptr_t data, const char *name, 5806 struct task_struct **thread_ptr) 5807 { 5808 struct kvm_vm_worker_thread_context init_context = {}; 5809 struct task_struct *thread; 5810 5811 *thread_ptr = NULL; 5812 init_context.kvm = kvm; 5813 init_context.parent = current; 5814 init_context.thread_fn = thread_fn; 5815 init_context.data = data; 5816 init_completion(&init_context.init_done); 5817 5818 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5819 "%s-%d", name, task_pid_nr(current)); 5820 if (IS_ERR(thread)) 5821 return PTR_ERR(thread); 5822 5823 /* kthread_run is never supposed to return NULL */ 5824 WARN_ON(thread == NULL); 5825 5826 wait_for_completion(&init_context.init_done); 5827 5828 if (!init_context.err) 5829 *thread_ptr = thread; 5830 5831 return init_context.err; 5832 } 5833
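/*
 * kvm_vm_create_worker_thread() hands the new kthread back in the parked
 * state: kvm_vm_worker_thread() finishes the cgroup/priority setup,
 * signals the creator, and then sits in kthread_parkme(), so thread_fn
 * only starts running once the caller unparks the task (and the thread is
 * eventually torn down with kthread_stop()).  A hedged sketch of a
 * possible caller; "my_recovery_fn" and "my_vm_init" are made-up names,
 * not existing users of this helper.
 */
#if 0	/* illustrative sketch, not part of this file */
static int my_recovery_fn(struct kvm *kvm, uintptr_t data)
{
	/* Runs in the worker kthread once the creator unparks it. */
	while (!kthread_should_stop()) {
		/* ... periodic per-VM housekeeping ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int my_vm_init(struct kvm *kvm)
{
	struct task_struct *thread;
	int err;

	err = kvm_vm_create_worker_thread(kvm, my_recovery_fn, 0,
					  "my-recovery", &thread);
	if (err)
		return err;

	kthread_unpark(thread);		/* lets my_recovery_fn() start */
	return 0;
}
#endif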