// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "mmu_lock.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
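
/*
 * Illustrative sketch (not part of the build): how the knobs above are
 * typically combined by the per-vcpu halt-polling code later in this file.
 * The function names here are hypothetical; see grow_halt_poll_ns() and
 * shrink_halt_poll_ns() for the authoritative logic.
 *
 *	unsigned int example_grow(unsigned int val)
 *	{
 *		if (!val)
 *			return halt_poll_ns_grow_start;	// start polling at 10us
 *		return val * halt_poll_ns_grow;		// then grow on each miss
 *	}
 *
 *	unsigned int example_shrink(unsigned int val)
 *	{
 *		// shrink == 0 means "reset to zero" rather than divide
 *		return halt_poll_ns_shrink ? val / halt_poll_ns_shrink : 0;
 *	}
 */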
/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
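
/*
 * Illustrative sketch (not part of the build): the consumer side of the
 * request machinery above.  Arch vcpu-run loops drain pending requests
 * roughly like this before entering the guest.  kvm_request_pending() and
 * kvm_check_request() are the real helpers (kvm_host.h); the surrounding
 * loop and the arch handlers are hypothetical.
 *
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			example_flush_guest_tlb(vcpu);	// arch-specific
 *		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 *			example_reload_mmu(vcpu);	// arch-specific
 *	}
 *	// kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH) on another CPU
 *	// sets the request bit and IPIs vcpus that are IN_GUEST_MODE.
 */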
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
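
/*
 * Illustrative sketch (not part of the build): the intended calling pattern
 * for the cache above, as used by arch page-fault handlers.  Topup runs in
 * sleepable context so the allocation under mmu_lock can neither fail nor
 * sleep.  The fault handler and the cache field name (which varies by
 * architecture) are assumptions of the example.
 *
 *	static int example_page_fault(struct kvm_vcpu *vcpu, ...)
 *	{
 *		int r;
 *
 *		// may sleep: refill the per-vcpu cache before taking mmu_lock
 *		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 4);
 *		if (r)
 *			return r;
 *
 *		KVM_MMU_LOCK(vcpu->kvm);
 *		// cannot sleep here; consume pre-allocated objects instead
 *		pte = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *		...
 *		KVM_MMU_UNLOCK(vcpu->kvm);
 *		return 0;
 *	}
 */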
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};
/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	/* The on_lock() path does not yet support lock elision. */
	if (!IS_KVM_NULL_FN(range->on_lock)) {
		locked = true;
		KVM_MMU_LOCK(kvm);

		range->on_lock(kvm, range->start, range->end);

		if (IS_KVM_NULL_FN(range->handler))
			goto out_unlock;
	}

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, slots) {
			unsigned long hva_start, hva_end;

			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
		kvm_flush_remote_tlbs(kvm);

out_unlock:
	if (locked)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}(),
	 * and so always runs with an elevated notifier count.  This obviates
	 * the need to bump the sequence count.
	 */
	WARN_ON_ONCE(!kvm->mmu_notifier_count);

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}
static void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

static void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	__kvm_handle_hva_range(kvm, &hva_range);

	BUG_ON(kvm->mmu_notifier_count < 0);
}
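
/*
 * Illustrative sketch (not part of the build): the reader side of the
 * count/seq protocol above, as used by arch page-fault paths.
 * mmu_notifier_retry() is the real helper (kvm_host.h); the surrounding
 * fault-handler flow is hypothetical.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();				// pairs with the smp_wmb() above
 *	pfn = gfn_to_pfn(kvm, gfn);		// may fault, may sleep
 *
 *	KVM_MMU_LOCK(kvm);
 *	if (mmu_notifier_retry(kvm, mmu_seq)) {
 *		// an invalidate ran (or is running) since the snapshot:
 *		// the pfn may be stale, so bail out and retry the fault
 *		goto out_unlock;
 *	}
 *	... establish the spte ...
 */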
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->dbgfs_item = p;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}
	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r = -ENOMEM;
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_arch_destroy_vm;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}
static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
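
/*
 * Illustrative sketch (not part of the build): the fd-installation pattern
 * that kvm_put_kvm_no_destroy() exists for, modeled on the vcpu/device
 * creation paths elsewhere in this file.  Helper names other than
 * kvm_get_kvm()/kvm_put_kvm_no_destroy() are hypothetical.
 *
 *	kvm_get_kvm(kvm);		// reference owned by the new fd
 *	fd = create_example_fd(...);	// e.g. via anon_inode_getfd()
 *	if (fd < 0) {
 *		// the fd was never installed, so the reference could not
 *		// have leaked to userspace; drop it without risking that
 *		// the refcount hits zero while @kvm is still in use
 *		kvm_put_kvm_no_destroy(kvm);
 *		return fd;
 *	}
 */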
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

/*
 * Delete a memslot by decrementing the number of used slots and shifting all
 * other entries in the array forward one spot.
 */
static inline void kvm_memslot_delete(struct kvm_memslots *slots,
				      struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
		return;

	slots->used_slots--;

	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
		atomic_set(&slots->lru_slot, 0);

	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	mslots[i] = *memslot;
	slots->id_to_index[memslot->id] = -1;
}

/*
 * "Insert" a new memslot by incrementing the number of used slots.  Returns
 * the new slot's initial index into the memslots array.
 */
static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
{
	return slots->used_slots++;
}

/*
 * Move a changed memslot backwards in the array by shifting existing slots
 * with a higher GFN toward the front of the array.  Note, the changed memslot
 * itself is not preserved in the array, i.e. not swapped at this time, only
 * its new index into the array is tracked.  Returns the changed memslot's
 * current index into the memslots array.
 */
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
					    struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
	    WARN_ON_ONCE(!slots->used_slots))
		return -1;

	/*
	 * Move the target memslot backward in the array by shifting existing
	 * memslots with a higher GFN (than the target memslot) towards the
	 * front of the array.
	 */
	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
		if (memslot->base_gfn > mslots[i + 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);

		/* Shift the next memslot forward one and update its index. */
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Move a changed memslot forwards in the array by shifting existing slots with
 * a lower GFN toward the back of the array.  Note, the changed memslot itself
 * is not preserved in the array, i.e. not swapped at this time, only its new
 * index into the array is tracked.  Returns the changed memslot's final index
 * into the memslots array.
 */
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
					   struct kvm_memory_slot *memslot,
					   int start)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	for (i = start; i > 0; i--) {
		if (memslot->base_gfn < mslots[i - 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);

		/* Shift the next memslot back one and update its index. */
		mslots[i] = mslots[i - 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}
/*
 * Re-sort memslots based on their GFN to account for an added, deleted, or
 * moved memslot.  Sorting memslots by GFN allows using a binary search during
 * memslot lookup.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!  I.e. the entry
 * at memslots[0] has the highest GFN.
 *
 * The sorting algorithm takes advantage of having initially sorted memslots
 * and knowing the position of the changed memslot.  Sorting is also optimized
 * by not swapping the updated memslot and instead only shifting other memslots
 * and tracking the new index for the updated memslot.  Only once its final
 * index is known is the updated memslot copied into its position in the array.
 *
 *  - When deleting a memslot, the deleted memslot simply needs to be moved to
 *    the end of the array.
 *
 *  - When creating a memslot, the algorithm "inserts" the new memslot at the
 *    end of the array and then moves it forward to its correct location.
 *
 *  - When moving a memslot, the algorithm first moves the updated memslot
 *    backward to handle the scenario where the memslot's GFN was changed to a
 *    lower value.  update_memslots() then falls through and runs the same flow
 *    as creating a memslot to move the memslot forward to handle the scenario
 *    where its GFN was changed to a higher value.
 *
 * Note, slots are sorted from highest->lowest instead of lowest->highest for
 * historical reasons.  Originally, invalid memslots were denoted by having
 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
 * to the end of the array.  The current algorithm uses dedicated logic to
 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
 *
 * The other historical motivation for highest->lowest was to improve the
 * performance of memslot lookup.  KVM originally used a linear search starting
 * at memslots[0].  On x86, the largest memslot usually has one of the highest,
 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
 * single memslot above the 4gb boundary.  As the largest memslot is also the
 * most likely to be referenced, sorting it to the front of the array was
 * advantageous.  The current binary search starts from the middle of the array
 * and uses an LRU pointer to improve performance for all memslots and GFNs.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *memslot,
			    enum kvm_mr_change change)
{
	int i;

	if (change == KVM_MR_DELETE) {
		kvm_memslot_delete(slots, memslot);
	} else {
		if (change == KVM_MR_CREATE)
			i = kvm_memslot_insert_back(slots);
		else
			i = kvm_memslot_move_backward(slots, memslot);
		i = kvm_memslot_move_forward(slots, memslot, i);

		/*
		 * Copy the memslot to its new position in memslots and update
		 * its index accordingly.
		 */
		slots->memslots[i] = *memslot;
		slots->id_to_index[memslot->id] = i;
	}
}
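
/*
 * Illustrative sketch (not part of the build): a gfn lookup over the
 * descending sort order maintained above.  This mirrors the real lookup in
 * search_memslots() (include/linux/kvm_host.h); the version here is a
 * simplified, hypothetical rendering without the LRU fast path.
 *
 *	int lo = 0, hi = slots->used_slots - 1;	// memslots[0] has highest GFN
 *
 *	while (lo <= hi) {
 *		int mid = (lo + hi) / 2;
 *		struct kvm_memory_slot *s = &slots->memslots[mid];
 *
 *		if (gfn >= s->base_gfn + s->npages)
 *			hi = mid - 1;	// gfn above this slot: search left
 *		else if (gfn < s->base_gfn)
 *			lo = mid + 1;	// gfn below this slot: search right
 *		else
 *			return s;	// gfn falls inside this slot
 *	}
 *	return NULL;
 */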
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;

	return old_memslots;
}
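
/*
 * Worked example (illustrative) of the generation arithmetic above, with
 * KVM_ADDRESS_SPACE_NUM == 2 and address space 0 starting at generation 0:
 *
 *	old generation:      0
 *	during the update:   0 | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS
 *	after the update:    0 + 2 = 2   (then 4, 6, 8, ... on later updates)
 *
 * Address space 1 starts at 1 and advances 1, 3, 5, ..., so a cached
 * generation number identifies both the address space it belongs to and
 * whether an update was in flight when it was cached.
 */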
/*
 * Note, at a minimum, the current number of used slots must be allocated, even
 * when deleting a memslot, as we need a complete duplicate of the memslots for
 * use when invalidating a memslot prior to deleting/moving the memslot.
 */
static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
					     enum kvm_mr_change change)
{
	struct kvm_memslots *slots;
	size_t old_size, new_size;

	old_size = sizeof(struct kvm_memslots) +
		   (sizeof(struct kvm_memory_slot) * old->used_slots);

	if (change == KVM_MR_CREATE)
		new_size = old_size + sizeof(struct kvm_memory_slot);
	else
		new_size = old_size;

	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
	if (likely(slots))
		memcpy(slots, old, old_size);

	return slots;
}

static int kvm_set_memslot(struct kvm *kvm,
			   const struct kvm_userspace_memory_region *mem,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new, int as_id,
			   enum kvm_mr_change change)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int r;

	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
	if (!slots)
		return -ENOMEM;

	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		/*
		 * Note, the INVALID flag needs to be in the appropriate entry
		 * in the freshly allocated memslots, not in @old or @new.
		 */
		slot = id_to_memslot(slots, old->id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		/*
		 * We can re-use the old memslots, the only difference from the
		 * newly installed memslots is the invalid flag, which will get
		 * dropped by update_memslots anyway.  We'll also revert to the
		 * old memslots if preparing the new memory region fails.
		 */
		slots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * - kvm_is_visible_gfn (mmu_check_root)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);
	}

	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
	if (r)
		goto out_slots;

	update_memslots(slots, new, change);
	slots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, old, new, change);

	kvfree(slots);
	return 0;

out_slots:
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
		slots = install_new_memslots(kvm, as_id, slots);
	kvfree(slots);
	return r;
}

static int kvm_delete_memslot(struct kvm *kvm,
			      const struct kvm_userspace_memory_region *mem,
			      struct kvm_memory_slot *old, int as_id)
{
	struct kvm_memory_slot new;
	int r;

	if (!old->npages)
		return -EINVAL;

	memset(&new, 0, sizeof(new));
	new.id = old->id;
	/*
	 * This is only for debugging purpose; it should never be referenced
	 * for a removed memslot.
	 */
	new.as_id = as_id;

	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
	if (r)
		return r;

	kvm_free_memslot(kvm, old);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	struct kvm_memory_slot old, new;
	struct kvm_memory_slot *tmp;
	enum kvm_mr_change change;
	int as_id, id;
	int r;

	r = check_memory_region_flags(mem);
	if (r)
		return r;

	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size))
		return -EINVAL;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;

	/*
	 * Make a full copy of the old memslot, the pointer will become stale
	 * when the memslots are re-sorted by update_memslots(), and the old
	 * memslot needs to be referenced after calling update_memslots(), e.g.
	 * to free its resources and for arch specific behavior.
	 */
	tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	if (tmp) {
		old = *tmp;
		tmp = NULL;
	} else {
		memset(&old, 0, sizeof(old));
		old.id = id;
	}

	if (!mem->memory_size)
		return kvm_delete_memslot(kvm, mem, &old, as_id);

	new.as_id = as_id;
	new.id = id;
	new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	new.npages = mem->memory_size >> PAGE_SHIFT;
	new.flags = mem->flags;
	new.userspace_addr = mem->userspace_addr;

	if (new.npages > KVM_MEM_MAX_NR_PAGES)
		return -EINVAL;

	if (!old.npages) {
		change = KVM_MR_CREATE;
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	} else { /* Modify an existing slot. */
		if ((new.userspace_addr != old.userspace_addr) ||
		    (new.npages != old.npages) ||
		    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
			return -EINVAL;

		if (new.base_gfn != old.base_gfn)
			change = KVM_MR_MOVE;
		else if (new.flags != old.flags)
			change = KVM_MR_FLAGS_ONLY;
		else /* Nothing to change. */
			return 0;

		/* Copy dirty_bitmap and arch from the current memslot. */
		new.dirty_bitmap = old.dirty_bitmap;
		memcpy(&new.arch, &old.arch, sizeof(new.arch));
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
			if (tmp->id == id)
				continue;
			if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
			      (new.base_gfn >= tmp->base_gfn + tmp->npages)))
				return -EEXIST;
		}
	}

	/* Allocate/free page dirty bitmap as needed */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;
	else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
		r = kvm_alloc_dirty_bitmap(&new);
		if (r)
			return r;

		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			bitmap_set(new.dirty_bitmap, 0, new.npages);
	}

	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
	if (r)
		goto out_bitmap;

	if (old.dirty_bitmap && !new.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&old);
	return 0;

out_bitmap:
	if (new.dirty_bitmap && !old.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&new);
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}
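
/*
 * Illustrative userspace sketch (not part of the build): registering guest
 * memory via KVM_SET_USER_MEMORY_REGION, including the "as_id << 16 | id"
 * slot encoding decoded by __kvm_set_memory_region() above.  The mmap'd
 * backing store and the variable names are assumptions of the example.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = (as_id << 16) | slot_id,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000000ULL,	// above 4gb
 *		.memory_size     = size,		// page-aligned
 *		.userspace_addr  = (__u64)mem,		// page-aligned mmap()
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 *
 * Passing memory_size == 0 for an existing slot deletes it.
 */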
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log - get a snapshot of dirty pages
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	set to '1' if any dirty pages were found
 * @memslot:	set to the associated memslot, always valid on success
 */
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	/* Dirty ring tracking is exclusive to dirty log tracking */
	if (kvm->dirty_ring_size)
		return -ENXIO;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	/* Dirty ring tracking is exclusive to dirty log tracking */
	if (kvm->dirty_ring_size)
		return -ENXIO;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	kvm_arch_sync_dirty_log(kvm, memslot);

	n = kvm_dirty_bitmap_bytes(memslot);
	flush = false;
	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures will
		 * transition to kvm_get_dirty_log_protect so that
		 * kvm_get_dirty_log can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;
	} else {
		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		KVM_MMU_LOCK(kvm);
		for (i = 0; i < n / sizeof(long); i++) {
			unsigned long mask;
			gfn_t offset;

			if (!dirty_bitmap[i])
				continue;

			flush = true;
			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
		KVM_MMU_UNLOCK(kvm);
	}

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}


/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm:	kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging.  See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt.  Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read.  Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
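
/*
 * Illustrative userspace sketch (not part of the build): fetching the dirty
 * bitmap for a slot with KVM_GET_DIRTY_LOG.  The caller owns the bitmap
 * buffer, sized at one bit per page of the memslot; the sizing and variable
 * names are assumptions of the example.
 *
 *	unsigned long *bitmap = calloc(npages / 64 + 1, sizeof(*bitmap));
 *	struct kvm_dirty_log log = {
 *		.slot = (as_id << 16) | slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		err(1, "KVM_GET_DIRTY_LOG");
 *	// with manual protect enabled (KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
 *	// follow up with KVM_CLEAR_DIRTY_LOG to re-protect pages in batches.
 */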
/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address from which to fetch the bitmap of dirty pages
 */
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
				       struct kvm_clear_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int as_id, id;
	gfn_t offset;
	unsigned long i, n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	/* Dirty ring tracking is exclusive to dirty log tracking */
	if (kvm->dirty_ring_size)
		return -ENXIO;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	if (log->first_page & 63)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;

	if (log->first_page > memslot->npages ||
	    log->num_pages > memslot->npages - log->first_page ||
	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
		return -EINVAL;

	kvm_arch_sync_dirty_log(kvm, memslot);

	flush = false;
	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
		return -EFAULT;

	KVM_MMU_LOCK(kvm);
	for (offset = log->first_page, i = offset / BITS_PER_LONG,
		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
	     i++, offset += BITS_PER_LONG) {
		unsigned long mask = *dirty_bitmap_buffer++;
		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
		if (!mask)
			continue;

		mask &= atomic_long_fetch_andnot(mask, p);

		/*
		 * mask contains the bits that really have been cleared.  This
		 * never includes any bits beyond the length of the memslot (if
		 * the length is not aligned to 64 pages), therefore it is not
		 * a problem if userspace sets them in log->dirty_bitmap.
		 */
		if (mask) {
			flush = true;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}
	KVM_MMU_UNLOCK(kvm);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

	return 0;
}
static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
					struct kvm_clear_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_clear_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	mmap_read_unlock(current->mm);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
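
/*
 * Illustrative note (not part of the build): the translation performed by
 * __gfn_to_hva_memslot() (kvm_host.h) is a simple linear mapping within
 * the slot:
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 *
 * e.g. a slot with base_gfn = 0x100000 and userspace_addr = 0x7f0000000000
 * maps gfn 0x100123 to hva 0x7f0000123000 (with 4KiB pages).
 */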
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The fast path to get the writable pfn, which is stored in @pfn;
 * true indicates success, otherwise false is returned. It's also the
 * only part that runs if we are in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows mapping a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
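 *
 * When @async is non-NULL, FOLL_NOWAIT is added to the gup flags so the
 * walk bails out instead of waiting for I/O; the caller may then set up
 * an asynchronous page fault and retry later.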
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	unsigned int flags = FOLL_HWPOISON;
	struct page *page;
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (write_fault)
		flags |= FOLL_WRITE;
	if (async)
		flags |= FOLL_NOWAIT;

	npages = get_user_pages_unlocked(addr, 1, &page, flags);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage;

		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
			*writable = true;
			put_page(page);
			page = wpage;
		}
	}
	*pfn = page_to_pfn(page);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, bool *writable,
			       kvm_pfn_t *p_pfn)
{
	kvm_pfn_t pfn;
	pte_t *ptep;
	spinlock_t *ptl;
	int r;

	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
	if (r) {
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),
				     &unlocked);
		if (unlocked)
			return -EAGAIN;
		if (r)
			return r;

		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
		if (r)
			return r;
	}

	if (write_fault && !pte_write(*ptep)) {
		pfn = KVM_PFN_ERR_RO_FAULT;
		goto out;
	}

	if (writable)
		*writable = pte_write(*ptep);
	pfn = pte_pfn(*ptep);

	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 */
	kvm_get_pfn(pfn);

out:
	pte_unmap_unlock(ptep, ptl);
	*p_pfn = pfn;
	return 0;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether the call is made from atomic context; if true, the
 *          function must not sleep and only the fast path is tried
 * @async: whether this function needs to wait for I/O to complete if the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether mapping a writable host page for a !@write_fault
 *            request is allowed
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
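 *
 * An illustrative (hypothetical) call for case 2):
 *
 *	bool writable;
 *	kvm_pfn_t pfn = hva_to_pfn(addr, false, NULL, false, &writable);
 *
 * i.e. a read fault that may opportunistically be mapped writable;
 * @writable then reports whether the returned pfn actually is.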
2128 */ 2129 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 2130 bool write_fault, bool *writable) 2131 { 2132 struct vm_area_struct *vma; 2133 kvm_pfn_t pfn = 0; 2134 int npages, r; 2135 2136 /* we can do it either atomically or asynchronously, not both */ 2137 BUG_ON(atomic && async); 2138 2139 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2140 return pfn; 2141 2142 if (atomic) 2143 return KVM_PFN_ERR_FAULT; 2144 2145 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 2146 if (npages == 1) 2147 return pfn; 2148 2149 mmap_read_lock(current->mm); 2150 if (npages == -EHWPOISON || 2151 (!async && check_user_page_hwpoison(addr))) { 2152 pfn = KVM_PFN_ERR_HWPOISON; 2153 goto exit; 2154 } 2155 2156 retry: 2157 vma = find_vma_intersection(current->mm, addr, addr + 1); 2158 2159 if (vma == NULL) 2160 pfn = KVM_PFN_ERR_FAULT; 2161 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2162 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); 2163 if (r == -EAGAIN) 2164 goto retry; 2165 if (r < 0) 2166 pfn = KVM_PFN_ERR_FAULT; 2167 } else { 2168 if (async && vma_is_valid(vma, write_fault)) 2169 *async = true; 2170 pfn = KVM_PFN_ERR_FAULT; 2171 } 2172 exit: 2173 mmap_read_unlock(current->mm); 2174 return pfn; 2175 } 2176 2177 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, 2178 bool atomic, bool *async, bool write_fault, 2179 bool *writable, hva_t *hva) 2180 { 2181 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2182 2183 if (hva) 2184 *hva = addr; 2185 2186 if (addr == KVM_HVA_ERR_RO_BAD) { 2187 if (writable) 2188 *writable = false; 2189 return KVM_PFN_ERR_RO_FAULT; 2190 } 2191 2192 if (kvm_is_error_hva(addr)) { 2193 if (writable) 2194 *writable = false; 2195 return KVM_PFN_NOSLOT; 2196 } 2197 2198 /* Do not map writable pfn in the readonly memslot. 
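	 * A caller-provided @writable is reported as false, and the pointer
	 * is then cleared so that hva_to_pfn() below does not
	 * opportunistically upgrade the mapping to writable.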
*/ 2199 if (writable && memslot_is_readonly(slot)) { 2200 *writable = false; 2201 writable = NULL; 2202 } 2203 2204 return hva_to_pfn(addr, atomic, async, write_fault, 2205 writable); 2206 } 2207 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2208 2209 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2210 bool *writable) 2211 { 2212 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2213 write_fault, writable, NULL); 2214 } 2215 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2216 2217 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 2218 { 2219 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2220 } 2221 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2222 2223 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 2224 { 2225 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2226 } 2227 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2228 2229 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2230 { 2231 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2232 } 2233 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2234 2235 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2236 { 2237 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2238 } 2239 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2240 2241 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2242 { 2243 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2244 } 2245 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2246 2247 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2248 struct page **pages, int nr_pages) 2249 { 2250 unsigned long addr; 2251 gfn_t entry = 0; 2252 2253 addr = gfn_to_hva_many(slot, gfn, &entry); 2254 if (kvm_is_error_hva(addr)) 2255 return -1; 2256 2257 if (entry < nr_pages) 2258 return 0; 2259 2260 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2261 } 2262 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2263 2264 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2265 { 2266 if (is_error_noslot_pfn(pfn)) 2267 return KVM_ERR_PTR_BAD_PAGE; 2268 2269 if (kvm_is_reserved_pfn(pfn)) { 2270 WARN_ON(1); 2271 return KVM_ERR_PTR_BAD_PAGE; 2272 } 2273 2274 return pfn_to_page(pfn); 2275 } 2276 2277 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2278 { 2279 kvm_pfn_t pfn; 2280 2281 pfn = gfn_to_pfn(kvm, gfn); 2282 2283 return kvm_pfn_to_page(pfn); 2284 } 2285 EXPORT_SYMBOL_GPL(gfn_to_page); 2286 2287 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) 2288 { 2289 if (pfn == 0) 2290 return; 2291 2292 if (cache) 2293 cache->pfn = cache->gfn = 0; 2294 2295 if (dirty) 2296 kvm_release_pfn_dirty(pfn); 2297 else 2298 kvm_release_pfn_clean(pfn); 2299 } 2300 2301 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, 2302 struct gfn_to_pfn_cache *cache, u64 gen) 2303 { 2304 kvm_release_pfn(cache->pfn, cache->dirty, cache); 2305 2306 cache->pfn = gfn_to_pfn_memslot(slot, gfn); 2307 cache->gfn = gfn; 2308 cache->dirty = false; 2309 cache->generation = gen; 2310 } 2311 2312 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, 2313 struct kvm_host_map *map, 2314 struct gfn_to_pfn_cache *cache, 2315 bool atomic) 2316 { 2317 kvm_pfn_t pfn; 2318 void *hva = NULL; 2319 struct page *page = KVM_UNMAPPED_PAGE; 2320 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); 2321 u64 gen = slots->generation; 2322 2323 if (!map) 2324 return -EINVAL; 2325 2326 if (cache) { 2327 if 
(!cache->pfn || cache->gfn != gfn || 2328 cache->generation != gen) { 2329 if (atomic) 2330 return -EAGAIN; 2331 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); 2332 } 2333 pfn = cache->pfn; 2334 } else { 2335 if (atomic) 2336 return -EAGAIN; 2337 pfn = gfn_to_pfn_memslot(slot, gfn); 2338 } 2339 if (is_error_noslot_pfn(pfn)) 2340 return -EINVAL; 2341 2342 if (pfn_valid(pfn)) { 2343 page = pfn_to_page(pfn); 2344 if (atomic) 2345 hva = kmap_atomic(page); 2346 else 2347 hva = kmap(page); 2348 #ifdef CONFIG_HAS_IOMEM 2349 } else if (!atomic) { 2350 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2351 } else { 2352 return -EINVAL; 2353 #endif 2354 } 2355 2356 if (!hva) 2357 return -EFAULT; 2358 2359 map->page = page; 2360 map->hva = hva; 2361 map->pfn = pfn; 2362 map->gfn = gfn; 2363 2364 return 0; 2365 } 2366 2367 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, 2368 struct gfn_to_pfn_cache *cache, bool atomic) 2369 { 2370 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, 2371 cache, atomic); 2372 } 2373 EXPORT_SYMBOL_GPL(kvm_map_gfn); 2374 2375 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2376 { 2377 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, 2378 NULL, false); 2379 } 2380 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2381 2382 static void __kvm_unmap_gfn(struct kvm *kvm, 2383 struct kvm_memory_slot *memslot, 2384 struct kvm_host_map *map, 2385 struct gfn_to_pfn_cache *cache, 2386 bool dirty, bool atomic) 2387 { 2388 if (!map) 2389 return; 2390 2391 if (!map->hva) 2392 return; 2393 2394 if (map->page != KVM_UNMAPPED_PAGE) { 2395 if (atomic) 2396 kunmap_atomic(map->hva); 2397 else 2398 kunmap(map->page); 2399 } 2400 #ifdef CONFIG_HAS_IOMEM 2401 else if (!atomic) 2402 memunmap(map->hva); 2403 else 2404 WARN_ONCE(1, "Unexpected unmapping in atomic context"); 2405 #endif 2406 2407 if (dirty) 2408 mark_page_dirty_in_slot(kvm, memslot, map->gfn); 2409 2410 if (cache) 2411 cache->dirty |= dirty; 2412 else 2413 kvm_release_pfn(map->pfn, dirty, NULL); 2414 2415 map->hva = NULL; 2416 map->page = NULL; 2417 } 2418 2419 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 2420 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) 2421 { 2422 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, 2423 cache, dirty, atomic); 2424 return 0; 2425 } 2426 EXPORT_SYMBOL_GPL(kvm_unmap_gfn); 2427 2428 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2429 { 2430 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), 2431 map, NULL, dirty, false); 2432 } 2433 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2434 2435 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2436 { 2437 kvm_pfn_t pfn; 2438 2439 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2440 2441 return kvm_pfn_to_page(pfn); 2442 } 2443 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2444 2445 void kvm_release_page_clean(struct page *page) 2446 { 2447 WARN_ON(is_error_page(page)); 2448 2449 kvm_release_pfn_clean(page_to_pfn(page)); 2450 } 2451 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2452 2453 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2454 { 2455 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2456 put_page(pfn_to_page(pfn)); 2457 } 2458 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2459 2460 void kvm_release_page_dirty(struct page *page) 2461 { 2462 WARN_ON(is_error_page(page)); 2463 2464 kvm_release_pfn_dirty(page_to_pfn(page)); 2465 } 2466 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2467 2468 void 
kvm_release_pfn_dirty(kvm_pfn_t pfn) 2469 { 2470 kvm_set_pfn_dirty(pfn); 2471 kvm_release_pfn_clean(pfn); 2472 } 2473 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2474 2475 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2476 { 2477 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2478 SetPageDirty(pfn_to_page(pfn)); 2479 } 2480 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2481 2482 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2483 { 2484 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2485 mark_page_accessed(pfn_to_page(pfn)); 2486 } 2487 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2488 2489 void kvm_get_pfn(kvm_pfn_t pfn) 2490 { 2491 if (!kvm_is_reserved_pfn(pfn)) 2492 get_page(pfn_to_page(pfn)); 2493 } 2494 EXPORT_SYMBOL_GPL(kvm_get_pfn); 2495 2496 static int next_segment(unsigned long len, int offset) 2497 { 2498 if (len > PAGE_SIZE - offset) 2499 return PAGE_SIZE - offset; 2500 else 2501 return len; 2502 } 2503 2504 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2505 void *data, int offset, int len) 2506 { 2507 int r; 2508 unsigned long addr; 2509 2510 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2511 if (kvm_is_error_hva(addr)) 2512 return -EFAULT; 2513 r = __copy_from_user(data, (void __user *)addr + offset, len); 2514 if (r) 2515 return -EFAULT; 2516 return 0; 2517 } 2518 2519 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2520 int len) 2521 { 2522 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2523 2524 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2525 } 2526 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2527 2528 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2529 int offset, int len) 2530 { 2531 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2532 2533 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2534 } 2535 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2536 2537 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2538 { 2539 gfn_t gfn = gpa >> PAGE_SHIFT; 2540 int seg; 2541 int offset = offset_in_page(gpa); 2542 int ret; 2543 2544 while ((seg = next_segment(len, offset)) != 0) { 2545 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2546 if (ret < 0) 2547 return ret; 2548 offset = 0; 2549 len -= seg; 2550 data += seg; 2551 ++gfn; 2552 } 2553 return 0; 2554 } 2555 EXPORT_SYMBOL_GPL(kvm_read_guest); 2556 2557 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2558 { 2559 gfn_t gfn = gpa >> PAGE_SHIFT; 2560 int seg; 2561 int offset = offset_in_page(gpa); 2562 int ret; 2563 2564 while ((seg = next_segment(len, offset)) != 0) { 2565 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2566 if (ret < 0) 2567 return ret; 2568 offset = 0; 2569 len -= seg; 2570 data += seg; 2571 ++gfn; 2572 } 2573 return 0; 2574 } 2575 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2576 2577 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2578 void *data, int offset, unsigned long len) 2579 { 2580 int r; 2581 unsigned long addr; 2582 2583 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2584 if (kvm_is_error_hva(addr)) 2585 return -EFAULT; 2586 pagefault_disable(); 2587 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2588 pagefault_enable(); 2589 if (r) 2590 return -EFAULT; 2591 return 0; 2592 } 2593 2594 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2595 void *data, unsigned long len) 2596 { 2597 gfn_t gfn = gpa >> 
PAGE_SHIFT; 2598 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2599 int offset = offset_in_page(gpa); 2600 2601 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2602 } 2603 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2604 2605 static int __kvm_write_guest_page(struct kvm *kvm, 2606 struct kvm_memory_slot *memslot, gfn_t gfn, 2607 const void *data, int offset, int len) 2608 { 2609 int r; 2610 unsigned long addr; 2611 2612 addr = gfn_to_hva_memslot(memslot, gfn); 2613 if (kvm_is_error_hva(addr)) 2614 return -EFAULT; 2615 r = __copy_to_user((void __user *)addr + offset, data, len); 2616 if (r) 2617 return -EFAULT; 2618 mark_page_dirty_in_slot(kvm, memslot, gfn); 2619 return 0; 2620 } 2621 2622 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2623 const void *data, int offset, int len) 2624 { 2625 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2626 2627 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2628 } 2629 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2630 2631 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2632 const void *data, int offset, int len) 2633 { 2634 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2635 2636 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2637 } 2638 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2639 2640 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2641 unsigned long len) 2642 { 2643 gfn_t gfn = gpa >> PAGE_SHIFT; 2644 int seg; 2645 int offset = offset_in_page(gpa); 2646 int ret; 2647 2648 while ((seg = next_segment(len, offset)) != 0) { 2649 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2650 if (ret < 0) 2651 return ret; 2652 offset = 0; 2653 len -= seg; 2654 data += seg; 2655 ++gfn; 2656 } 2657 return 0; 2658 } 2659 EXPORT_SYMBOL_GPL(kvm_write_guest); 2660 2661 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2662 unsigned long len) 2663 { 2664 gfn_t gfn = gpa >> PAGE_SHIFT; 2665 int seg; 2666 int offset = offset_in_page(gpa); 2667 int ret; 2668 2669 while ((seg = next_segment(len, offset)) != 0) { 2670 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 2671 if (ret < 0) 2672 return ret; 2673 offset = 0; 2674 len -= seg; 2675 data += seg; 2676 ++gfn; 2677 } 2678 return 0; 2679 } 2680 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 2681 2682 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 2683 struct gfn_to_hva_cache *ghc, 2684 gpa_t gpa, unsigned long len) 2685 { 2686 int offset = offset_in_page(gpa); 2687 gfn_t start_gfn = gpa >> PAGE_SHIFT; 2688 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 2689 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 2690 gfn_t nr_pages_avail; 2691 2692 /* Update ghc->generation before performing any error checks. */ 2693 ghc->generation = slots->generation; 2694 2695 if (start_gfn > end_gfn) { 2696 ghc->hva = KVM_HVA_ERR_BAD; 2697 return -EINVAL; 2698 } 2699 2700 /* 2701 * If the requested region crosses two memslots, we still 2702 * verify that the entire region is valid here. 2703 */ 2704 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 2705 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 2706 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 2707 &nr_pages_avail); 2708 if (kvm_is_error_hva(ghc->hva)) 2709 return -EFAULT; 2710 } 2711 2712 /* Use the slow path for cross page reads and writes. 
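	 * ghc->memslot is left NULL in that case; the cached accessors
	 * (kvm_read/write_guest_offset_cached below) then fall back to the
	 * uncached kvm_read_guest()/kvm_write_guest() helpers.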
 */
	if (nr_pages_needed == 1)
		ghc->hva += offset;
	else
		ghc->memslot = NULL;

	ghc->gpa = gpa;
	ghc->len = len;
	return 0;
}

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;
	gpa_t gpa = ghc->gpa + offset;

	BUG_ON(len + offset > ghc->len);

	if (slots->generation != ghc->generation) {
		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
			return -EFAULT;
	}

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, gpa, data, len);

	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;
	gpa_t gpa = ghc->gpa + offset;

	BUG_ON(len + offset > ghc->len);

	if (slots->generation != ghc->generation) {
		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
			return -EFAULT;
	}

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, gpa, data, len);

	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		/* Zero one segment at a time, never more than a page. */
		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty_in_slot(struct kvm *kvm,
			     struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
		u32 slot = (memslot->as_id << 16) | memslot->id;

		if (kvm->dirty_ring_size)
			kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
					    slot,
					    rel_gfn);
		else
			set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	/*
	 * This does a lockless modification of ->real_blocked, which is fine
	 * because only current can change ->real_blocked and all readers of
	 * ->real_blocked don't care as long as ->real_blocked is always a
	 * subset of ->blocked.
	 */
	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
}

/*
 * Grow the poll window geometrically. An illustrative walk with the
 * default module parameters (grow = 2, grow_start = 10us): a vCPU's
 * halt_poll_ns goes 0 -> 10us -> 20us -> 40us -> ..., capped at
 * kvm->max_halt_poll_ns.
 */
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, grow, grow_start;

	old = val = vcpu->halt_poll_ns;
	grow_start = READ_ONCE(halt_poll_ns_grow_start);
	grow = READ_ONCE(halt_poll_ns_grow);
	if (!grow)
		goto out;

	val *= grow;
	if (val < grow_start)
		val = grow_start;

	if (val > vcpu->kvm->max_halt_poll_ns)
		val = vcpu->kvm->max_halt_poll_ns;

	vcpu->halt_poll_ns = val;
out:
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, shrink;

	old = val = vcpu->halt_poll_ns;
	shrink = READ_ONCE(halt_poll_ns_shrink);
	if (shrink == 0)
		val = 0;
	else
		val /= shrink;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	int ret = -EINTR;
	int idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		goto out;
	}
	if (kvm_cpu_has_pending_timer(vcpu))
		goto out;
	if (signal_pending(current))
		goto out;

	ret = 0;
out:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

static inline void
update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
{
	if (waited)
		vcpu->stat.halt_poll_fail_ns += poll_ns;
	else
		vcpu->stat.halt_poll_success_ns += poll_ns;
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	ktime_t start, cur, poll_end;
	bool waited = false;
	u64 block_ns;

	kvm_arch_vcpu_blocking(vcpu);

	start = cur = poll_end = ktime_get();
	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

		++vcpu->stat.halt_attempted_poll;
		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
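			 * (A negative return from kvm_vcpu_check_block()
			 * below ends the poll early and is accounted as a
			 * successful poll.)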
2968 */ 2969 if (kvm_vcpu_check_block(vcpu) < 0) { 2970 ++vcpu->stat.halt_successful_poll; 2971 if (!vcpu_valid_wakeup(vcpu)) 2972 ++vcpu->stat.halt_poll_invalid; 2973 goto out; 2974 } 2975 poll_end = cur = ktime_get(); 2976 } while (single_task_running() && !need_resched() && 2977 ktime_before(cur, stop)); 2978 } 2979 2980 prepare_to_rcuwait(&vcpu->wait); 2981 for (;;) { 2982 set_current_state(TASK_INTERRUPTIBLE); 2983 2984 if (kvm_vcpu_check_block(vcpu) < 0) 2985 break; 2986 2987 waited = true; 2988 schedule(); 2989 } 2990 finish_rcuwait(&vcpu->wait); 2991 cur = ktime_get(); 2992 out: 2993 kvm_arch_vcpu_unblocking(vcpu); 2994 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2995 2996 update_halt_poll_stats( 2997 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); 2998 2999 if (!kvm_arch_no_poll(vcpu)) { 3000 if (!vcpu_valid_wakeup(vcpu)) { 3001 shrink_halt_poll_ns(vcpu); 3002 } else if (vcpu->kvm->max_halt_poll_ns) { 3003 if (block_ns <= vcpu->halt_poll_ns) 3004 ; 3005 /* we had a long block, shrink polling */ 3006 else if (vcpu->halt_poll_ns && 3007 block_ns > vcpu->kvm->max_halt_poll_ns) 3008 shrink_halt_poll_ns(vcpu); 3009 /* we had a short halt and our poll time is too small */ 3010 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3011 block_ns < vcpu->kvm->max_halt_poll_ns) 3012 grow_halt_poll_ns(vcpu); 3013 } else { 3014 vcpu->halt_poll_ns = 0; 3015 } 3016 } 3017 3018 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 3019 kvm_arch_vcpu_block_finish(vcpu); 3020 } 3021 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 3022 3023 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3024 { 3025 struct rcuwait *waitp; 3026 3027 waitp = kvm_arch_vcpu_get_wait(vcpu); 3028 if (rcuwait_wake_up(waitp)) { 3029 WRITE_ONCE(vcpu->ready, true); 3030 ++vcpu->stat.halt_wakeup; 3031 return true; 3032 } 3033 3034 return false; 3035 } 3036 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3037 3038 #ifndef CONFIG_S390 3039 /* 3040 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3041 */ 3042 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3043 { 3044 int me; 3045 int cpu = vcpu->cpu; 3046 3047 if (kvm_vcpu_wake_up(vcpu)) 3048 return; 3049 3050 me = get_cpu(); 3051 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3052 if (kvm_arch_vcpu_should_kick(vcpu)) 3053 smp_send_reschedule(cpu); 3054 put_cpu(); 3055 } 3056 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3057 #endif /* !CONFIG_S390 */ 3058 3059 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3060 { 3061 struct pid *pid; 3062 struct task_struct *task = NULL; 3063 int ret = 0; 3064 3065 rcu_read_lock(); 3066 pid = rcu_dereference(target->pid); 3067 if (pid) 3068 task = get_pid_task(pid, PIDTYPE_PID); 3069 rcu_read_unlock(); 3070 if (!task) 3071 return ret; 3072 ret = yield_to(task, 1); 3073 put_task_struct(task); 3074 3075 return ret; 3076 } 3077 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3078 3079 /* 3080 * Helper that checks whether a VCPU is eligible for directed yield. 3081 * Most eligible candidate to yield is decided by following heuristics: 3082 * 3083 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3084 * (preempted lock holder), indicated by @in_spin_loop. 3085 * Set at the beginning and cleared at the end of interception/PLE handler. 3086 * 3087 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3088 * chance last time (mostly it has become eligible now since we have probably 3089 * yielded to lockholder in last iteration. 
This is done by toggling
 * @dy_eligible each time a VCPU is checked for eligibility.)
 *
 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before
 * yielding to a preempted lock-holder could result in wrong VCPU
 * selection and CPU burning. Giving priority to a potential lock-holder
 * increases lock progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's
 * data without locking does not harm. It may result in trying to yield
 * to the same VCPU, failing, and continuing with the next VCPU, and so
 * on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
		    vcpu->spin_loop.dy_eligible;

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

/*
 * Unlike kvm_arch_vcpu_runnable, this function is called outside
 * a vcpu_load/vcpu_put pair. However, for most architectures
 * kvm_arch_vcpu_runnable does not require vcpu_load.
 */
bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_dy_runnable(vcpu))
		return true;

#ifdef CONFIG_KVM_ASYNC_PF
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;
#endif

	return false;
}

bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	return false;
}

void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run. Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
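	 * (Pass 1 scans the vcpus after last_boosted_vcpu, pass 2 wraps
	 * around to the start of the array; up to three failed yield
	 * attempts abort the walk.)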
3163 */ 3164 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3165 kvm_for_each_vcpu(i, vcpu, kvm) { 3166 if (!pass && i <= last_boosted_vcpu) { 3167 i = last_boosted_vcpu; 3168 continue; 3169 } else if (pass && i > last_boosted_vcpu) 3170 break; 3171 if (!READ_ONCE(vcpu->ready)) 3172 continue; 3173 if (vcpu == me) 3174 continue; 3175 if (rcuwait_active(&vcpu->wait) && 3176 !vcpu_dy_runnable(vcpu)) 3177 continue; 3178 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3179 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3180 !kvm_arch_vcpu_in_kernel(vcpu)) 3181 continue; 3182 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3183 continue; 3184 3185 yielded = kvm_vcpu_yield_to(vcpu); 3186 if (yielded > 0) { 3187 kvm->last_boosted_vcpu = i; 3188 break; 3189 } else if (yielded < 0) { 3190 try--; 3191 if (!try) 3192 break; 3193 } 3194 } 3195 } 3196 kvm_vcpu_set_in_spin_loop(me, false); 3197 3198 /* Ensure vcpu is not eligible during next spinloop */ 3199 kvm_vcpu_set_dy_eligible(me, false); 3200 } 3201 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3202 3203 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3204 { 3205 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3206 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3207 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3208 kvm->dirty_ring_size / PAGE_SIZE); 3209 #else 3210 return false; 3211 #endif 3212 } 3213 3214 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3215 { 3216 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3217 struct page *page; 3218 3219 if (vmf->pgoff == 0) 3220 page = virt_to_page(vcpu->run); 3221 #ifdef CONFIG_X86 3222 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3223 page = virt_to_page(vcpu->arch.pio_data); 3224 #endif 3225 #ifdef CONFIG_KVM_MMIO 3226 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3227 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3228 #endif 3229 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3230 page = kvm_dirty_ring_get_page( 3231 &vcpu->dirty_ring, 3232 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3233 else 3234 return kvm_arch_vcpu_fault(vcpu, vmf); 3235 get_page(page); 3236 vmf->page = page; 3237 return 0; 3238 } 3239 3240 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3241 .fault = kvm_vcpu_fault, 3242 }; 3243 3244 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3245 { 3246 struct kvm_vcpu *vcpu = file->private_data; 3247 unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 3248 3249 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3250 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3251 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3252 return -EINVAL; 3253 3254 vma->vm_ops = &kvm_vcpu_vm_ops; 3255 return 0; 3256 } 3257 3258 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3259 { 3260 struct kvm_vcpu *vcpu = filp->private_data; 3261 3262 kvm_put_kvm(vcpu->kvm); 3263 return 0; 3264 } 3265 3266 static struct file_operations kvm_vcpu_fops = { 3267 .release = kvm_vcpu_release, 3268 .unlocked_ioctl = kvm_vcpu_ioctl, 3269 .mmap = kvm_vcpu_mmap, 3270 .llseek = noop_llseek, 3271 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3272 }; 3273 3274 /* 3275 * Allocates an inode for the vcpu. 
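 * The file descriptor is an anonymous inode named "kvm-vcpu:<id>",
 * handed to userspace with O_RDWR | O_CLOEXEC.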
3276 */ 3277 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3278 { 3279 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3280 3281 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3282 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3283 } 3284 3285 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3286 { 3287 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3288 struct dentry *debugfs_dentry; 3289 char dir_name[ITOA_MAX_LEN * 2]; 3290 3291 if (!debugfs_initialized()) 3292 return; 3293 3294 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3295 debugfs_dentry = debugfs_create_dir(dir_name, 3296 vcpu->kvm->debugfs_dentry); 3297 3298 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3299 #endif 3300 } 3301 3302 /* 3303 * Creates some virtual cpus. Good luck creating more than one. 3304 */ 3305 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3306 { 3307 int r; 3308 struct kvm_vcpu *vcpu; 3309 struct page *page; 3310 3311 if (id >= KVM_MAX_VCPU_ID) 3312 return -EINVAL; 3313 3314 mutex_lock(&kvm->lock); 3315 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3316 mutex_unlock(&kvm->lock); 3317 return -EINVAL; 3318 } 3319 3320 kvm->created_vcpus++; 3321 mutex_unlock(&kvm->lock); 3322 3323 r = kvm_arch_vcpu_precreate(kvm, id); 3324 if (r) 3325 goto vcpu_decrement; 3326 3327 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3328 if (!vcpu) { 3329 r = -ENOMEM; 3330 goto vcpu_decrement; 3331 } 3332 3333 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3334 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3335 if (!page) { 3336 r = -ENOMEM; 3337 goto vcpu_free; 3338 } 3339 vcpu->run = page_address(page); 3340 3341 kvm_vcpu_init(vcpu, kvm, id); 3342 3343 r = kvm_arch_vcpu_create(vcpu); 3344 if (r) 3345 goto vcpu_free_run_page; 3346 3347 if (kvm->dirty_ring_size) { 3348 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3349 id, kvm->dirty_ring_size); 3350 if (r) 3351 goto arch_vcpu_destroy; 3352 } 3353 3354 mutex_lock(&kvm->lock); 3355 if (kvm_get_vcpu_by_id(kvm, id)) { 3356 r = -EEXIST; 3357 goto unlock_vcpu_destroy; 3358 } 3359 3360 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3361 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); 3362 3363 /* Now it's all set up, let userspace reach it */ 3364 kvm_get_kvm(kvm); 3365 r = create_vcpu_fd(vcpu); 3366 if (r < 0) { 3367 kvm_put_kvm_no_destroy(kvm); 3368 goto unlock_vcpu_destroy; 3369 } 3370 3371 kvm->vcpus[vcpu->vcpu_idx] = vcpu; 3372 3373 /* 3374 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 3375 * before kvm->online_vcpu's incremented value. 
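	 * A reader that observes the incremented online_vcpus is therefore
	 * guaranteed to also observe the new vcpus[] entry written above.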
3376 */ 3377 smp_wmb(); 3378 atomic_inc(&kvm->online_vcpus); 3379 3380 mutex_unlock(&kvm->lock); 3381 kvm_arch_vcpu_postcreate(vcpu); 3382 kvm_create_vcpu_debugfs(vcpu); 3383 return r; 3384 3385 unlock_vcpu_destroy: 3386 mutex_unlock(&kvm->lock); 3387 kvm_dirty_ring_free(&vcpu->dirty_ring); 3388 arch_vcpu_destroy: 3389 kvm_arch_vcpu_destroy(vcpu); 3390 vcpu_free_run_page: 3391 free_page((unsigned long)vcpu->run); 3392 vcpu_free: 3393 kmem_cache_free(kvm_vcpu_cache, vcpu); 3394 vcpu_decrement: 3395 mutex_lock(&kvm->lock); 3396 kvm->created_vcpus--; 3397 mutex_unlock(&kvm->lock); 3398 return r; 3399 } 3400 3401 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3402 { 3403 if (sigset) { 3404 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3405 vcpu->sigset_active = 1; 3406 vcpu->sigset = *sigset; 3407 } else 3408 vcpu->sigset_active = 0; 3409 return 0; 3410 } 3411 3412 static long kvm_vcpu_ioctl(struct file *filp, 3413 unsigned int ioctl, unsigned long arg) 3414 { 3415 struct kvm_vcpu *vcpu = filp->private_data; 3416 void __user *argp = (void __user *)arg; 3417 int r; 3418 struct kvm_fpu *fpu = NULL; 3419 struct kvm_sregs *kvm_sregs = NULL; 3420 3421 if (vcpu->kvm->mm != current->mm) 3422 return -EIO; 3423 3424 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3425 return -EINVAL; 3426 3427 /* 3428 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3429 * execution; mutex_lock() would break them. 3430 */ 3431 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3432 if (r != -ENOIOCTLCMD) 3433 return r; 3434 3435 if (mutex_lock_killable(&vcpu->mutex)) 3436 return -EINTR; 3437 switch (ioctl) { 3438 case KVM_RUN: { 3439 struct pid *oldpid; 3440 r = -EINVAL; 3441 if (arg) 3442 goto out; 3443 oldpid = rcu_access_pointer(vcpu->pid); 3444 if (unlikely(oldpid != task_pid(current))) { 3445 /* The thread running this VCPU changed. 
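			 * (For example, userspace moved the vCPU to a
			 * different thread between two KVM_RUN calls.)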
*/ 3446 struct pid *newpid; 3447 3448 r = kvm_arch_vcpu_run_pid_change(vcpu); 3449 if (r) 3450 break; 3451 3452 newpid = get_task_pid(current, PIDTYPE_PID); 3453 rcu_assign_pointer(vcpu->pid, newpid); 3454 if (oldpid) 3455 synchronize_rcu(); 3456 put_pid(oldpid); 3457 } 3458 r = kvm_arch_vcpu_ioctl_run(vcpu); 3459 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3460 break; 3461 } 3462 case KVM_GET_REGS: { 3463 struct kvm_regs *kvm_regs; 3464 3465 r = -ENOMEM; 3466 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3467 if (!kvm_regs) 3468 goto out; 3469 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3470 if (r) 3471 goto out_free1; 3472 r = -EFAULT; 3473 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3474 goto out_free1; 3475 r = 0; 3476 out_free1: 3477 kfree(kvm_regs); 3478 break; 3479 } 3480 case KVM_SET_REGS: { 3481 struct kvm_regs *kvm_regs; 3482 3483 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3484 if (IS_ERR(kvm_regs)) { 3485 r = PTR_ERR(kvm_regs); 3486 goto out; 3487 } 3488 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3489 kfree(kvm_regs); 3490 break; 3491 } 3492 case KVM_GET_SREGS: { 3493 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3494 GFP_KERNEL_ACCOUNT); 3495 r = -ENOMEM; 3496 if (!kvm_sregs) 3497 goto out; 3498 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3499 if (r) 3500 goto out; 3501 r = -EFAULT; 3502 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3503 goto out; 3504 r = 0; 3505 break; 3506 } 3507 case KVM_SET_SREGS: { 3508 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3509 if (IS_ERR(kvm_sregs)) { 3510 r = PTR_ERR(kvm_sregs); 3511 kvm_sregs = NULL; 3512 goto out; 3513 } 3514 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3515 break; 3516 } 3517 case KVM_GET_MP_STATE: { 3518 struct kvm_mp_state mp_state; 3519 3520 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3521 if (r) 3522 goto out; 3523 r = -EFAULT; 3524 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3525 goto out; 3526 r = 0; 3527 break; 3528 } 3529 case KVM_SET_MP_STATE: { 3530 struct kvm_mp_state mp_state; 3531 3532 r = -EFAULT; 3533 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3534 goto out; 3535 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3536 break; 3537 } 3538 case KVM_TRANSLATE: { 3539 struct kvm_translation tr; 3540 3541 r = -EFAULT; 3542 if (copy_from_user(&tr, argp, sizeof(tr))) 3543 goto out; 3544 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 3545 if (r) 3546 goto out; 3547 r = -EFAULT; 3548 if (copy_to_user(argp, &tr, sizeof(tr))) 3549 goto out; 3550 r = 0; 3551 break; 3552 } 3553 case KVM_SET_GUEST_DEBUG: { 3554 struct kvm_guest_debug dbg; 3555 3556 r = -EFAULT; 3557 if (copy_from_user(&dbg, argp, sizeof(dbg))) 3558 goto out; 3559 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 3560 break; 3561 } 3562 case KVM_SET_SIGNAL_MASK: { 3563 struct kvm_signal_mask __user *sigmask_arg = argp; 3564 struct kvm_signal_mask kvm_sigmask; 3565 sigset_t sigset, *p; 3566 3567 p = NULL; 3568 if (argp) { 3569 r = -EFAULT; 3570 if (copy_from_user(&kvm_sigmask, argp, 3571 sizeof(kvm_sigmask))) 3572 goto out; 3573 r = -EINVAL; 3574 if (kvm_sigmask.len != sizeof(sigset)) 3575 goto out; 3576 r = -EFAULT; 3577 if (copy_from_user(&sigset, sigmask_arg->sigset, 3578 sizeof(sigset))) 3579 goto out; 3580 p = &sigset; 3581 } 3582 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 3583 break; 3584 } 3585 case KVM_GET_FPU: { 3586 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 3587 r = -ENOMEM; 3588 if (!fpu) 3589 goto out; 
3590 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 3591 if (r) 3592 goto out; 3593 r = -EFAULT; 3594 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 3595 goto out; 3596 r = 0; 3597 break; 3598 } 3599 case KVM_SET_FPU: { 3600 fpu = memdup_user(argp, sizeof(*fpu)); 3601 if (IS_ERR(fpu)) { 3602 r = PTR_ERR(fpu); 3603 fpu = NULL; 3604 goto out; 3605 } 3606 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 3607 break; 3608 } 3609 default: 3610 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 3611 } 3612 out: 3613 mutex_unlock(&vcpu->mutex); 3614 kfree(fpu); 3615 kfree(kvm_sregs); 3616 return r; 3617 } 3618 3619 #ifdef CONFIG_KVM_COMPAT 3620 static long kvm_vcpu_compat_ioctl(struct file *filp, 3621 unsigned int ioctl, unsigned long arg) 3622 { 3623 struct kvm_vcpu *vcpu = filp->private_data; 3624 void __user *argp = compat_ptr(arg); 3625 int r; 3626 3627 if (vcpu->kvm->mm != current->mm) 3628 return -EIO; 3629 3630 switch (ioctl) { 3631 case KVM_SET_SIGNAL_MASK: { 3632 struct kvm_signal_mask __user *sigmask_arg = argp; 3633 struct kvm_signal_mask kvm_sigmask; 3634 sigset_t sigset; 3635 3636 if (argp) { 3637 r = -EFAULT; 3638 if (copy_from_user(&kvm_sigmask, argp, 3639 sizeof(kvm_sigmask))) 3640 goto out; 3641 r = -EINVAL; 3642 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 3643 goto out; 3644 r = -EFAULT; 3645 if (get_compat_sigset(&sigset, 3646 (compat_sigset_t __user *)sigmask_arg->sigset)) 3647 goto out; 3648 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 3649 } else 3650 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 3651 break; 3652 } 3653 default: 3654 r = kvm_vcpu_ioctl(filp, ioctl, arg); 3655 } 3656 3657 out: 3658 return r; 3659 } 3660 #endif 3661 3662 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 3663 { 3664 struct kvm_device *dev = filp->private_data; 3665 3666 if (dev->ops->mmap) 3667 return dev->ops->mmap(dev, vma); 3668 3669 return -ENODEV; 3670 } 3671 3672 static int kvm_device_ioctl_attr(struct kvm_device *dev, 3673 int (*accessor)(struct kvm_device *dev, 3674 struct kvm_device_attr *attr), 3675 unsigned long arg) 3676 { 3677 struct kvm_device_attr attr; 3678 3679 if (!accessor) 3680 return -EPERM; 3681 3682 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 3683 return -EFAULT; 3684 3685 return accessor(dev, &attr); 3686 } 3687 3688 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 3689 unsigned long arg) 3690 { 3691 struct kvm_device *dev = filp->private_data; 3692 3693 if (dev->kvm->mm != current->mm) 3694 return -EIO; 3695 3696 switch (ioctl) { 3697 case KVM_SET_DEVICE_ATTR: 3698 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 3699 case KVM_GET_DEVICE_ATTR: 3700 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 3701 case KVM_HAS_DEVICE_ATTR: 3702 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 3703 default: 3704 if (dev->ops->ioctl) 3705 return dev->ops->ioctl(dev, ioctl, arg); 3706 3707 return -ENOTTY; 3708 } 3709 } 3710 3711 static int kvm_device_release(struct inode *inode, struct file *filp) 3712 { 3713 struct kvm_device *dev = filp->private_data; 3714 struct kvm *kvm = dev->kvm; 3715 3716 if (dev->ops->release) { 3717 mutex_lock(&kvm->lock); 3718 list_del(&dev->vm_node); 3719 dev->ops->release(dev); 3720 mutex_unlock(&kvm->lock); 3721 } 3722 3723 kvm_put_kvm(kvm); 3724 return 0; 3725 } 3726 3727 static const struct file_operations kvm_device_fops = { 3728 .unlocked_ioctl = kvm_device_ioctl, 3729 .release = kvm_device_release, 3730 KVM_COMPAT(kvm_device_ioctl), 3731 .mmap = kvm_device_mmap, 3732 
}; 3733 3734 struct kvm_device *kvm_device_from_filp(struct file *filp) 3735 { 3736 if (filp->f_op != &kvm_device_fops) 3737 return NULL; 3738 3739 return filp->private_data; 3740 } 3741 3742 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 3743 #ifdef CONFIG_KVM_MPIC 3744 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 3745 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 3746 #endif 3747 }; 3748 3749 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 3750 { 3751 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 3752 return -ENOSPC; 3753 3754 if (kvm_device_ops_table[type] != NULL) 3755 return -EEXIST; 3756 3757 kvm_device_ops_table[type] = ops; 3758 return 0; 3759 } 3760 3761 void kvm_unregister_device_ops(u32 type) 3762 { 3763 if (kvm_device_ops_table[type] != NULL) 3764 kvm_device_ops_table[type] = NULL; 3765 } 3766 3767 static int kvm_ioctl_create_device(struct kvm *kvm, 3768 struct kvm_create_device *cd) 3769 { 3770 const struct kvm_device_ops *ops = NULL; 3771 struct kvm_device *dev; 3772 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 3773 int type; 3774 int ret; 3775 3776 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 3777 return -ENODEV; 3778 3779 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 3780 ops = kvm_device_ops_table[type]; 3781 if (ops == NULL) 3782 return -ENODEV; 3783 3784 if (test) 3785 return 0; 3786 3787 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 3788 if (!dev) 3789 return -ENOMEM; 3790 3791 dev->ops = ops; 3792 dev->kvm = kvm; 3793 3794 mutex_lock(&kvm->lock); 3795 ret = ops->create(dev, type); 3796 if (ret < 0) { 3797 mutex_unlock(&kvm->lock); 3798 kfree(dev); 3799 return ret; 3800 } 3801 list_add(&dev->vm_node, &kvm->devices); 3802 mutex_unlock(&kvm->lock); 3803 3804 if (ops->init) 3805 ops->init(dev); 3806 3807 kvm_get_kvm(kvm); 3808 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3809 if (ret < 0) { 3810 kvm_put_kvm_no_destroy(kvm); 3811 mutex_lock(&kvm->lock); 3812 list_del(&dev->vm_node); 3813 mutex_unlock(&kvm->lock); 3814 ops->destroy(dev); 3815 return ret; 3816 } 3817 3818 cd->fd = ret; 3819 return 0; 3820 } 3821 3822 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 3823 { 3824 switch (arg) { 3825 case KVM_CAP_USER_MEMORY: 3826 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 3827 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 3828 case KVM_CAP_INTERNAL_ERROR_DATA: 3829 #ifdef CONFIG_HAVE_KVM_MSI 3830 case KVM_CAP_SIGNAL_MSI: 3831 #endif 3832 #ifdef CONFIG_HAVE_KVM_IRQFD 3833 case KVM_CAP_IRQFD: 3834 case KVM_CAP_IRQFD_RESAMPLE: 3835 #endif 3836 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 3837 case KVM_CAP_CHECK_EXTENSION_VM: 3838 case KVM_CAP_ENABLE_CAP_VM: 3839 case KVM_CAP_HALT_POLL: 3840 return 1; 3841 #ifdef CONFIG_KVM_MMIO 3842 case KVM_CAP_COALESCED_MMIO: 3843 return KVM_COALESCED_MMIO_PAGE_OFFSET; 3844 case KVM_CAP_COALESCED_PIO: 3845 return 1; 3846 #endif 3847 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 3848 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 3849 return KVM_DIRTY_LOG_MANUAL_CAPS; 3850 #endif 3851 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 3852 case KVM_CAP_IRQ_ROUTING: 3853 return KVM_MAX_IRQ_ROUTES; 3854 #endif 3855 #if KVM_ADDRESS_SPACE_NUM > 1 3856 case KVM_CAP_MULTI_ADDRESS_SPACE: 3857 return KVM_ADDRESS_SPACE_NUM; 3858 #endif 3859 case KVM_CAP_NR_MEMSLOTS: 3860 return KVM_USER_MEM_SLOTS; 3861 case KVM_CAP_DIRTY_LOG_RING: 3862 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3863 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 3864 
#else
		return 0;
#endif
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}

static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
{
	int r;

	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
		return -EINVAL;

	/* The size must be a power of 2 */
	if (!size || (size & (size - 1)))
		return -EINVAL;

	/* Must be large enough for the reserved entries, and at least a page */
	if (size < kvm_dirty_ring_get_rsvd_entries() *
	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
		return -EINVAL;

	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
	    sizeof(struct kvm_dirty_gfn))
		return -E2BIG;

	/* The ring size can only be set once */
	if (kvm->dirty_ring_size)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (kvm->created_vcpus) {
		/* Changing this value is not allowed once vCPUs are created */
		r = -EINVAL;
	} else {
		kvm->dirty_ring_size = size;
		r = 0;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;
	int cleared = 0;

	if (!kvm->dirty_ring_size)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);

	mutex_unlock(&kvm->slots_lock);

	if (cleared)
		kvm_flush_remote_tlbs(kvm);

	return cleared;
}

int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
						  struct kvm_enable_cap *cap)
{
	return -EINVAL;
}

static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
					   struct kvm_enable_cap *cap)
{
	switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;

		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;

		if (cap->flags || (cap->args[0] & ~allowed_options))
			return -EINVAL;
		kvm->manual_dirty_log_protect = cap->args[0];
		return 0;
	}
#endif
	case KVM_CAP_HALT_POLL: {
		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
			return -EINVAL;

		kvm->max_halt_poll_ns = cap->args[0];
		return 0;
	}
	case KVM_CAP_DIRTY_LOG_RING:
		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
	default:
		return kvm_vm_ioctl_enable_cap(kvm, cap);
	}
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
		break;
	}
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof(kvm_userspace_mem)))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
		break;
	}
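	/*
	 * The remaining cases follow one pattern: copy the argument struct
	 * in from userspace, call the handler, and copy results back out
	 * for the ioctls that return data.
	 */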
case KVM_GET_DIRTY_LOG: { 4004 struct kvm_dirty_log log; 4005 4006 r = -EFAULT; 4007 if (copy_from_user(&log, argp, sizeof(log))) 4008 goto out; 4009 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4010 break; 4011 } 4012 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4013 case KVM_CLEAR_DIRTY_LOG: { 4014 struct kvm_clear_dirty_log log; 4015 4016 r = -EFAULT; 4017 if (copy_from_user(&log, argp, sizeof(log))) 4018 goto out; 4019 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4020 break; 4021 } 4022 #endif 4023 #ifdef CONFIG_KVM_MMIO 4024 case KVM_REGISTER_COALESCED_MMIO: { 4025 struct kvm_coalesced_mmio_zone zone; 4026 4027 r = -EFAULT; 4028 if (copy_from_user(&zone, argp, sizeof(zone))) 4029 goto out; 4030 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4031 break; 4032 } 4033 case KVM_UNREGISTER_COALESCED_MMIO: { 4034 struct kvm_coalesced_mmio_zone zone; 4035 4036 r = -EFAULT; 4037 if (copy_from_user(&zone, argp, sizeof(zone))) 4038 goto out; 4039 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4040 break; 4041 } 4042 #endif 4043 case KVM_IRQFD: { 4044 struct kvm_irqfd data; 4045 4046 r = -EFAULT; 4047 if (copy_from_user(&data, argp, sizeof(data))) 4048 goto out; 4049 r = kvm_irqfd(kvm, &data); 4050 break; 4051 } 4052 case KVM_IOEVENTFD: { 4053 struct kvm_ioeventfd data; 4054 4055 r = -EFAULT; 4056 if (copy_from_user(&data, argp, sizeof(data))) 4057 goto out; 4058 r = kvm_ioeventfd(kvm, &data); 4059 break; 4060 } 4061 #ifdef CONFIG_HAVE_KVM_MSI 4062 case KVM_SIGNAL_MSI: { 4063 struct kvm_msi msi; 4064 4065 r = -EFAULT; 4066 if (copy_from_user(&msi, argp, sizeof(msi))) 4067 goto out; 4068 r = kvm_send_userspace_msi(kvm, &msi); 4069 break; 4070 } 4071 #endif 4072 #ifdef __KVM_HAVE_IRQ_LINE 4073 case KVM_IRQ_LINE_STATUS: 4074 case KVM_IRQ_LINE: { 4075 struct kvm_irq_level irq_event; 4076 4077 r = -EFAULT; 4078 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4079 goto out; 4080 4081 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4082 ioctl == KVM_IRQ_LINE_STATUS); 4083 if (r) 4084 goto out; 4085 4086 r = -EFAULT; 4087 if (ioctl == KVM_IRQ_LINE_STATUS) { 4088 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4089 goto out; 4090 } 4091 4092 r = 0; 4093 break; 4094 } 4095 #endif 4096 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4097 case KVM_SET_GSI_ROUTING: { 4098 struct kvm_irq_routing routing; 4099 struct kvm_irq_routing __user *urouting; 4100 struct kvm_irq_routing_entry *entries = NULL; 4101 4102 r = -EFAULT; 4103 if (copy_from_user(&routing, argp, sizeof(routing))) 4104 goto out; 4105 r = -EINVAL; 4106 if (!kvm_arch_can_set_irq_routing(kvm)) 4107 goto out; 4108 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4109 goto out; 4110 if (routing.flags) 4111 goto out; 4112 if (routing.nr) { 4113 urouting = argp; 4114 entries = vmemdup_user(urouting->entries, 4115 array_size(sizeof(*entries), 4116 routing.nr)); 4117 if (IS_ERR(entries)) { 4118 r = PTR_ERR(entries); 4119 goto out; 4120 } 4121 } 4122 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4123 routing.flags); 4124 kvfree(entries); 4125 break; 4126 } 4127 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4128 case KVM_CREATE_DEVICE: { 4129 struct kvm_create_device cd; 4130 4131 r = -EFAULT; 4132 if (copy_from_user(&cd, argp, sizeof(cd))) 4133 goto out; 4134 4135 r = kvm_ioctl_create_device(kvm, &cd); 4136 if (r) 4137 goto out; 4138 4139 r = -EFAULT; 4140 if (copy_to_user(argp, &cd, sizeof(cd))) 4141 goto out; 4142 4143 r = 0; 4144 break; 4145 } 4146 case KVM_CHECK_EXTENSION: 4147 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4148 break; 4149 
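	/*
	 * KVM_RESET_DIRTY_RINGS re-protects all pages whose dirty-ring
	 * entries userspace has already harvested; see
	 * kvm_vm_ioctl_reset_dirty_pages() above, which also flushes
	 * remote TLBs when anything was cleared.
	 */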
case KVM_RESET_DIRTY_RINGS: 4150 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4151 break; 4152 default: 4153 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4154 } 4155 out: 4156 return r; 4157 } 4158 4159 #ifdef CONFIG_KVM_COMPAT 4160 struct compat_kvm_dirty_log { 4161 __u32 slot; 4162 __u32 padding1; 4163 union { 4164 compat_uptr_t dirty_bitmap; /* one bit per page */ 4165 __u64 padding2; 4166 }; 4167 }; 4168 4169 static long kvm_vm_compat_ioctl(struct file *filp, 4170 unsigned int ioctl, unsigned long arg) 4171 { 4172 struct kvm *kvm = filp->private_data; 4173 int r; 4174 4175 if (kvm->mm != current->mm) 4176 return -EIO; 4177 switch (ioctl) { 4178 case KVM_GET_DIRTY_LOG: { 4179 struct compat_kvm_dirty_log compat_log; 4180 struct kvm_dirty_log log; 4181 4182 if (copy_from_user(&compat_log, (void __user *)arg, 4183 sizeof(compat_log))) 4184 return -EFAULT; 4185 log.slot = compat_log.slot; 4186 log.padding1 = compat_log.padding1; 4187 log.padding2 = compat_log.padding2; 4188 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4189 4190 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4191 break; 4192 } 4193 default: 4194 r = kvm_vm_ioctl(filp, ioctl, arg); 4195 } 4196 return r; 4197 } 4198 #endif 4199 4200 static struct file_operations kvm_vm_fops = { 4201 .release = kvm_vm_release, 4202 .unlocked_ioctl = kvm_vm_ioctl, 4203 .llseek = noop_llseek, 4204 KVM_COMPAT(kvm_vm_compat_ioctl), 4205 }; 4206 4207 bool file_is_kvm(struct file *file) 4208 { 4209 return file && file->f_op == &kvm_vm_fops; 4210 } 4211 EXPORT_SYMBOL_GPL(file_is_kvm); 4212 4213 static int kvm_dev_ioctl_create_vm(unsigned long type) 4214 { 4215 int r; 4216 struct kvm *kvm; 4217 struct file *file; 4218 4219 kvm = kvm_create_vm(type); 4220 if (IS_ERR(kvm)) 4221 return PTR_ERR(kvm); 4222 #ifdef CONFIG_KVM_MMIO 4223 r = kvm_coalesced_mmio_init(kvm); 4224 if (r < 0) 4225 goto put_kvm; 4226 #endif 4227 r = get_unused_fd_flags(O_CLOEXEC); 4228 if (r < 0) 4229 goto put_kvm; 4230 4231 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4232 if (IS_ERR(file)) { 4233 put_unused_fd(r); 4234 r = PTR_ERR(file); 4235 goto put_kvm; 4236 } 4237 4238 /* 4239 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4240 * already set, with ->release() being kvm_vm_release(). In error 4241 * cases it will be called by the final fput(file) and will take 4242 * care of doing kvm_put_kvm(kvm). 
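 *
 * Illustrative unwind sketch (comment only): from this point on, an
 * error is handled as
 *
 *	put_unused_fd(r);
 *	fput(file);	// final fput() -> kvm_vm_release() -> kvm_put_kvm()
 *
 * which is exactly the unwind the debugfs failure path below uses.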
4243 */ 4244 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4245 put_unused_fd(r); 4246 fput(file); 4247 return -ENOMEM; 4248 } 4249 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4250 4251 fd_install(r, file); 4252 return r; 4253 4254 put_kvm: 4255 kvm_put_kvm(kvm); 4256 return r; 4257 } 4258 4259 static long kvm_dev_ioctl(struct file *filp, 4260 unsigned int ioctl, unsigned long arg) 4261 { 4262 long r = -EINVAL; 4263 4264 switch (ioctl) { 4265 case KVM_GET_API_VERSION: 4266 if (arg) 4267 goto out; 4268 r = KVM_API_VERSION; 4269 break; 4270 case KVM_CREATE_VM: 4271 r = kvm_dev_ioctl_create_vm(arg); 4272 break; 4273 case KVM_CHECK_EXTENSION: 4274 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4275 break; 4276 case KVM_GET_VCPU_MMAP_SIZE: 4277 if (arg) 4278 goto out; 4279 r = PAGE_SIZE; /* struct kvm_run */ 4280 #ifdef CONFIG_X86 4281 r += PAGE_SIZE; /* pio data page */ 4282 #endif 4283 #ifdef CONFIG_KVM_MMIO 4284 r += PAGE_SIZE; /* coalesced mmio ring page */ 4285 #endif 4286 break; 4287 case KVM_TRACE_ENABLE: 4288 case KVM_TRACE_PAUSE: 4289 case KVM_TRACE_DISABLE: 4290 r = -EOPNOTSUPP; 4291 break; 4292 default: 4293 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4294 } 4295 out: 4296 return r; 4297 } 4298 4299 static struct file_operations kvm_chardev_ops = { 4300 .unlocked_ioctl = kvm_dev_ioctl, 4301 .llseek = noop_llseek, 4302 KVM_COMPAT(kvm_dev_ioctl), 4303 }; 4304 4305 static struct miscdevice kvm_dev = { 4306 KVM_MINOR, 4307 "kvm", 4308 &kvm_chardev_ops, 4309 }; 4310 4311 static void hardware_enable_nolock(void *junk) 4312 { 4313 int cpu = raw_smp_processor_id(); 4314 int r; 4315 4316 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4317 return; 4318 4319 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4320 4321 r = kvm_arch_hardware_enable(); 4322 4323 if (r) { 4324 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4325 atomic_inc(&hardware_enable_failed); 4326 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4327 } 4328 } 4329 4330 static int kvm_starting_cpu(unsigned int cpu) 4331 { 4332 raw_spin_lock(&kvm_count_lock); 4333 if (kvm_usage_count) 4334 hardware_enable_nolock(NULL); 4335 raw_spin_unlock(&kvm_count_lock); 4336 return 0; 4337 } 4338 4339 static void hardware_disable_nolock(void *junk) 4340 { 4341 int cpu = raw_smp_processor_id(); 4342 4343 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4344 return; 4345 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4346 kvm_arch_hardware_disable(); 4347 } 4348 4349 static int kvm_dying_cpu(unsigned int cpu) 4350 { 4351 raw_spin_lock(&kvm_count_lock); 4352 if (kvm_usage_count) 4353 hardware_disable_nolock(NULL); 4354 raw_spin_unlock(&kvm_count_lock); 4355 return 0; 4356 } 4357 4358 static void hardware_disable_all_nolock(void) 4359 { 4360 BUG_ON(!kvm_usage_count); 4361 4362 kvm_usage_count--; 4363 if (!kvm_usage_count) 4364 on_each_cpu(hardware_disable_nolock, NULL, 1); 4365 } 4366 4367 static void hardware_disable_all(void) 4368 { 4369 raw_spin_lock(&kvm_count_lock); 4370 hardware_disable_all_nolock(); 4371 raw_spin_unlock(&kvm_count_lock); 4372 } 4373 4374 static int hardware_enable_all(void) 4375 { 4376 int r = 0; 4377 4378 raw_spin_lock(&kvm_count_lock); 4379 4380 kvm_usage_count++; 4381 if (kvm_usage_count == 1) { 4382 atomic_set(&hardware_enable_failed, 0); 4383 on_each_cpu(hardware_enable_nolock, NULL, 1); 4384 4385 if (atomic_read(&hardware_enable_failed)) { 4386 hardware_disable_all_nolock(); 4387 r = -EBUSY; 4388 } 4389 } 4390 4391 raw_spin_unlock(&kvm_count_lock); 4392 4393 return r; 4394 } 4395 4396 static 
int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4397 void *v) 4398 { 4399 /* 4400 * Some (well, at least mine) BIOSes hang on reboot if the CPU is 4401 * still in VMX root mode. 4402 * 4403 * In addition, Intel TXT requires VMX to be off on all CPUs when the system shuts down. 4404 */ 4405 pr_info("kvm: exiting hardware virtualization\n"); 4406 kvm_rebooting = true; 4407 on_each_cpu(hardware_disable_nolock, NULL, 1); 4408 return NOTIFY_OK; 4409 } 4410 4411 static struct notifier_block kvm_reboot_notifier = { 4412 .notifier_call = kvm_reboot, 4413 .priority = 0, 4414 }; 4415 4416 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4417 { 4418 int i; 4419 4420 for (i = 0; i < bus->dev_count; i++) { 4421 struct kvm_io_device *pos = bus->range[i].dev; 4422 4423 kvm_iodevice_destructor(pos); 4424 } 4425 kfree(bus); 4426 } 4427 4428 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4429 const struct kvm_io_range *r2) 4430 { 4431 gpa_t addr1 = r1->addr; 4432 gpa_t addr2 = r2->addr; 4433 4434 if (addr1 < addr2) 4435 return -1; 4436 4437 /* If r2->len == 0, match the exact address. If r2->len != 0, 4438 * accept any overlapping write. Any order is acceptable for 4439 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4440 * we process all of them. 4441 */ 4442 if (r2->len) { 4443 addr1 += r1->len; 4444 addr2 += r2->len; 4445 } 4446 4447 if (addr1 > addr2) 4448 return 1; 4449 4450 return 0; 4451 } 4452 4453 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4454 { 4455 return kvm_io_bus_cmp(p1, p2); 4456 } 4457 4458 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4459 gpa_t addr, int len) 4460 { 4461 struct kvm_io_range *range, key; 4462 int off; 4463 4464 key = (struct kvm_io_range) { 4465 .addr = addr, 4466 .len = len, 4467 }; 4468 4469 range = bsearch(&key, bus->range, bus->dev_count, 4470 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 4471 if (range == NULL) 4472 return -ENOENT; 4473 4474 off = range - bus->range; 4475 4476 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 4477 off--; 4478 4479 return off; 4480 } 4481 4482 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4483 struct kvm_io_range *range, const void *val) 4484 { 4485 int idx; 4486 4487 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4488 if (idx < 0) 4489 return -EOPNOTSUPP; 4490 4491 while (idx < bus->dev_count && 4492 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4493 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 4494 range->len, val)) 4495 return idx; 4496 idx++; 4497 } 4498 4499 return -EOPNOTSUPP; 4500 } 4501 4502 /* kvm_io_bus_write - called under kvm->slots_lock */ 4503 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4504 int len, const void *val) 4505 { 4506 struct kvm_io_bus *bus; 4507 struct kvm_io_range range; 4508 int r; 4509 4510 range = (struct kvm_io_range) { 4511 .addr = addr, 4512 .len = len, 4513 }; 4514 4515 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4516 if (!bus) 4517 return -ENOMEM; 4518 r = __kvm_io_bus_write(vcpu, bus, &range, val); 4519 return r < 0 ?
r : 0; 4520 } 4521 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 4522 4523 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 4524 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 4525 gpa_t addr, int len, const void *val, long cookie) 4526 { 4527 struct kvm_io_bus *bus; 4528 struct kvm_io_range range; 4529 4530 range = (struct kvm_io_range) { 4531 .addr = addr, 4532 .len = len, 4533 }; 4534 4535 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4536 if (!bus) 4537 return -ENOMEM; 4538 4539 /* First try the device referenced by cookie. */ 4540 if ((cookie >= 0) && (cookie < bus->dev_count) && 4541 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 4542 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 4543 val)) 4544 return cookie; 4545 4546 /* 4547 * cookie contained garbage; fall back to search and return the 4548 * correct cookie value. 4549 */ 4550 return __kvm_io_bus_write(vcpu, bus, &range, val); 4551 } 4552 4553 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4554 struct kvm_io_range *range, void *val) 4555 { 4556 int idx; 4557 4558 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4559 if (idx < 0) 4560 return -EOPNOTSUPP; 4561 4562 while (idx < bus->dev_count && 4563 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4564 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 4565 range->len, val)) 4566 return idx; 4567 idx++; 4568 } 4569 4570 return -EOPNOTSUPP; 4571 } 4572 4573 /* kvm_io_bus_read - called under kvm->slots_lock */ 4574 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4575 int len, void *val) 4576 { 4577 struct kvm_io_bus *bus; 4578 struct kvm_io_range range; 4579 int r; 4580 4581 range = (struct kvm_io_range) { 4582 .addr = addr, 4583 .len = len, 4584 }; 4585 4586 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4587 if (!bus) 4588 return -ENOMEM; 4589 r = __kvm_io_bus_read(vcpu, bus, &range, val); 4590 return r < 0 ? r : 0; 4591 } 4592 4593 /* Caller must hold slots_lock. 
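 *
 * Illustrative caller sketch (comment only; dev and gpa are hypothetical):
 * a device model registering a 4-byte MMIO range would do
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, gpa, 4, &dev->iodev);
 *	mutex_unlock(&kvm->slots_lock);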
*/ 4594 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 4595 int len, struct kvm_io_device *dev) 4596 { 4597 int i; 4598 struct kvm_io_bus *new_bus, *bus; 4599 struct kvm_io_range range; 4600 4601 bus = kvm_get_bus(kvm, bus_idx); 4602 if (!bus) 4603 return -ENOMEM; 4604 4605 /* exclude ioeventfd which is limited by maximum fd */ 4606 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 4607 return -ENOSPC; 4608 4609 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 4610 GFP_KERNEL_ACCOUNT); 4611 if (!new_bus) 4612 return -ENOMEM; 4613 4614 range = (struct kvm_io_range) { 4615 .addr = addr, 4616 .len = len, 4617 .dev = dev, 4618 }; 4619 4620 for (i = 0; i < bus->dev_count; i++) 4621 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 4622 break; 4623 4624 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 4625 new_bus->dev_count++; 4626 new_bus->range[i] = range; 4627 memcpy(new_bus->range + i + 1, bus->range + i, 4628 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 4629 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4630 synchronize_srcu_expedited(&kvm->srcu); 4631 kfree(bus); 4632 4633 return 0; 4634 } 4635 4636 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4637 struct kvm_io_device *dev) 4638 { 4639 int i, j; 4640 struct kvm_io_bus *new_bus, *bus; 4641 4642 lockdep_assert_held(&kvm->slots_lock); 4643 4644 bus = kvm_get_bus(kvm, bus_idx); 4645 if (!bus) 4646 return 0; 4647 4648 for (i = 0; i < bus->dev_count; i++) { 4649 if (bus->range[i].dev == dev) { 4650 break; 4651 } 4652 } 4653 4654 if (i == bus->dev_count) 4655 return 0; 4656 4657 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 4658 GFP_KERNEL_ACCOUNT); 4659 if (new_bus) { 4660 memcpy(new_bus, bus, struct_size(bus, range, i)); 4661 new_bus->dev_count--; 4662 memcpy(new_bus->range + i, bus->range + i + 1, 4663 flex_array_size(new_bus, range, new_bus->dev_count - i)); 4664 } 4665 4666 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4667 synchronize_srcu_expedited(&kvm->srcu); 4668 4669 /* Destroy the old bus _after_ installing the (null) bus. */ 4670 if (!new_bus) { 4671 pr_err("kvm: failed to shrink bus, removing it completely\n"); 4672 for (j = 0; j < bus->dev_count; j++) { 4673 if (j == i) 4674 continue; 4675 kvm_iodevice_destructor(bus->range[j].dev); 4676 } 4677 } 4678 4679 kfree(bus); 4680 return new_bus ? 0 : -ENOMEM; 4681 } 4682 4683 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4684 gpa_t addr) 4685 { 4686 struct kvm_io_bus *bus; 4687 int dev_idx, srcu_idx; 4688 struct kvm_io_device *iodev = NULL; 4689 4690 srcu_idx = srcu_read_lock(&kvm->srcu); 4691 4692 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 4693 if (!bus) 4694 goto out_unlock; 4695 4696 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 4697 if (dev_idx < 0) 4698 goto out_unlock; 4699 4700 iodev = bus->range[dev_idx].dev; 4701 4702 out_unlock: 4703 srcu_read_unlock(&kvm->srcu, srcu_idx); 4704 4705 return iodev; 4706 } 4707 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 4708 4709 static int kvm_debugfs_open(struct inode *inode, struct file *file, 4710 int (*get)(void *, u64 *), int (*set)(void *, u64), 4711 const char *fmt) 4712 { 4713 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4714 inode->i_private; 4715 4716 /* The debugfs files are a reference to the kvm struct which 4717 * is still valid when kvm_destroy_vm is called. 
4718 * To avoid the race between open and the removal of the debugfs 4719 * directory we test against the users count. 4720 */ 4721 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) 4722 return -ENOENT; 4723 4724 if (simple_attr_open(inode, file, get, 4725 KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222 4726 ? set : NULL, 4727 fmt)) { 4728 kvm_put_kvm(stat_data->kvm); 4729 return -ENOMEM; 4730 } 4731 4732 return 0; 4733 } 4734 4735 static int kvm_debugfs_release(struct inode *inode, struct file *file) 4736 { 4737 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4738 inode->i_private; 4739 4740 simple_attr_release(inode, file); 4741 kvm_put_kvm(stat_data->kvm); 4742 4743 return 0; 4744 } 4745 4746 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 4747 { 4748 *val = *(ulong *)((void *)kvm + offset); 4749 4750 return 0; 4751 } 4752 4753 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 4754 { 4755 *(ulong *)((void *)kvm + offset) = 0; 4756 4757 return 0; 4758 } 4759 4760 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 4761 { 4762 int i; 4763 struct kvm_vcpu *vcpu; 4764 4765 *val = 0; 4766 4767 kvm_for_each_vcpu(i, vcpu, kvm) 4768 *val += *(u64 *)((void *)vcpu + offset); 4769 4770 return 0; 4771 } 4772 4773 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 4774 { 4775 int i; 4776 struct kvm_vcpu *vcpu; 4777 4778 kvm_for_each_vcpu(i, vcpu, kvm) 4779 *(u64 *)((void *)vcpu + offset) = 0; 4780 4781 return 0; 4782 } 4783 4784 static int kvm_stat_data_get(void *data, u64 *val) 4785 { 4786 int r = -EFAULT; 4787 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 4788 4789 switch (stat_data->dbgfs_item->kind) { 4790 case KVM_STAT_VM: 4791 r = kvm_get_stat_per_vm(stat_data->kvm, 4792 stat_data->dbgfs_item->offset, val); 4793 break; 4794 case KVM_STAT_VCPU: 4795 r = kvm_get_stat_per_vcpu(stat_data->kvm, 4796 stat_data->dbgfs_item->offset, val); 4797 break; 4798 } 4799 4800 return r; 4801 } 4802 4803 static int kvm_stat_data_clear(void *data, u64 val) 4804 { 4805 int r = -EFAULT; 4806 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 4807 4808 if (val) 4809 return -EINVAL; 4810 4811 switch (stat_data->dbgfs_item->kind) { 4812 case KVM_STAT_VM: 4813 r = kvm_clear_stat_per_vm(stat_data->kvm, 4814 stat_data->dbgfs_item->offset); 4815 break; 4816 case KVM_STAT_VCPU: 4817 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 4818 stat_data->dbgfs_item->offset); 4819 break; 4820 } 4821 4822 return r; 4823 } 4824 4825 static int kvm_stat_data_open(struct inode *inode, struct file *file) 4826 { 4827 __simple_attr_check_format("%llu\n", 0ull); 4828 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 4829 kvm_stat_data_clear, "%llu\n"); 4830 } 4831 4832 static const struct file_operations stat_fops_per_vm = { 4833 .owner = THIS_MODULE, 4834 .open = kvm_stat_data_open, 4835 .release = kvm_debugfs_release, 4836 .read = simple_attr_read, 4837 .write = simple_attr_write, 4838 .llseek = no_llseek, 4839 }; 4840 4841 static int vm_stat_get(void *_offset, u64 *val) 4842 { 4843 unsigned offset = (long)_offset; 4844 struct kvm *kvm; 4845 u64 tmp_val; 4846 4847 *val = 0; 4848 mutex_lock(&kvm_lock); 4849 list_for_each_entry(kvm, &vm_list, vm_list) { 4850 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 4851 *val += tmp_val; 4852 } 4853 mutex_unlock(&kvm_lock); 4854 return 0; 4855 } 4856 4857 static int vm_stat_clear(void *_offset, u64 val) 4858 { 4859 unsigned offset = (long)_offset; 4860 struct kvm *kvm; 4861 
4862 if (val) 4863 return -EINVAL; 4864 4865 mutex_lock(&kvm_lock); 4866 list_for_each_entry(kvm, &vm_list, vm_list) { 4867 kvm_clear_stat_per_vm(kvm, offset); 4868 } 4869 mutex_unlock(&kvm_lock); 4870 4871 return 0; 4872 } 4873 4874 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 4875 4876 static int vcpu_stat_get(void *_offset, u64 *val) 4877 { 4878 unsigned offset = (long)_offset; 4879 struct kvm *kvm; 4880 u64 tmp_val; 4881 4882 *val = 0; 4883 mutex_lock(&kvm_lock); 4884 list_for_each_entry(kvm, &vm_list, vm_list) { 4885 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 4886 *val += tmp_val; 4887 } 4888 mutex_unlock(&kvm_lock); 4889 return 0; 4890 } 4891 4892 static int vcpu_stat_clear(void *_offset, u64 val) 4893 { 4894 unsigned offset = (long)_offset; 4895 struct kvm *kvm; 4896 4897 if (val) 4898 return -EINVAL; 4899 4900 mutex_lock(&kvm_lock); 4901 list_for_each_entry(kvm, &vm_list, vm_list) { 4902 kvm_clear_stat_per_vcpu(kvm, offset); 4903 } 4904 mutex_unlock(&kvm_lock); 4905 4906 return 0; 4907 } 4908 4909 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 4910 "%llu\n"); 4911 4912 static const struct file_operations *stat_fops[] = { 4913 [KVM_STAT_VCPU] = &vcpu_stat_fops, 4914 [KVM_STAT_VM] = &vm_stat_fops, 4915 }; 4916 4917 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 4918 { 4919 struct kobj_uevent_env *env; 4920 unsigned long long created, active; 4921 4922 if (!kvm_dev.this_device || !kvm) 4923 return; 4924 4925 mutex_lock(&kvm_lock); 4926 if (type == KVM_EVENT_CREATE_VM) { 4927 kvm_createvm_count++; 4928 kvm_active_vms++; 4929 } else if (type == KVM_EVENT_DESTROY_VM) { 4930 kvm_active_vms--; 4931 } 4932 created = kvm_createvm_count; 4933 active = kvm_active_vms; 4934 mutex_unlock(&kvm_lock); 4935 4936 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 4937 if (!env) 4938 return; 4939 4940 add_uevent_var(env, "CREATED=%llu", created); 4941 add_uevent_var(env, "COUNT=%llu", active); 4942 4943 if (type == KVM_EVENT_CREATE_VM) { 4944 add_uevent_var(env, "EVENT=create"); 4945 kvm->userspace_pid = task_pid_nr(current); 4946 } else if (type == KVM_EVENT_DESTROY_VM) { 4947 add_uevent_var(env, "EVENT=destroy"); 4948 } 4949 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 4950 4951 if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { 4952 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 4953 4954 if (p) { 4955 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 4956 if (!IS_ERR(tmp)) 4957 add_uevent_var(env, "STATS_PATH=%s", tmp); 4958 kfree(p); 4959 } 4960 } 4961 /* no need for checks, since we are adding at most only 5 keys */ 4962 env->envp[env->envp_idx++] = NULL; 4963 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 4964 kfree(env); 4965 } 4966 4967 static void kvm_init_debug(void) 4968 { 4969 struct kvm_stats_debugfs_item *p; 4970 4971 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 4972 4973 kvm_debugfs_num_entries = 0; 4974 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 4975 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p), 4976 kvm_debugfs_dir, (void *)(long)p->offset, 4977 stat_fops[p->kind]); 4978 } 4979 } 4980 4981 static int kvm_suspend(void) 4982 { 4983 if (kvm_usage_count) 4984 hardware_disable_nolock(NULL); 4985 return 0; 4986 } 4987 4988 static void kvm_resume(void) 4989 { 4990 if (kvm_usage_count) { 4991 #ifdef CONFIG_LOCKDEP 4992 WARN_ON(lockdep_is_held(&kvm_count_lock)); 4993 #endif 4994 hardware_enable_nolock(NULL); 4995 } 4996 } 4997 4998 
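/*
 * Illustrative sketch (kept under #if 0, never compiled; a hedged
 * assumption, not this file's code): the shape of the per-architecture
 * debugfs_entries[] table walked by kvm_init_debug() and stat_fops[]
 * above. Each entry pairs a file name with an offsetof() into struct kvm
 * or struct kvm_vcpu, which kvm_get_stat_per_vm()/kvm_get_stat_per_vcpu()
 * dereference; the stat names here are examples only.
 */
#if 0
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "remote_tlb_flush", offsetof(struct kvm, stat.remote_tlb_flush),
	  KVM_STAT_VM },
	{ "halt_wakeup", offsetof(struct kvm_vcpu, stat.halt_wakeup),
	  KVM_STAT_VCPU },
	{ NULL }
};
#endif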
static struct syscore_ops kvm_syscore_ops = { 4999 .suspend = kvm_suspend, 5000 .resume = kvm_resume, 5001 }; 5002 5003 static inline 5004 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5005 { 5006 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5007 } 5008 5009 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5010 { 5011 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5012 5013 WRITE_ONCE(vcpu->preempted, false); 5014 WRITE_ONCE(vcpu->ready, false); 5015 5016 __this_cpu_write(kvm_running_vcpu, vcpu); 5017 kvm_arch_sched_in(vcpu, cpu); 5018 kvm_arch_vcpu_load(vcpu, cpu); 5019 } 5020 5021 static void kvm_sched_out(struct preempt_notifier *pn, 5022 struct task_struct *next) 5023 { 5024 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5025 5026 if (current->state == TASK_RUNNING) { 5027 WRITE_ONCE(vcpu->preempted, true); 5028 WRITE_ONCE(vcpu->ready, true); 5029 } 5030 kvm_arch_vcpu_put(vcpu); 5031 __this_cpu_write(kvm_running_vcpu, NULL); 5032 } 5033 5034 /** 5035 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5036 * 5037 * We can disable preemption locally around accessing the per-CPU variable, 5038 * and use the resolved vcpu pointer after enabling preemption again, 5039 * because even if the current thread is migrated to another CPU, reading 5040 * the per-CPU value later will give us the same value as we update the 5041 * per-CPU variable in the preempt notifier handlers. 5042 */ 5043 struct kvm_vcpu *kvm_get_running_vcpu(void) 5044 { 5045 struct kvm_vcpu *vcpu; 5046 5047 preempt_disable(); 5048 vcpu = __this_cpu_read(kvm_running_vcpu); 5049 preempt_enable(); 5050 5051 return vcpu; 5052 } 5053 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5054 5055 /** 5056 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5057 */ 5058 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5059 { 5060 return &kvm_running_vcpu; 5061 } 5062 5063 struct kvm_cpu_compat_check { 5064 void *opaque; 5065 int *ret; 5066 }; 5067 5068 static void check_processor_compat(void *data) 5069 { 5070 struct kvm_cpu_compat_check *c = data; 5071 5072 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5073 } 5074 5075 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5076 struct module *module) 5077 { 5078 struct kvm_cpu_compat_check c; 5079 int r; 5080 int cpu; 5081 5082 r = kvm_arch_init(opaque); 5083 if (r) 5084 goto out_fail; 5085 5086 /* 5087 * kvm_arch_init makes sure there's at most one caller 5088 * for architectures that support multiple implementations, 5089 * like intel and amd on x86. 5090 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5091 * conflicts in case kvm is already setup for another implementation. 5092 */ 5093 r = kvm_irqfd_init(); 5094 if (r) 5095 goto out_irqfd; 5096 5097 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5098 r = -ENOMEM; 5099 goto out_free_0; 5100 } 5101 5102 r = kvm_arch_hardware_setup(opaque); 5103 if (r < 0) 5104 goto out_free_1; 5105 5106 c.ret = &r; 5107 c.opaque = opaque; 5108 for_each_online_cpu(cpu) { 5109 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5110 if (r < 0) 5111 goto out_free_2; 5112 } 5113 5114 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5115 kvm_starting_cpu, kvm_dying_cpu); 5116 if (r) 5117 goto out_free_2; 5118 register_reboot_notifier(&kvm_reboot_notifier); 5119 5120 /* A kmem cache lets us meet the alignment requirements of fx_save. 
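 *
 * Illustrative entry point (comment only, hedged): an arch module
 * typically reaches this code via something like
 *
 *	kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
 *		 __alignof__(struct vcpu_vmx), THIS_MODULE);
 *
 * so vcpu_size/vcpu_align describe the arch's containing vcpu structure.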
*/ 5121 if (!vcpu_align) 5122 vcpu_align = __alignof__(struct kvm_vcpu); 5123 kvm_vcpu_cache = 5124 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5125 SLAB_ACCOUNT, 5126 offsetof(struct kvm_vcpu, arch), 5127 sizeof_field(struct kvm_vcpu, arch), 5128 NULL); 5129 if (!kvm_vcpu_cache) { 5130 r = -ENOMEM; 5131 goto out_free_3; 5132 } 5133 5134 r = kvm_async_pf_init(); 5135 if (r) 5136 goto out_free; 5137 5138 kvm_chardev_ops.owner = module; 5139 kvm_vm_fops.owner = module; 5140 kvm_vcpu_fops.owner = module; 5141 5142 r = misc_register(&kvm_dev); 5143 if (r) { 5144 pr_err("kvm: misc device register failed\n"); 5145 goto out_unreg; 5146 } 5147 5148 register_syscore_ops(&kvm_syscore_ops); 5149 5150 kvm_preempt_ops.sched_in = kvm_sched_in; 5151 kvm_preempt_ops.sched_out = kvm_sched_out; 5152 5153 kvm_init_debug(); 5154 5155 r = kvm_vfio_ops_init(); 5156 WARN_ON(r); 5157 5158 return 0; 5159 5160 out_unreg: 5161 kvm_async_pf_deinit(); 5162 out_free: 5163 kmem_cache_destroy(kvm_vcpu_cache); 5164 out_free_3: 5165 unregister_reboot_notifier(&kvm_reboot_notifier); 5166 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5167 out_free_2: 5168 kvm_arch_hardware_unsetup(); 5169 out_free_1: 5170 free_cpumask_var(cpus_hardware_enabled); 5171 out_free_0: 5172 kvm_irqfd_exit(); 5173 out_irqfd: 5174 kvm_arch_exit(); 5175 out_fail: 5176 return r; 5177 } 5178 EXPORT_SYMBOL_GPL(kvm_init); 5179 5180 void kvm_exit(void) 5181 { 5182 debugfs_remove_recursive(kvm_debugfs_dir); 5183 misc_deregister(&kvm_dev); 5184 kmem_cache_destroy(kvm_vcpu_cache); 5185 kvm_async_pf_deinit(); 5186 unregister_syscore_ops(&kvm_syscore_ops); 5187 unregister_reboot_notifier(&kvm_reboot_notifier); 5188 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5189 on_each_cpu(hardware_disable_nolock, NULL, 1); 5190 kvm_arch_hardware_unsetup(); 5191 kvm_arch_exit(); 5192 kvm_irqfd_exit(); 5193 free_cpumask_var(cpus_hardware_enabled); 5194 kvm_vfio_ops_exit(); 5195 } 5196 EXPORT_SYMBOL_GPL(kvm_exit); 5197 5198 struct kvm_vm_worker_thread_context { 5199 struct kvm *kvm; 5200 struct task_struct *parent; 5201 struct completion init_done; 5202 kvm_vm_thread_fn_t thread_fn; 5203 uintptr_t data; 5204 int err; 5205 }; 5206 5207 static int kvm_vm_worker_thread(void *context) 5208 { 5209 /* 5210 * The init_context is allocated on the stack of the parent thread, so 5211 * we have to locally copy anything that is needed beyond initialization 5212 */ 5213 struct kvm_vm_worker_thread_context *init_context = context; 5214 struct kvm *kvm = init_context->kvm; 5215 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5216 uintptr_t data = init_context->data; 5217 int err; 5218 5219 err = kthread_park(current); 5220 /* kthread_park(current) is never supposed to return an error */ 5221 WARN_ON(err != 0); 5222 if (err) 5223 goto init_complete; 5224 5225 err = cgroup_attach_task_all(init_context->parent, current); 5226 if (err) { 5227 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5228 __func__, err); 5229 goto init_complete; 5230 } 5231 5232 set_user_nice(current, task_nice(init_context->parent)); 5233 5234 init_complete: 5235 init_context->err = err; 5236 complete(&init_context->init_done); 5237 init_context = NULL; 5238 5239 if (err) 5240 return err; 5241 5242 /* Wait to be woken up by the spawner before proceeding. 
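 *
 * (The thread parks itself here; the caller of
 * kvm_vm_create_worker_thread() below is expected to kthread_unpark()
 * the returned task once it has stashed the task pointer.)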
*/ 5243 kthread_parkme(); 5244 5245 if (!kthread_should_stop()) 5246 err = thread_fn(kvm, data); 5247 5248 return err; 5249 } 5250 5251 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5252 uintptr_t data, const char *name, 5253 struct task_struct **thread_ptr) 5254 { 5255 struct kvm_vm_worker_thread_context init_context = {}; 5256 struct task_struct *thread; 5257 5258 *thread_ptr = NULL; 5259 init_context.kvm = kvm; 5260 init_context.parent = current; 5261 init_context.thread_fn = thread_fn; 5262 init_context.data = data; 5263 init_completion(&init_context.init_done); 5264 5265 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5266 "%s-%d", name, task_pid_nr(current)); 5267 if (IS_ERR(thread)) 5268 return PTR_ERR(thread); 5269 5270 /* kthread_run is never supposed to return NULL */ 5271 WARN_ON(thread == NULL); 5272 5273 wait_for_completion(&init_context.init_done); 5274 5275 if (!init_context.err) 5276 *thread_ptr = thread; 5277 5278 return init_context.err; 5279 } 5280
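/*
 * Illustrative caller sketch (kept under #if 0, never compiled): the
 * pattern an arch uses to start a per-VM worker thread.
 * recovery_worker_fn and the thread name are hypothetical stand-ins.
 */
#if 0
	struct task_struct *thread;
	int err;

	err = kvm_vm_create_worker_thread(kvm, recovery_worker_fn, 0,
					  "kvm-worker", &thread);
	if (!err)
		kthread_unpark(thread);	/* let the parked worker run */
#endif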