1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 #include <linux/suspend.h> 55 56 #include <asm/processor.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 60 #include "coalesced_mmio.h" 61 #include "async_pf.h" 62 #include "mmu_lock.h" 63 #include "vfio.h" 64 65 #define CREATE_TRACE_POINTS 66 #include <trace/events/kvm.h> 67 68 #include <linux/kvm_dirty_ring.h> 69 70 /* Worst case buffer size needed for holding an integer. */ 71 #define ITOA_MAX_LEN 12 72 73 MODULE_AUTHOR("Qumranet"); 74 MODULE_LICENSE("GPL"); 75 76 /* Architectures should define their poll value according to the halt latency */ 77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 78 module_param(halt_poll_ns, uint, 0644); 79 EXPORT_SYMBOL_GPL(halt_poll_ns); 80 81 /* Default doubles per-vcpu halt_poll_ns. */ 82 unsigned int halt_poll_ns_grow = 2; 83 module_param(halt_poll_ns_grow, uint, 0644); 84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 85 86 /* The start value to grow halt_poll_ns from */ 87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 88 module_param(halt_poll_ns_grow_start, uint, 0644); 89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 90 91 /* Default resets per-vcpu halt_poll_ns . */ 92 unsigned int halt_poll_ns_shrink; 93 module_param(halt_poll_ns_shrink, uint, 0644); 94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 95 96 /* 97 * Ordering of locks: 98 * 99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 100 */ 101 102 DEFINE_MUTEX(kvm_lock); 103 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 104 LIST_HEAD(vm_list); 105 106 static cpumask_var_t cpus_hardware_enabled; 107 static int kvm_usage_count; 108 static atomic_t hardware_enable_failed; 109 110 static struct kmem_cache *kvm_vcpu_cache; 111 112 static __read_mostly struct preempt_ops kvm_preempt_ops; 113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 114 115 struct dentry *kvm_debugfs_dir; 116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 117 118 static const struct file_operations stat_fops_per_vm; 119 120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 121 unsigned long arg); 122 #ifdef CONFIG_KVM_COMPAT 123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 124 unsigned long arg); 125 #define KVM_COMPAT(c) .compat_ioctl = (c) 126 #else 127 /* 128 * For architectures that don't implement a compat infrastructure, 129 * adopt a double line of defense: 130 * - Prevent a compat task from opening /dev/kvm 131 * - If the open has been done by a 64bit task, and the KVM fd 132 * passed to a compat task, let the ioctls fail. 133 */ 134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 135 unsigned long arg) { return -EINVAL; } 136 137 static int kvm_no_compat_open(struct inode *inode, struct file *file) 138 { 139 return is_compat_task() ? -ENODEV : 0; 140 } 141 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 142 .open = kvm_no_compat_open 143 #endif 144 static int hardware_enable_all(void); 145 static void hardware_disable_all(void); 146 147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 148 149 __visible bool kvm_rebooting; 150 EXPORT_SYMBOL_GPL(kvm_rebooting); 151 152 #define KVM_EVENT_CREATE_VM 0 153 #define KVM_EVENT_DESTROY_VM 1 154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 155 static unsigned long long kvm_createvm_count; 156 static unsigned long long kvm_active_vms; 157 158 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); 159 160 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 161 unsigned long start, unsigned long end) 162 { 163 } 164 165 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) 166 { 167 /* 168 * The metadata used by is_zone_device_page() to determine whether or 169 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 170 * the device has been pinned, e.g. by get_user_pages(). WARN if the 171 * page_count() is zero to help detect bad usage of this helper. 172 */ 173 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) 174 return false; 175 176 return is_zone_device_page(pfn_to_page(pfn)); 177 } 178 179 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 180 { 181 /* 182 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 183 * perspective they are "normal" pages, albeit with slightly different 184 * usage rules. 185 */ 186 if (pfn_valid(pfn)) 187 return PageReserved(pfn_to_page(pfn)) && 188 !is_zero_pfn(pfn) && 189 !kvm_is_zone_device_pfn(pfn); 190 191 return true; 192 } 193 194 /* 195 * Switches to specified vcpu, until a matching vcpu_put() 196 */ 197 void vcpu_load(struct kvm_vcpu *vcpu) 198 { 199 int cpu = get_cpu(); 200 201 __this_cpu_write(kvm_running_vcpu, vcpu); 202 preempt_notifier_register(&vcpu->preempt_notifier); 203 kvm_arch_vcpu_load(vcpu, cpu); 204 put_cpu(); 205 } 206 EXPORT_SYMBOL_GPL(vcpu_load); 207 208 void vcpu_put(struct kvm_vcpu *vcpu) 209 { 210 preempt_disable(); 211 kvm_arch_vcpu_put(vcpu); 212 preempt_notifier_unregister(&vcpu->preempt_notifier); 213 __this_cpu_write(kvm_running_vcpu, NULL); 214 preempt_enable(); 215 } 216 EXPORT_SYMBOL_GPL(vcpu_put); 217 218 /* TODO: merge with kvm_arch_vcpu_should_kick */ 219 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 220 { 221 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 222 223 /* 224 * We need to wait for the VCPU to reenable interrupts and get out of 225 * READING_SHADOW_PAGE_TABLES mode. 226 */ 227 if (req & KVM_REQUEST_WAIT) 228 return mode != OUTSIDE_GUEST_MODE; 229 230 /* 231 * Need to kick a running VCPU, but otherwise there is nothing to do. 232 */ 233 return mode == IN_GUEST_MODE; 234 } 235 236 static void ack_flush(void *_completed) 237 { 238 } 239 240 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) 241 { 242 if (cpumask_empty(cpus)) 243 return false; 244 245 smp_call_function_many(cpus, ack_flush, NULL, wait); 246 return true; 247 } 248 249 static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, 250 unsigned int req, struct cpumask *tmp, 251 int current_cpu) 252 { 253 int cpu; 254 255 kvm_make_request(req, vcpu); 256 257 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 258 return; 259 260 /* 261 * Note, the vCPU could get migrated to a different pCPU at any point 262 * after kvm_request_needs_ipi(), which could result in sending an IPI 263 * to the previous pCPU. But, that's OK because the purpose of the IPI 264 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is 265 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES 266 * after this point is also OK, as the requirement is only that KVM wait 267 * for vCPUs that were reading SPTEs _before_ any changes were 268 * finalized. See kvm_vcpu_kick() for more details on handling requests. 269 */ 270 if (kvm_request_needs_ipi(vcpu, req)) { 271 cpu = READ_ONCE(vcpu->cpu); 272 if (cpu != -1 && cpu != current_cpu) 273 __cpumask_set_cpu(cpu, tmp); 274 } 275 } 276 277 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 278 unsigned long *vcpu_bitmap) 279 { 280 struct kvm_vcpu *vcpu; 281 struct cpumask *cpus; 282 int i, me; 283 bool called; 284 285 me = get_cpu(); 286 287 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 288 cpumask_clear(cpus); 289 290 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 291 vcpu = kvm_get_vcpu(kvm, i); 292 if (!vcpu) 293 continue; 294 kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); 295 } 296 297 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 298 put_cpu(); 299 300 return called; 301 } 302 303 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 304 struct kvm_vcpu *except) 305 { 306 struct kvm_vcpu *vcpu; 307 struct cpumask *cpus; 308 bool called; 309 int i, me; 310 311 me = get_cpu(); 312 313 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 314 cpumask_clear(cpus); 315 316 kvm_for_each_vcpu(i, vcpu, kvm) { 317 if (vcpu == except) 318 continue; 319 kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); 320 } 321 322 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 323 put_cpu(); 324 325 return called; 326 } 327 328 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 329 { 330 return kvm_make_all_cpus_request_except(kvm, req, NULL); 331 } 332 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 333 334 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 335 void kvm_flush_remote_tlbs(struct kvm *kvm) 336 { 337 ++kvm->stat.generic.remote_tlb_flush_requests; 338 339 /* 340 * We want to publish modifications to the page tables before reading 341 * mode. Pairs with a memory barrier in arch-specific code. 342 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 343 * and smp_mb in walk_shadow_page_lockless_begin/end. 344 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 345 * 346 * There is already an smp_mb__after_atomic() before 347 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 348 * barrier here. 349 */ 350 if (!kvm_arch_flush_remote_tlb(kvm) 351 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 352 ++kvm->stat.generic.remote_tlb_flush; 353 } 354 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 355 #endif 356 357 void kvm_reload_remote_mmus(struct kvm *kvm) 358 { 359 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 360 } 361 362 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 363 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 364 gfp_t gfp_flags) 365 { 366 gfp_flags |= mc->gfp_zero; 367 368 if (mc->kmem_cache) 369 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 370 else 371 return (void *)__get_free_page(gfp_flags); 372 } 373 374 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 375 { 376 void *obj; 377 378 if (mc->nobjs >= min) 379 return 0; 380 while (mc->nobjs < ARRAY_SIZE(mc->objects)) { 381 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); 382 if (!obj) 383 return mc->nobjs >= min ? 0 : -ENOMEM; 384 mc->objects[mc->nobjs++] = obj; 385 } 386 return 0; 387 } 388 389 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 390 { 391 return mc->nobjs; 392 } 393 394 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 395 { 396 while (mc->nobjs) { 397 if (mc->kmem_cache) 398 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 399 else 400 free_page((unsigned long)mc->objects[--mc->nobjs]); 401 } 402 } 403 404 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 405 { 406 void *p; 407 408 if (WARN_ON(!mc->nobjs)) 409 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 410 else 411 p = mc->objects[--mc->nobjs]; 412 BUG_ON(!p); 413 return p; 414 } 415 #endif 416 417 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 418 { 419 mutex_init(&vcpu->mutex); 420 vcpu->cpu = -1; 421 vcpu->kvm = kvm; 422 vcpu->vcpu_id = id; 423 vcpu->pid = NULL; 424 rcuwait_init(&vcpu->wait); 425 kvm_async_pf_vcpu_init(vcpu); 426 427 vcpu->pre_pcpu = -1; 428 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 429 430 kvm_vcpu_set_in_spin_loop(vcpu, false); 431 kvm_vcpu_set_dy_eligible(vcpu, false); 432 vcpu->preempted = false; 433 vcpu->ready = false; 434 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 435 vcpu->last_used_slot = 0; 436 } 437 438 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 439 { 440 kvm_dirty_ring_free(&vcpu->dirty_ring); 441 kvm_arch_vcpu_destroy(vcpu); 442 443 /* 444 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 445 * the vcpu->pid pointer, and at destruction time all file descriptors 446 * are already gone. 447 */ 448 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 449 450 free_page((unsigned long)vcpu->run); 451 kmem_cache_free(kvm_vcpu_cache, vcpu); 452 } 453 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); 454 455 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 456 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 457 { 458 return container_of(mn, struct kvm, mmu_notifier); 459 } 460 461 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 462 struct mm_struct *mm, 463 unsigned long start, unsigned long end) 464 { 465 struct kvm *kvm = mmu_notifier_to_kvm(mn); 466 int idx; 467 468 idx = srcu_read_lock(&kvm->srcu); 469 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 470 srcu_read_unlock(&kvm->srcu, idx); 471 } 472 473 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 474 475 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 476 unsigned long end); 477 478 struct kvm_hva_range { 479 unsigned long start; 480 unsigned long end; 481 pte_t pte; 482 hva_handler_t handler; 483 on_lock_fn_t on_lock; 484 bool flush_on_ret; 485 bool may_block; 486 }; 487 488 /* 489 * Use a dedicated stub instead of NULL to indicate that there is no callback 490 * function/handler. The compiler technically can't guarantee that a real 491 * function will have a non-zero address, and so it will generate code to 492 * check for !NULL, whereas comparing against a stub will be elided at compile 493 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 494 */ 495 static void kvm_null_fn(void) 496 { 497 498 } 499 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 500 501 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 502 const struct kvm_hva_range *range) 503 { 504 bool ret = false, locked = false; 505 struct kvm_gfn_range gfn_range; 506 struct kvm_memory_slot *slot; 507 struct kvm_memslots *slots; 508 int i, idx; 509 510 /* A null handler is allowed if and only if on_lock() is provided. */ 511 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 512 IS_KVM_NULL_FN(range->handler))) 513 return 0; 514 515 idx = srcu_read_lock(&kvm->srcu); 516 517 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 518 slots = __kvm_memslots(kvm, i); 519 kvm_for_each_memslot(slot, slots) { 520 unsigned long hva_start, hva_end; 521 522 hva_start = max(range->start, slot->userspace_addr); 523 hva_end = min(range->end, slot->userspace_addr + 524 (slot->npages << PAGE_SHIFT)); 525 if (hva_start >= hva_end) 526 continue; 527 528 /* 529 * To optimize for the likely case where the address 530 * range is covered by zero or one memslots, don't 531 * bother making these conditional (to avoid writes on 532 * the second or later invocation of the handler). 533 */ 534 gfn_range.pte = range->pte; 535 gfn_range.may_block = range->may_block; 536 537 /* 538 * {gfn(page) | page intersects with [hva_start, hva_end)} = 539 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 540 */ 541 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 542 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 543 gfn_range.slot = slot; 544 545 if (!locked) { 546 locked = true; 547 KVM_MMU_LOCK(kvm); 548 if (!IS_KVM_NULL_FN(range->on_lock)) 549 range->on_lock(kvm, range->start, range->end); 550 if (IS_KVM_NULL_FN(range->handler)) 551 break; 552 } 553 ret |= range->handler(kvm, &gfn_range); 554 } 555 } 556 557 if (range->flush_on_ret && ret) 558 kvm_flush_remote_tlbs(kvm); 559 560 if (locked) 561 KVM_MMU_UNLOCK(kvm); 562 563 srcu_read_unlock(&kvm->srcu, idx); 564 565 /* The notifiers are averse to booleans. :-( */ 566 return (int)ret; 567 } 568 569 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, 570 unsigned long start, 571 unsigned long end, 572 pte_t pte, 573 hva_handler_t handler) 574 { 575 struct kvm *kvm = mmu_notifier_to_kvm(mn); 576 const struct kvm_hva_range range = { 577 .start = start, 578 .end = end, 579 .pte = pte, 580 .handler = handler, 581 .on_lock = (void *)kvm_null_fn, 582 .flush_on_ret = true, 583 .may_block = false, 584 }; 585 586 return __kvm_handle_hva_range(kvm, &range); 587 } 588 589 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, 590 unsigned long start, 591 unsigned long end, 592 hva_handler_t handler) 593 { 594 struct kvm *kvm = mmu_notifier_to_kvm(mn); 595 const struct kvm_hva_range range = { 596 .start = start, 597 .end = end, 598 .pte = __pte(0), 599 .handler = handler, 600 .on_lock = (void *)kvm_null_fn, 601 .flush_on_ret = false, 602 .may_block = false, 603 }; 604 605 return __kvm_handle_hva_range(kvm, &range); 606 } 607 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 608 struct mm_struct *mm, 609 unsigned long address, 610 pte_t pte) 611 { 612 struct kvm *kvm = mmu_notifier_to_kvm(mn); 613 614 trace_kvm_set_spte_hva(address); 615 616 /* 617 * .change_pte() must be surrounded by .invalidate_range_{start,end}(). 618 * If mmu_notifier_count is zero, then no in-progress invalidations, 619 * including this one, found a relevant memslot at start(); rechecking 620 * memslots here is unnecessary. Note, a false positive (count elevated 621 * by a different invalidation) is sub-optimal but functionally ok. 622 */ 623 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); 624 if (!READ_ONCE(kvm->mmu_notifier_count)) 625 return; 626 627 kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn); 628 } 629 630 void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start, 631 unsigned long end) 632 { 633 /* 634 * The count increase must become visible at unlock time as no 635 * spte can be established without taking the mmu_lock and 636 * count is also read inside the mmu_lock critical section. 637 */ 638 kvm->mmu_notifier_count++; 639 if (likely(kvm->mmu_notifier_count == 1)) { 640 kvm->mmu_notifier_range_start = start; 641 kvm->mmu_notifier_range_end = end; 642 } else { 643 /* 644 * Fully tracking multiple concurrent ranges has dimishing 645 * returns. Keep things simple and just find the minimal range 646 * which includes the current and new ranges. As there won't be 647 * enough information to subtract a range after its invalidate 648 * completes, any ranges invalidated concurrently will 649 * accumulate and persist until all outstanding invalidates 650 * complete. 651 */ 652 kvm->mmu_notifier_range_start = 653 min(kvm->mmu_notifier_range_start, start); 654 kvm->mmu_notifier_range_end = 655 max(kvm->mmu_notifier_range_end, end); 656 } 657 } 658 659 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 660 const struct mmu_notifier_range *range) 661 { 662 struct kvm *kvm = mmu_notifier_to_kvm(mn); 663 const struct kvm_hva_range hva_range = { 664 .start = range->start, 665 .end = range->end, 666 .pte = __pte(0), 667 .handler = kvm_unmap_gfn_range, 668 .on_lock = kvm_inc_notifier_count, 669 .flush_on_ret = true, 670 .may_block = mmu_notifier_range_blockable(range), 671 }; 672 673 trace_kvm_unmap_hva_range(range->start, range->end); 674 675 /* 676 * Prevent memslot modification between range_start() and range_end() 677 * so that conditionally locking provides the same result in both 678 * functions. Without that guarantee, the mmu_notifier_count 679 * adjustments will be imbalanced. 680 * 681 * Pairs with the decrement in range_end(). 682 */ 683 spin_lock(&kvm->mn_invalidate_lock); 684 kvm->mn_active_invalidate_count++; 685 spin_unlock(&kvm->mn_invalidate_lock); 686 687 __kvm_handle_hva_range(kvm, &hva_range); 688 689 return 0; 690 } 691 692 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, 693 unsigned long end) 694 { 695 /* 696 * This sequence increase will notify the kvm page fault that 697 * the page that is going to be mapped in the spte could have 698 * been freed. 699 */ 700 kvm->mmu_notifier_seq++; 701 smp_wmb(); 702 /* 703 * The above sequence increase must be visible before the 704 * below count decrease, which is ensured by the smp_wmb above 705 * in conjunction with the smp_rmb in mmu_notifier_retry(). 706 */ 707 kvm->mmu_notifier_count--; 708 } 709 710 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 711 const struct mmu_notifier_range *range) 712 { 713 struct kvm *kvm = mmu_notifier_to_kvm(mn); 714 const struct kvm_hva_range hva_range = { 715 .start = range->start, 716 .end = range->end, 717 .pte = __pte(0), 718 .handler = (void *)kvm_null_fn, 719 .on_lock = kvm_dec_notifier_count, 720 .flush_on_ret = false, 721 .may_block = mmu_notifier_range_blockable(range), 722 }; 723 bool wake; 724 725 __kvm_handle_hva_range(kvm, &hva_range); 726 727 /* Pairs with the increment in range_start(). */ 728 spin_lock(&kvm->mn_invalidate_lock); 729 wake = (--kvm->mn_active_invalidate_count == 0); 730 spin_unlock(&kvm->mn_invalidate_lock); 731 732 /* 733 * There can only be one waiter, since the wait happens under 734 * slots_lock. 735 */ 736 if (wake) 737 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 738 739 BUG_ON(kvm->mmu_notifier_count < 0); 740 } 741 742 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 743 struct mm_struct *mm, 744 unsigned long start, 745 unsigned long end) 746 { 747 trace_kvm_age_hva(start, end); 748 749 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); 750 } 751 752 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 753 struct mm_struct *mm, 754 unsigned long start, 755 unsigned long end) 756 { 757 trace_kvm_age_hva(start, end); 758 759 /* 760 * Even though we do not flush TLB, this will still adversely 761 * affect performance on pre-Haswell Intel EPT, where there is 762 * no EPT Access Bit to clear so that we have to tear down EPT 763 * tables instead. If we find this unacceptable, we can always 764 * add a parameter to kvm_age_hva so that it effectively doesn't 765 * do anything on clear_young. 766 * 767 * Also note that currently we never issue secondary TLB flushes 768 * from clear_young, leaving this job up to the regular system 769 * cadence. If we find this inaccurate, we might come up with a 770 * more sophisticated heuristic later. 771 */ 772 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 773 } 774 775 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 776 struct mm_struct *mm, 777 unsigned long address) 778 { 779 trace_kvm_test_age_hva(address); 780 781 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 782 kvm_test_age_gfn); 783 } 784 785 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 786 struct mm_struct *mm) 787 { 788 struct kvm *kvm = mmu_notifier_to_kvm(mn); 789 int idx; 790 791 idx = srcu_read_lock(&kvm->srcu); 792 kvm_arch_flush_shadow_all(kvm); 793 srcu_read_unlock(&kvm->srcu, idx); 794 } 795 796 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 797 .invalidate_range = kvm_mmu_notifier_invalidate_range, 798 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 799 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 800 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 801 .clear_young = kvm_mmu_notifier_clear_young, 802 .test_young = kvm_mmu_notifier_test_young, 803 .change_pte = kvm_mmu_notifier_change_pte, 804 .release = kvm_mmu_notifier_release, 805 }; 806 807 static int kvm_init_mmu_notifier(struct kvm *kvm) 808 { 809 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 810 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 811 } 812 813 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 814 815 static int kvm_init_mmu_notifier(struct kvm *kvm) 816 { 817 return 0; 818 } 819 820 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 821 822 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 823 static int kvm_pm_notifier_call(struct notifier_block *bl, 824 unsigned long state, 825 void *unused) 826 { 827 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 828 829 return kvm_arch_pm_notifier(kvm, state); 830 } 831 832 static void kvm_init_pm_notifier(struct kvm *kvm) 833 { 834 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 835 /* Suspend KVM before we suspend ftrace, RCU, etc. */ 836 kvm->pm_notifier.priority = INT_MAX; 837 register_pm_notifier(&kvm->pm_notifier); 838 } 839 840 static void kvm_destroy_pm_notifier(struct kvm *kvm) 841 { 842 unregister_pm_notifier(&kvm->pm_notifier); 843 } 844 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 845 static void kvm_init_pm_notifier(struct kvm *kvm) 846 { 847 } 848 849 static void kvm_destroy_pm_notifier(struct kvm *kvm) 850 { 851 } 852 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 853 854 static struct kvm_memslots *kvm_alloc_memslots(void) 855 { 856 int i; 857 struct kvm_memslots *slots; 858 859 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); 860 if (!slots) 861 return NULL; 862 863 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 864 slots->id_to_index[i] = -1; 865 866 return slots; 867 } 868 869 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 870 { 871 if (!memslot->dirty_bitmap) 872 return; 873 874 kvfree(memslot->dirty_bitmap); 875 memslot->dirty_bitmap = NULL; 876 } 877 878 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 879 { 880 kvm_destroy_dirty_bitmap(slot); 881 882 kvm_arch_free_memslot(kvm, slot); 883 884 slot->flags = 0; 885 slot->npages = 0; 886 } 887 888 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 889 { 890 struct kvm_memory_slot *memslot; 891 892 if (!slots) 893 return; 894 895 kvm_for_each_memslot(memslot, slots) 896 kvm_free_memslot(kvm, memslot); 897 898 kvfree(slots); 899 } 900 901 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 902 { 903 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 904 case KVM_STATS_TYPE_INSTANT: 905 return 0444; 906 case KVM_STATS_TYPE_CUMULATIVE: 907 case KVM_STATS_TYPE_PEAK: 908 default: 909 return 0644; 910 } 911 } 912 913 914 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 915 { 916 int i; 917 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 918 kvm_vcpu_stats_header.num_desc; 919 920 if (!kvm->debugfs_dentry) 921 return; 922 923 debugfs_remove_recursive(kvm->debugfs_dentry); 924 925 if (kvm->debugfs_stat_data) { 926 for (i = 0; i < kvm_debugfs_num_entries; i++) 927 kfree(kvm->debugfs_stat_data[i]); 928 kfree(kvm->debugfs_stat_data); 929 } 930 } 931 932 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 933 { 934 static DEFINE_MUTEX(kvm_debugfs_lock); 935 struct dentry *dent; 936 char dir_name[ITOA_MAX_LEN * 2]; 937 struct kvm_stat_data *stat_data; 938 const struct _kvm_stats_desc *pdesc; 939 int i, ret; 940 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 941 kvm_vcpu_stats_header.num_desc; 942 943 if (!debugfs_initialized()) 944 return 0; 945 946 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 947 mutex_lock(&kvm_debugfs_lock); 948 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 949 if (dent) { 950 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 951 dput(dent); 952 mutex_unlock(&kvm_debugfs_lock); 953 return 0; 954 } 955 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 956 mutex_unlock(&kvm_debugfs_lock); 957 if (IS_ERR(dent)) 958 return 0; 959 960 kvm->debugfs_dentry = dent; 961 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 962 sizeof(*kvm->debugfs_stat_data), 963 GFP_KERNEL_ACCOUNT); 964 if (!kvm->debugfs_stat_data) 965 return -ENOMEM; 966 967 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 968 pdesc = &kvm_vm_stats_desc[i]; 969 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 970 if (!stat_data) 971 return -ENOMEM; 972 973 stat_data->kvm = kvm; 974 stat_data->desc = pdesc; 975 stat_data->kind = KVM_STAT_VM; 976 kvm->debugfs_stat_data[i] = stat_data; 977 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 978 kvm->debugfs_dentry, stat_data, 979 &stat_fops_per_vm); 980 } 981 982 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 983 pdesc = &kvm_vcpu_stats_desc[i]; 984 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 985 if (!stat_data) 986 return -ENOMEM; 987 988 stat_data->kvm = kvm; 989 stat_data->desc = pdesc; 990 stat_data->kind = KVM_STAT_VCPU; 991 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; 992 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 993 kvm->debugfs_dentry, stat_data, 994 &stat_fops_per_vm); 995 } 996 997 ret = kvm_arch_create_vm_debugfs(kvm); 998 if (ret) { 999 kvm_destroy_vm_debugfs(kvm); 1000 return i; 1001 } 1002 1003 return 0; 1004 } 1005 1006 /* 1007 * Called after the VM is otherwise initialized, but just before adding it to 1008 * the vm_list. 1009 */ 1010 int __weak kvm_arch_post_init_vm(struct kvm *kvm) 1011 { 1012 return 0; 1013 } 1014 1015 /* 1016 * Called just after removing the VM from the vm_list, but before doing any 1017 * other destruction. 1018 */ 1019 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) 1020 { 1021 } 1022 1023 /* 1024 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should 1025 * be setup already, so we can create arch-specific debugfs entries under it. 1026 * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so 1027 * a per-arch destroy interface is not needed. 1028 */ 1029 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) 1030 { 1031 return 0; 1032 } 1033 1034 static struct kvm *kvm_create_vm(unsigned long type) 1035 { 1036 struct kvm *kvm = kvm_arch_alloc_vm(); 1037 int r = -ENOMEM; 1038 int i; 1039 1040 if (!kvm) 1041 return ERR_PTR(-ENOMEM); 1042 1043 KVM_MMU_LOCK_INIT(kvm); 1044 mmgrab(current->mm); 1045 kvm->mm = current->mm; 1046 kvm_eventfd_init(kvm); 1047 mutex_init(&kvm->lock); 1048 mutex_init(&kvm->irq_lock); 1049 mutex_init(&kvm->slots_lock); 1050 mutex_init(&kvm->slots_arch_lock); 1051 spin_lock_init(&kvm->mn_invalidate_lock); 1052 rcuwait_init(&kvm->mn_memslots_update_rcuwait); 1053 1054 INIT_LIST_HEAD(&kvm->devices); 1055 1056 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 1057 1058 if (init_srcu_struct(&kvm->srcu)) 1059 goto out_err_no_srcu; 1060 if (init_srcu_struct(&kvm->irq_srcu)) 1061 goto out_err_no_irq_srcu; 1062 1063 refcount_set(&kvm->users_count, 1); 1064 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1065 struct kvm_memslots *slots = kvm_alloc_memslots(); 1066 1067 if (!slots) 1068 goto out_err_no_arch_destroy_vm; 1069 /* Generations must be different for each address space. */ 1070 slots->generation = i; 1071 rcu_assign_pointer(kvm->memslots[i], slots); 1072 } 1073 1074 for (i = 0; i < KVM_NR_BUSES; i++) { 1075 rcu_assign_pointer(kvm->buses[i], 1076 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 1077 if (!kvm->buses[i]) 1078 goto out_err_no_arch_destroy_vm; 1079 } 1080 1081 kvm->max_halt_poll_ns = halt_poll_ns; 1082 1083 r = kvm_arch_init_vm(kvm, type); 1084 if (r) 1085 goto out_err_no_arch_destroy_vm; 1086 1087 r = hardware_enable_all(); 1088 if (r) 1089 goto out_err_no_disable; 1090 1091 #ifdef CONFIG_HAVE_KVM_IRQFD 1092 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 1093 #endif 1094 1095 r = kvm_init_mmu_notifier(kvm); 1096 if (r) 1097 goto out_err_no_mmu_notifier; 1098 1099 r = kvm_arch_post_init_vm(kvm); 1100 if (r) 1101 goto out_err; 1102 1103 mutex_lock(&kvm_lock); 1104 list_add(&kvm->vm_list, &vm_list); 1105 mutex_unlock(&kvm_lock); 1106 1107 preempt_notifier_inc(); 1108 kvm_init_pm_notifier(kvm); 1109 1110 return kvm; 1111 1112 out_err: 1113 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1114 if (kvm->mmu_notifier.ops) 1115 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1116 #endif 1117 out_err_no_mmu_notifier: 1118 hardware_disable_all(); 1119 out_err_no_disable: 1120 kvm_arch_destroy_vm(kvm); 1121 out_err_no_arch_destroy_vm: 1122 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1123 for (i = 0; i < KVM_NR_BUSES; i++) 1124 kfree(kvm_get_bus(kvm, i)); 1125 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 1126 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 1127 cleanup_srcu_struct(&kvm->irq_srcu); 1128 out_err_no_irq_srcu: 1129 cleanup_srcu_struct(&kvm->srcu); 1130 out_err_no_srcu: 1131 kvm_arch_free_vm(kvm); 1132 mmdrop(current->mm); 1133 return ERR_PTR(r); 1134 } 1135 1136 static void kvm_destroy_devices(struct kvm *kvm) 1137 { 1138 struct kvm_device *dev, *tmp; 1139 1140 /* 1141 * We do not need to take the kvm->lock here, because nobody else 1142 * has a reference to the struct kvm at this point and therefore 1143 * cannot access the devices list anyhow. 1144 */ 1145 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 1146 list_del(&dev->vm_node); 1147 dev->ops->destroy(dev); 1148 } 1149 } 1150 1151 static void kvm_destroy_vm(struct kvm *kvm) 1152 { 1153 int i; 1154 struct mm_struct *mm = kvm->mm; 1155 1156 kvm_destroy_pm_notifier(kvm); 1157 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 1158 kvm_destroy_vm_debugfs(kvm); 1159 kvm_arch_sync_events(kvm); 1160 mutex_lock(&kvm_lock); 1161 list_del(&kvm->vm_list); 1162 mutex_unlock(&kvm_lock); 1163 kvm_arch_pre_destroy_vm(kvm); 1164 1165 kvm_free_irq_routing(kvm); 1166 for (i = 0; i < KVM_NR_BUSES; i++) { 1167 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1168 1169 if (bus) 1170 kvm_io_bus_destroy(bus); 1171 kvm->buses[i] = NULL; 1172 } 1173 kvm_coalesced_mmio_free(kvm); 1174 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1175 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1176 /* 1177 * At this point, pending calls to invalidate_range_start() 1178 * have completed but no more MMU notifiers will run, so 1179 * mn_active_invalidate_count may remain unbalanced. 1180 * No threads can be waiting in install_new_memslots as the 1181 * last reference on KVM has been dropped, but freeing 1182 * memslots would deadlock without this manual intervention. 1183 */ 1184 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); 1185 kvm->mn_active_invalidate_count = 0; 1186 #else 1187 kvm_arch_flush_shadow_all(kvm); 1188 #endif 1189 kvm_arch_destroy_vm(kvm); 1190 kvm_destroy_devices(kvm); 1191 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 1192 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); 1193 cleanup_srcu_struct(&kvm->irq_srcu); 1194 cleanup_srcu_struct(&kvm->srcu); 1195 kvm_arch_free_vm(kvm); 1196 preempt_notifier_dec(); 1197 hardware_disable_all(); 1198 mmdrop(mm); 1199 } 1200 1201 void kvm_get_kvm(struct kvm *kvm) 1202 { 1203 refcount_inc(&kvm->users_count); 1204 } 1205 EXPORT_SYMBOL_GPL(kvm_get_kvm); 1206 1207 /* 1208 * Make sure the vm is not during destruction, which is a safe version of 1209 * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise. 1210 */ 1211 bool kvm_get_kvm_safe(struct kvm *kvm) 1212 { 1213 return refcount_inc_not_zero(&kvm->users_count); 1214 } 1215 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe); 1216 1217 void kvm_put_kvm(struct kvm *kvm) 1218 { 1219 if (refcount_dec_and_test(&kvm->users_count)) 1220 kvm_destroy_vm(kvm); 1221 } 1222 EXPORT_SYMBOL_GPL(kvm_put_kvm); 1223 1224 /* 1225 * Used to put a reference that was taken on behalf of an object associated 1226 * with a user-visible file descriptor, e.g. a vcpu or device, if installation 1227 * of the new file descriptor fails and the reference cannot be transferred to 1228 * its final owner. In such cases, the caller is still actively using @kvm and 1229 * will fail miserably if the refcount unexpectedly hits zero. 1230 */ 1231 void kvm_put_kvm_no_destroy(struct kvm *kvm) 1232 { 1233 WARN_ON(refcount_dec_and_test(&kvm->users_count)); 1234 } 1235 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); 1236 1237 static int kvm_vm_release(struct inode *inode, struct file *filp) 1238 { 1239 struct kvm *kvm = filp->private_data; 1240 1241 kvm_irqfd_release(kvm); 1242 1243 kvm_put_kvm(kvm); 1244 return 0; 1245 } 1246 1247 /* 1248 * Allocation size is twice as large as the actual dirty bitmap size. 1249 * See kvm_vm_ioctl_get_dirty_log() why this is needed. 1250 */ 1251 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) 1252 { 1253 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 1254 1255 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); 1256 if (!memslot->dirty_bitmap) 1257 return -ENOMEM; 1258 1259 return 0; 1260 } 1261 1262 /* 1263 * Delete a memslot by decrementing the number of used slots and shifting all 1264 * other entries in the array forward one spot. 1265 */ 1266 static inline void kvm_memslot_delete(struct kvm_memslots *slots, 1267 struct kvm_memory_slot *memslot) 1268 { 1269 struct kvm_memory_slot *mslots = slots->memslots; 1270 int i; 1271 1272 if (WARN_ON(slots->id_to_index[memslot->id] == -1)) 1273 return; 1274 1275 slots->used_slots--; 1276 1277 if (atomic_read(&slots->last_used_slot) >= slots->used_slots) 1278 atomic_set(&slots->last_used_slot, 0); 1279 1280 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { 1281 mslots[i] = mslots[i + 1]; 1282 slots->id_to_index[mslots[i].id] = i; 1283 } 1284 mslots[i] = *memslot; 1285 slots->id_to_index[memslot->id] = -1; 1286 } 1287 1288 /* 1289 * "Insert" a new memslot by incrementing the number of used slots. Returns 1290 * the new slot's initial index into the memslots array. 1291 */ 1292 static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) 1293 { 1294 return slots->used_slots++; 1295 } 1296 1297 /* 1298 * Move a changed memslot backwards in the array by shifting existing slots 1299 * with a higher GFN toward the front of the array. Note, the changed memslot 1300 * itself is not preserved in the array, i.e. not swapped at this time, only 1301 * its new index into the array is tracked. Returns the changed memslot's 1302 * current index into the memslots array. 1303 */ 1304 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, 1305 struct kvm_memory_slot *memslot) 1306 { 1307 struct kvm_memory_slot *mslots = slots->memslots; 1308 int i; 1309 1310 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || 1311 WARN_ON_ONCE(!slots->used_slots)) 1312 return -1; 1313 1314 /* 1315 * Move the target memslot backward in the array by shifting existing 1316 * memslots with a higher GFN (than the target memslot) towards the 1317 * front of the array. 1318 */ 1319 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { 1320 if (memslot->base_gfn > mslots[i + 1].base_gfn) 1321 break; 1322 1323 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); 1324 1325 /* Shift the next memslot forward one and update its index. */ 1326 mslots[i] = mslots[i + 1]; 1327 slots->id_to_index[mslots[i].id] = i; 1328 } 1329 return i; 1330 } 1331 1332 /* 1333 * Move a changed memslot forwards in the array by shifting existing slots with 1334 * a lower GFN toward the back of the array. Note, the changed memslot itself 1335 * is not preserved in the array, i.e. not swapped at this time, only its new 1336 * index into the array is tracked. Returns the changed memslot's final index 1337 * into the memslots array. 1338 */ 1339 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, 1340 struct kvm_memory_slot *memslot, 1341 int start) 1342 { 1343 struct kvm_memory_slot *mslots = slots->memslots; 1344 int i; 1345 1346 for (i = start; i > 0; i--) { 1347 if (memslot->base_gfn < mslots[i - 1].base_gfn) 1348 break; 1349 1350 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); 1351 1352 /* Shift the next memslot back one and update its index. */ 1353 mslots[i] = mslots[i - 1]; 1354 slots->id_to_index[mslots[i].id] = i; 1355 } 1356 return i; 1357 } 1358 1359 /* 1360 * Re-sort memslots based on their GFN to account for an added, deleted, or 1361 * moved memslot. Sorting memslots by GFN allows using a binary search during 1362 * memslot lookup. 1363 * 1364 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry 1365 * at memslots[0] has the highest GFN. 1366 * 1367 * The sorting algorithm takes advantage of having initially sorted memslots 1368 * and knowing the position of the changed memslot. Sorting is also optimized 1369 * by not swapping the updated memslot and instead only shifting other memslots 1370 * and tracking the new index for the update memslot. Only once its final 1371 * index is known is the updated memslot copied into its position in the array. 1372 * 1373 * - When deleting a memslot, the deleted memslot simply needs to be moved to 1374 * the end of the array. 1375 * 1376 * - When creating a memslot, the algorithm "inserts" the new memslot at the 1377 * end of the array and then it forward to its correct location. 1378 * 1379 * - When moving a memslot, the algorithm first moves the updated memslot 1380 * backward to handle the scenario where the memslot's GFN was changed to a 1381 * lower value. update_memslots() then falls through and runs the same flow 1382 * as creating a memslot to move the memslot forward to handle the scenario 1383 * where its GFN was changed to a higher value. 1384 * 1385 * Note, slots are sorted from highest->lowest instead of lowest->highest for 1386 * historical reasons. Originally, invalid memslots where denoted by having 1387 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots 1388 * to the end of the array. The current algorithm uses dedicated logic to 1389 * delete a memslot and thus does not rely on invalid memslots having GFN=0. 1390 * 1391 * The other historical motiviation for highest->lowest was to improve the 1392 * performance of memslot lookup. KVM originally used a linear search starting 1393 * at memslots[0]. On x86, the largest memslot usually has one of the highest, 1394 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a 1395 * single memslot above the 4gb boundary. As the largest memslot is also the 1396 * most likely to be referenced, sorting it to the front of the array was 1397 * advantageous. The current binary search starts from the middle of the array 1398 * and uses an LRU pointer to improve performance for all memslots and GFNs. 1399 */ 1400 static void update_memslots(struct kvm_memslots *slots, 1401 struct kvm_memory_slot *memslot, 1402 enum kvm_mr_change change) 1403 { 1404 int i; 1405 1406 if (change == KVM_MR_DELETE) { 1407 kvm_memslot_delete(slots, memslot); 1408 } else { 1409 if (change == KVM_MR_CREATE) 1410 i = kvm_memslot_insert_back(slots); 1411 else 1412 i = kvm_memslot_move_backward(slots, memslot); 1413 i = kvm_memslot_move_forward(slots, memslot, i); 1414 1415 /* 1416 * Copy the memslot to its new position in memslots and update 1417 * its index accordingly. 1418 */ 1419 slots->memslots[i] = *memslot; 1420 slots->id_to_index[memslot->id] = i; 1421 } 1422 } 1423 1424 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 1425 { 1426 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 1427 1428 #ifdef __KVM_HAVE_READONLY_MEM 1429 valid_flags |= KVM_MEM_READONLY; 1430 #endif 1431 1432 if (mem->flags & ~valid_flags) 1433 return -EINVAL; 1434 1435 return 0; 1436 } 1437 1438 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 1439 int as_id, struct kvm_memslots *slots) 1440 { 1441 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 1442 u64 gen = old_memslots->generation; 1443 1444 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); 1445 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1446 1447 /* 1448 * Do not store the new memslots while there are invalidations in 1449 * progress, otherwise the locking in invalidate_range_start and 1450 * invalidate_range_end will be unbalanced. 1451 */ 1452 spin_lock(&kvm->mn_invalidate_lock); 1453 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); 1454 while (kvm->mn_active_invalidate_count) { 1455 set_current_state(TASK_UNINTERRUPTIBLE); 1456 spin_unlock(&kvm->mn_invalidate_lock); 1457 schedule(); 1458 spin_lock(&kvm->mn_invalidate_lock); 1459 } 1460 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); 1461 rcu_assign_pointer(kvm->memslots[as_id], slots); 1462 spin_unlock(&kvm->mn_invalidate_lock); 1463 1464 /* 1465 * Acquired in kvm_set_memslot. Must be released before synchronize 1466 * SRCU below in order to avoid deadlock with another thread 1467 * acquiring the slots_arch_lock in an srcu critical section. 1468 */ 1469 mutex_unlock(&kvm->slots_arch_lock); 1470 1471 synchronize_srcu_expedited(&kvm->srcu); 1472 1473 /* 1474 * Increment the new memslot generation a second time, dropping the 1475 * update in-progress flag and incrementing the generation based on 1476 * the number of address spaces. This provides a unique and easily 1477 * identifiable generation number while the memslots are in flux. 1478 */ 1479 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1480 1481 /* 1482 * Generations must be unique even across address spaces. We do not need 1483 * a global counter for that, instead the generation space is evenly split 1484 * across address spaces. For example, with two address spaces, address 1485 * space 0 will use generations 0, 2, 4, ... while address space 1 will 1486 * use generations 1, 3, 5, ... 1487 */ 1488 gen += KVM_ADDRESS_SPACE_NUM; 1489 1490 kvm_arch_memslots_updated(kvm, gen); 1491 1492 slots->generation = gen; 1493 1494 return old_memslots; 1495 } 1496 1497 static size_t kvm_memslots_size(int slots) 1498 { 1499 return sizeof(struct kvm_memslots) + 1500 (sizeof(struct kvm_memory_slot) * slots); 1501 } 1502 1503 static void kvm_copy_memslots(struct kvm_memslots *to, 1504 struct kvm_memslots *from) 1505 { 1506 memcpy(to, from, kvm_memslots_size(from->used_slots)); 1507 } 1508 1509 /* 1510 * Note, at a minimum, the current number of used slots must be allocated, even 1511 * when deleting a memslot, as we need a complete duplicate of the memslots for 1512 * use when invalidating a memslot prior to deleting/moving the memslot. 1513 */ 1514 static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, 1515 enum kvm_mr_change change) 1516 { 1517 struct kvm_memslots *slots; 1518 size_t new_size; 1519 1520 if (change == KVM_MR_CREATE) 1521 new_size = kvm_memslots_size(old->used_slots + 1); 1522 else 1523 new_size = kvm_memslots_size(old->used_slots); 1524 1525 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); 1526 if (likely(slots)) 1527 kvm_copy_memslots(slots, old); 1528 1529 return slots; 1530 } 1531 1532 static int kvm_set_memslot(struct kvm *kvm, 1533 const struct kvm_userspace_memory_region *mem, 1534 struct kvm_memory_slot *old, 1535 struct kvm_memory_slot *new, int as_id, 1536 enum kvm_mr_change change) 1537 { 1538 struct kvm_memory_slot *slot; 1539 struct kvm_memslots *slots; 1540 int r; 1541 1542 /* 1543 * Released in install_new_memslots. 1544 * 1545 * Must be held from before the current memslots are copied until 1546 * after the new memslots are installed with rcu_assign_pointer, 1547 * then released before the synchronize srcu in install_new_memslots. 1548 * 1549 * When modifying memslots outside of the slots_lock, must be held 1550 * before reading the pointer to the current memslots until after all 1551 * changes to those memslots are complete. 1552 * 1553 * These rules ensure that installing new memslots does not lose 1554 * changes made to the previous memslots. 1555 */ 1556 mutex_lock(&kvm->slots_arch_lock); 1557 1558 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); 1559 if (!slots) { 1560 mutex_unlock(&kvm->slots_arch_lock); 1561 return -ENOMEM; 1562 } 1563 1564 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1565 /* 1566 * Note, the INVALID flag needs to be in the appropriate entry 1567 * in the freshly allocated memslots, not in @old or @new. 1568 */ 1569 slot = id_to_memslot(slots, old->id); 1570 slot->flags |= KVM_MEMSLOT_INVALID; 1571 1572 /* 1573 * We can re-use the memory from the old memslots. 1574 * It will be overwritten with a copy of the new memslots 1575 * after reacquiring the slots_arch_lock below. 1576 */ 1577 slots = install_new_memslots(kvm, as_id, slots); 1578 1579 /* From this point no new shadow pages pointing to a deleted, 1580 * or moved, memslot will be created. 1581 * 1582 * validation of sp->gfn happens in: 1583 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1584 * - kvm_is_visible_gfn (mmu_check_root) 1585 */ 1586 kvm_arch_flush_shadow_memslot(kvm, slot); 1587 1588 /* Released in install_new_memslots. */ 1589 mutex_lock(&kvm->slots_arch_lock); 1590 1591 /* 1592 * The arch-specific fields of the memslots could have changed 1593 * between releasing the slots_arch_lock in 1594 * install_new_memslots and here, so get a fresh copy of the 1595 * slots. 1596 */ 1597 kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); 1598 } 1599 1600 r = kvm_arch_prepare_memory_region(kvm, new, mem, change); 1601 if (r) 1602 goto out_slots; 1603 1604 update_memslots(slots, new, change); 1605 slots = install_new_memslots(kvm, as_id, slots); 1606 1607 kvm_arch_commit_memory_region(kvm, mem, old, new, change); 1608 1609 kvfree(slots); 1610 return 0; 1611 1612 out_slots: 1613 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1614 slot = id_to_memslot(slots, old->id); 1615 slot->flags &= ~KVM_MEMSLOT_INVALID; 1616 slots = install_new_memslots(kvm, as_id, slots); 1617 } else { 1618 mutex_unlock(&kvm->slots_arch_lock); 1619 } 1620 kvfree(slots); 1621 return r; 1622 } 1623 1624 static int kvm_delete_memslot(struct kvm *kvm, 1625 const struct kvm_userspace_memory_region *mem, 1626 struct kvm_memory_slot *old, int as_id) 1627 { 1628 struct kvm_memory_slot new; 1629 int r; 1630 1631 if (!old->npages) 1632 return -EINVAL; 1633 1634 memset(&new, 0, sizeof(new)); 1635 new.id = old->id; 1636 /* 1637 * This is only for debugging purpose; it should never be referenced 1638 * for a removed memslot. 1639 */ 1640 new.as_id = as_id; 1641 1642 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); 1643 if (r) 1644 return r; 1645 1646 kvm_free_memslot(kvm, old); 1647 return 0; 1648 } 1649 1650 /* 1651 * Allocate some memory and give it an address in the guest physical address 1652 * space. 1653 * 1654 * Discontiguous memory is allowed, mostly for framebuffers. 1655 * 1656 * Must be called holding kvm->slots_lock for write. 1657 */ 1658 int __kvm_set_memory_region(struct kvm *kvm, 1659 const struct kvm_userspace_memory_region *mem) 1660 { 1661 struct kvm_memory_slot old, new; 1662 struct kvm_memory_slot *tmp; 1663 enum kvm_mr_change change; 1664 int as_id, id; 1665 int r; 1666 1667 r = check_memory_region_flags(mem); 1668 if (r) 1669 return r; 1670 1671 as_id = mem->slot >> 16; 1672 id = (u16)mem->slot; 1673 1674 /* General sanity checks */ 1675 if (mem->memory_size & (PAGE_SIZE - 1)) 1676 return -EINVAL; 1677 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1678 return -EINVAL; 1679 /* We can read the guest memory with __xxx_user() later on. */ 1680 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1681 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1682 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1683 mem->memory_size)) 1684 return -EINVAL; 1685 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1686 return -EINVAL; 1687 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1688 return -EINVAL; 1689 1690 /* 1691 * Make a full copy of the old memslot, the pointer will become stale 1692 * when the memslots are re-sorted by update_memslots(), and the old 1693 * memslot needs to be referenced after calling update_memslots(), e.g. 1694 * to free its resources and for arch specific behavior. 1695 */ 1696 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); 1697 if (tmp) { 1698 old = *tmp; 1699 tmp = NULL; 1700 } else { 1701 memset(&old, 0, sizeof(old)); 1702 old.id = id; 1703 } 1704 1705 if (!mem->memory_size) 1706 return kvm_delete_memslot(kvm, mem, &old, as_id); 1707 1708 new.as_id = as_id; 1709 new.id = id; 1710 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 1711 new.npages = mem->memory_size >> PAGE_SHIFT; 1712 new.flags = mem->flags; 1713 new.userspace_addr = mem->userspace_addr; 1714 1715 if (new.npages > KVM_MEM_MAX_NR_PAGES) 1716 return -EINVAL; 1717 1718 if (!old.npages) { 1719 change = KVM_MR_CREATE; 1720 new.dirty_bitmap = NULL; 1721 memset(&new.arch, 0, sizeof(new.arch)); 1722 } else { /* Modify an existing slot. */ 1723 if ((new.userspace_addr != old.userspace_addr) || 1724 (new.npages != old.npages) || 1725 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 1726 return -EINVAL; 1727 1728 if (new.base_gfn != old.base_gfn) 1729 change = KVM_MR_MOVE; 1730 else if (new.flags != old.flags) 1731 change = KVM_MR_FLAGS_ONLY; 1732 else /* Nothing to change. */ 1733 return 0; 1734 1735 /* Copy dirty_bitmap and arch from the current memslot. */ 1736 new.dirty_bitmap = old.dirty_bitmap; 1737 memcpy(&new.arch, &old.arch, sizeof(new.arch)); 1738 } 1739 1740 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1741 /* Check for overlaps */ 1742 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { 1743 if (tmp->id == id) 1744 continue; 1745 if (!((new.base_gfn + new.npages <= tmp->base_gfn) || 1746 (new.base_gfn >= tmp->base_gfn + tmp->npages))) 1747 return -EEXIST; 1748 } 1749 } 1750 1751 /* Allocate/free page dirty bitmap as needed */ 1752 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 1753 new.dirty_bitmap = NULL; 1754 else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { 1755 r = kvm_alloc_dirty_bitmap(&new); 1756 if (r) 1757 return r; 1758 1759 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1760 bitmap_set(new.dirty_bitmap, 0, new.npages); 1761 } 1762 1763 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); 1764 if (r) 1765 goto out_bitmap; 1766 1767 if (old.dirty_bitmap && !new.dirty_bitmap) 1768 kvm_destroy_dirty_bitmap(&old); 1769 return 0; 1770 1771 out_bitmap: 1772 if (new.dirty_bitmap && !old.dirty_bitmap) 1773 kvm_destroy_dirty_bitmap(&new); 1774 return r; 1775 } 1776 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1777 1778 int kvm_set_memory_region(struct kvm *kvm, 1779 const struct kvm_userspace_memory_region *mem) 1780 { 1781 int r; 1782 1783 mutex_lock(&kvm->slots_lock); 1784 r = __kvm_set_memory_region(kvm, mem); 1785 mutex_unlock(&kvm->slots_lock); 1786 return r; 1787 } 1788 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1789 1790 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1791 struct kvm_userspace_memory_region *mem) 1792 { 1793 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1794 return -EINVAL; 1795 1796 return kvm_set_memory_region(kvm, mem); 1797 } 1798 1799 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1800 /** 1801 * kvm_get_dirty_log - get a snapshot of dirty pages 1802 * @kvm: pointer to kvm instance 1803 * @log: slot id and address to which we copy the log 1804 * @is_dirty: set to '1' if any dirty pages were found 1805 * @memslot: set to the associated memslot, always valid on success 1806 */ 1807 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 1808 int *is_dirty, struct kvm_memory_slot **memslot) 1809 { 1810 struct kvm_memslots *slots; 1811 int i, as_id, id; 1812 unsigned long n; 1813 unsigned long any = 0; 1814 1815 /* Dirty ring tracking is exclusive to dirty log tracking */ 1816 if (kvm->dirty_ring_size) 1817 return -ENXIO; 1818 1819 *memslot = NULL; 1820 *is_dirty = 0; 1821 1822 as_id = log->slot >> 16; 1823 id = (u16)log->slot; 1824 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1825 return -EINVAL; 1826 1827 slots = __kvm_memslots(kvm, as_id); 1828 *memslot = id_to_memslot(slots, id); 1829 if (!(*memslot) || !(*memslot)->dirty_bitmap) 1830 return -ENOENT; 1831 1832 kvm_arch_sync_dirty_log(kvm, *memslot); 1833 1834 n = kvm_dirty_bitmap_bytes(*memslot); 1835 1836 for (i = 0; !any && i < n/sizeof(long); ++i) 1837 any = (*memslot)->dirty_bitmap[i]; 1838 1839 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 1840 return -EFAULT; 1841 1842 if (any) 1843 *is_dirty = 1; 1844 return 0; 1845 } 1846 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1847 1848 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 1849 /** 1850 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 1851 * and reenable dirty page tracking for the corresponding pages. 1852 * @kvm: pointer to kvm instance 1853 * @log: slot id and address to which we copy the log 1854 * 1855 * We need to keep it in mind that VCPU threads can write to the bitmap 1856 * concurrently. So, to avoid losing track of dirty pages we keep the 1857 * following order: 1858 * 1859 * 1. Take a snapshot of the bit and clear it if needed. 1860 * 2. Write protect the corresponding page. 1861 * 3. Copy the snapshot to the userspace. 1862 * 4. Upon return caller flushes TLB's if needed. 1863 * 1864 * Between 2 and 4, the guest may write to the page using the remaining TLB 1865 * entry. This is not a problem because the page is reported dirty using 1866 * the snapshot taken before and step 4 ensures that writes done after 1867 * exiting to userspace will be logged for the next call. 1868 * 1869 */ 1870 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 1871 { 1872 struct kvm_memslots *slots; 1873 struct kvm_memory_slot *memslot; 1874 int i, as_id, id; 1875 unsigned long n; 1876 unsigned long *dirty_bitmap; 1877 unsigned long *dirty_bitmap_buffer; 1878 bool flush; 1879 1880 /* Dirty ring tracking is exclusive to dirty log tracking */ 1881 if (kvm->dirty_ring_size) 1882 return -ENXIO; 1883 1884 as_id = log->slot >> 16; 1885 id = (u16)log->slot; 1886 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1887 return -EINVAL; 1888 1889 slots = __kvm_memslots(kvm, as_id); 1890 memslot = id_to_memslot(slots, id); 1891 if (!memslot || !memslot->dirty_bitmap) 1892 return -ENOENT; 1893 1894 dirty_bitmap = memslot->dirty_bitmap; 1895 1896 kvm_arch_sync_dirty_log(kvm, memslot); 1897 1898 n = kvm_dirty_bitmap_bytes(memslot); 1899 flush = false; 1900 if (kvm->manual_dirty_log_protect) { 1901 /* 1902 * Unlike kvm_get_dirty_log, we always return false in *flush, 1903 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 1904 * is some code duplication between this function and 1905 * kvm_get_dirty_log, but hopefully all architecture 1906 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log 1907 * can be eliminated. 1908 */ 1909 dirty_bitmap_buffer = dirty_bitmap; 1910 } else { 1911 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1912 memset(dirty_bitmap_buffer, 0, n); 1913 1914 KVM_MMU_LOCK(kvm); 1915 for (i = 0; i < n / sizeof(long); i++) { 1916 unsigned long mask; 1917 gfn_t offset; 1918 1919 if (!dirty_bitmap[i]) 1920 continue; 1921 1922 flush = true; 1923 mask = xchg(&dirty_bitmap[i], 0); 1924 dirty_bitmap_buffer[i] = mask; 1925 1926 offset = i * BITS_PER_LONG; 1927 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1928 offset, mask); 1929 } 1930 KVM_MMU_UNLOCK(kvm); 1931 } 1932 1933 if (flush) 1934 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 1935 1936 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 1937 return -EFAULT; 1938 return 0; 1939 } 1940 1941 1942 /** 1943 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 1944 * @kvm: kvm instance 1945 * @log: slot id and address to which we copy the log 1946 * 1947 * Steps 1-4 below provide general overview of dirty page logging. See 1948 * kvm_get_dirty_log_protect() function description for additional details. 1949 * 1950 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 1951 * always flush the TLB (step 4) even if previous step failed and the dirty 1952 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 1953 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 1954 * writes will be marked dirty for next log read. 1955 * 1956 * 1. Take a snapshot of the bit and clear it if needed. 1957 * 2. Write protect the corresponding page. 1958 * 3. Copy the snapshot to the userspace. 1959 * 4. Flush TLB's if needed. 1960 */ 1961 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 1962 struct kvm_dirty_log *log) 1963 { 1964 int r; 1965 1966 mutex_lock(&kvm->slots_lock); 1967 1968 r = kvm_get_dirty_log_protect(kvm, log); 1969 1970 mutex_unlock(&kvm->slots_lock); 1971 return r; 1972 } 1973 1974 /** 1975 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 1976 * and reenable dirty page tracking for the corresponding pages. 1977 * @kvm: pointer to kvm instance 1978 * @log: slot id and address from which to fetch the bitmap of dirty pages 1979 */ 1980 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 1981 struct kvm_clear_dirty_log *log) 1982 { 1983 struct kvm_memslots *slots; 1984 struct kvm_memory_slot *memslot; 1985 int as_id, id; 1986 gfn_t offset; 1987 unsigned long i, n; 1988 unsigned long *dirty_bitmap; 1989 unsigned long *dirty_bitmap_buffer; 1990 bool flush; 1991 1992 /* Dirty ring tracking is exclusive to dirty log tracking */ 1993 if (kvm->dirty_ring_size) 1994 return -ENXIO; 1995 1996 as_id = log->slot >> 16; 1997 id = (u16)log->slot; 1998 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1999 return -EINVAL; 2000 2001 if (log->first_page & 63) 2002 return -EINVAL; 2003 2004 slots = __kvm_memslots(kvm, as_id); 2005 memslot = id_to_memslot(slots, id); 2006 if (!memslot || !memslot->dirty_bitmap) 2007 return -ENOENT; 2008 2009 dirty_bitmap = memslot->dirty_bitmap; 2010 2011 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2012 2013 if (log->first_page > memslot->npages || 2014 log->num_pages > memslot->npages - log->first_page || 2015 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2016 return -EINVAL; 2017 2018 kvm_arch_sync_dirty_log(kvm, memslot); 2019 2020 flush = false; 2021 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2022 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2023 return -EFAULT; 2024 2025 KVM_MMU_LOCK(kvm); 2026 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2027 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2028 i++, offset += BITS_PER_LONG) { 2029 unsigned long mask = *dirty_bitmap_buffer++; 2030 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2031 if (!mask) 2032 continue; 2033 2034 mask &= atomic_long_fetch_andnot(mask, p); 2035 2036 /* 2037 * mask contains the bits that really have been cleared. This 2038 * never includes any bits beyond the length of the memslot (if 2039 * the length is not aligned to 64 pages), therefore it is not 2040 * a problem if userspace sets them in log->dirty_bitmap. 2041 */ 2042 if (mask) { 2043 flush = true; 2044 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2045 offset, mask); 2046 } 2047 } 2048 KVM_MMU_UNLOCK(kvm); 2049 2050 if (flush) 2051 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2052 2053 return 0; 2054 } 2055 2056 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2057 struct kvm_clear_dirty_log *log) 2058 { 2059 int r; 2060 2061 mutex_lock(&kvm->slots_lock); 2062 2063 r = kvm_clear_dirty_log_protect(kvm, log); 2064 2065 mutex_unlock(&kvm->slots_lock); 2066 return r; 2067 } 2068 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2069 2070 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2071 { 2072 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2073 } 2074 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2075 2076 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2077 { 2078 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2079 struct kvm_memory_slot *slot; 2080 int slot_index; 2081 2082 slot = try_get_memslot(slots, vcpu->last_used_slot, gfn); 2083 if (slot) 2084 return slot; 2085 2086 /* 2087 * Fall back to searching all memslots. We purposely use 2088 * search_memslots() instead of __gfn_to_memslot() to avoid 2089 * thrashing the VM-wide last_used_index in kvm_memslots. 2090 */ 2091 slot = search_memslots(slots, gfn, &slot_index); 2092 if (slot) { 2093 vcpu->last_used_slot = slot_index; 2094 return slot; 2095 } 2096 2097 return NULL; 2098 } 2099 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); 2100 2101 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2102 { 2103 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2104 2105 return kvm_is_visible_memslot(memslot); 2106 } 2107 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2108 2109 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2110 { 2111 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2112 2113 return kvm_is_visible_memslot(memslot); 2114 } 2115 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2116 2117 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2118 { 2119 struct vm_area_struct *vma; 2120 unsigned long addr, size; 2121 2122 size = PAGE_SIZE; 2123 2124 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2125 if (kvm_is_error_hva(addr)) 2126 return PAGE_SIZE; 2127 2128 mmap_read_lock(current->mm); 2129 vma = find_vma(current->mm, addr); 2130 if (!vma) 2131 goto out; 2132 2133 size = vma_kernel_pagesize(vma); 2134 2135 out: 2136 mmap_read_unlock(current->mm); 2137 2138 return size; 2139 } 2140 2141 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 2142 { 2143 return slot->flags & KVM_MEM_READONLY; 2144 } 2145 2146 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2147 gfn_t *nr_pages, bool write) 2148 { 2149 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2150 return KVM_HVA_ERR_BAD; 2151 2152 if (memslot_is_readonly(slot) && write) 2153 return KVM_HVA_ERR_RO_BAD; 2154 2155 if (nr_pages) 2156 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2157 2158 return __gfn_to_hva_memslot(slot, gfn); 2159 } 2160 2161 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2162 gfn_t *nr_pages) 2163 { 2164 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2165 } 2166 2167 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2168 gfn_t gfn) 2169 { 2170 return gfn_to_hva_many(slot, gfn, NULL); 2171 } 2172 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2173 2174 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2175 { 2176 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2177 } 2178 EXPORT_SYMBOL_GPL(gfn_to_hva); 2179 2180 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2181 { 2182 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2183 } 2184 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2185 2186 /* 2187 * Return the hva of a @gfn and the R/W attribute if possible. 2188 * 2189 * @slot: the kvm_memory_slot which contains @gfn 2190 * @gfn: the gfn to be translated 2191 * @writable: used to return the read/write attribute of the @slot if the hva 2192 * is valid and @writable is not NULL 2193 */ 2194 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2195 gfn_t gfn, bool *writable) 2196 { 2197 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2198 2199 if (!kvm_is_error_hva(hva) && writable) 2200 *writable = !memslot_is_readonly(slot); 2201 2202 return hva; 2203 } 2204 2205 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2206 { 2207 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2208 2209 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2210 } 2211 2212 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2213 { 2214 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2215 2216 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2217 } 2218 2219 static inline int check_user_page_hwpoison(unsigned long addr) 2220 { 2221 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2222 2223 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2224 return rc == -EHWPOISON; 2225 } 2226 2227 /* 2228 * The fast path to get the writable pfn which will be stored in @pfn, 2229 * true indicates success, otherwise false is returned. It's also the 2230 * only part that runs if we can in atomic context. 2231 */ 2232 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2233 bool *writable, kvm_pfn_t *pfn) 2234 { 2235 struct page *page[1]; 2236 2237 /* 2238 * Fast pin a writable pfn only if it is a write fault request 2239 * or the caller allows to map a writable pfn for a read fault 2240 * request. 2241 */ 2242 if (!(write_fault || writable)) 2243 return false; 2244 2245 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2246 *pfn = page_to_pfn(page[0]); 2247 2248 if (writable) 2249 *writable = true; 2250 return true; 2251 } 2252 2253 return false; 2254 } 2255 2256 /* 2257 * The slow path to get the pfn of the specified host virtual address, 2258 * 1 indicates success, -errno is returned if error is detected. 2259 */ 2260 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2261 bool *writable, kvm_pfn_t *pfn) 2262 { 2263 unsigned int flags = FOLL_HWPOISON; 2264 struct page *page; 2265 int npages = 0; 2266 2267 might_sleep(); 2268 2269 if (writable) 2270 *writable = write_fault; 2271 2272 if (write_fault) 2273 flags |= FOLL_WRITE; 2274 if (async) 2275 flags |= FOLL_NOWAIT; 2276 2277 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2278 if (npages != 1) 2279 return npages; 2280 2281 /* map read fault as writable if possible */ 2282 if (unlikely(!write_fault) && writable) { 2283 struct page *wpage; 2284 2285 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2286 *writable = true; 2287 put_page(page); 2288 page = wpage; 2289 } 2290 } 2291 *pfn = page_to_pfn(page); 2292 return npages; 2293 } 2294 2295 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2296 { 2297 if (unlikely(!(vma->vm_flags & VM_READ))) 2298 return false; 2299 2300 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2301 return false; 2302 2303 return true; 2304 } 2305 2306 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2307 { 2308 if (kvm_is_reserved_pfn(pfn)) 2309 return 1; 2310 return get_page_unless_zero(pfn_to_page(pfn)); 2311 } 2312 2313 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2314 unsigned long addr, bool *async, 2315 bool write_fault, bool *writable, 2316 kvm_pfn_t *p_pfn) 2317 { 2318 kvm_pfn_t pfn; 2319 pte_t *ptep; 2320 spinlock_t *ptl; 2321 int r; 2322 2323 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2324 if (r) { 2325 /* 2326 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2327 * not call the fault handler, so do it here. 2328 */ 2329 bool unlocked = false; 2330 r = fixup_user_fault(current->mm, addr, 2331 (write_fault ? FAULT_FLAG_WRITE : 0), 2332 &unlocked); 2333 if (unlocked) 2334 return -EAGAIN; 2335 if (r) 2336 return r; 2337 2338 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2339 if (r) 2340 return r; 2341 } 2342 2343 if (write_fault && !pte_write(*ptep)) { 2344 pfn = KVM_PFN_ERR_RO_FAULT; 2345 goto out; 2346 } 2347 2348 if (writable) 2349 *writable = pte_write(*ptep); 2350 pfn = pte_pfn(*ptep); 2351 2352 /* 2353 * Get a reference here because callers of *hva_to_pfn* and 2354 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2355 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2356 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2357 * simply do nothing for reserved pfns. 2358 * 2359 * Whoever called remap_pfn_range is also going to call e.g. 2360 * unmap_mapping_range before the underlying pages are freed, 2361 * causing a call to our MMU notifier. 2362 * 2363 * Certain IO or PFNMAP mappings can be backed with valid 2364 * struct pages, but be allocated without refcounting e.g., 2365 * tail pages of non-compound higher order allocations, which 2366 * would then underflow the refcount when the caller does the 2367 * required put_page. Don't allow those pages here. 2368 */ 2369 if (!kvm_try_get_pfn(pfn)) 2370 r = -EFAULT; 2371 2372 out: 2373 pte_unmap_unlock(ptep, ptl); 2374 *p_pfn = pfn; 2375 2376 return r; 2377 } 2378 2379 /* 2380 * Pin guest page in memory and return its pfn. 2381 * @addr: host virtual address which maps memory to the guest 2382 * @atomic: whether this function can sleep 2383 * @async: whether this function need to wait IO complete if the 2384 * host page is not in the memory 2385 * @write_fault: whether we should get a writable host page 2386 * @writable: whether it allows to map a writable host page for !@write_fault 2387 * 2388 * The function will map a writable host page for these two cases: 2389 * 1): @write_fault = true 2390 * 2): @write_fault = false && @writable, @writable will tell the caller 2391 * whether the mapping is writable. 2392 */ 2393 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 2394 bool write_fault, bool *writable) 2395 { 2396 struct vm_area_struct *vma; 2397 kvm_pfn_t pfn = 0; 2398 int npages, r; 2399 2400 /* we can do it either atomically or asynchronously, not both */ 2401 BUG_ON(atomic && async); 2402 2403 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2404 return pfn; 2405 2406 if (atomic) 2407 return KVM_PFN_ERR_FAULT; 2408 2409 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 2410 if (npages == 1) 2411 return pfn; 2412 2413 mmap_read_lock(current->mm); 2414 if (npages == -EHWPOISON || 2415 (!async && check_user_page_hwpoison(addr))) { 2416 pfn = KVM_PFN_ERR_HWPOISON; 2417 goto exit; 2418 } 2419 2420 retry: 2421 vma = vma_lookup(current->mm, addr); 2422 2423 if (vma == NULL) 2424 pfn = KVM_PFN_ERR_FAULT; 2425 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2426 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); 2427 if (r == -EAGAIN) 2428 goto retry; 2429 if (r < 0) 2430 pfn = KVM_PFN_ERR_FAULT; 2431 } else { 2432 if (async && vma_is_valid(vma, write_fault)) 2433 *async = true; 2434 pfn = KVM_PFN_ERR_FAULT; 2435 } 2436 exit: 2437 mmap_read_unlock(current->mm); 2438 return pfn; 2439 } 2440 2441 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, 2442 bool atomic, bool *async, bool write_fault, 2443 bool *writable, hva_t *hva) 2444 { 2445 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2446 2447 if (hva) 2448 *hva = addr; 2449 2450 if (addr == KVM_HVA_ERR_RO_BAD) { 2451 if (writable) 2452 *writable = false; 2453 return KVM_PFN_ERR_RO_FAULT; 2454 } 2455 2456 if (kvm_is_error_hva(addr)) { 2457 if (writable) 2458 *writable = false; 2459 return KVM_PFN_NOSLOT; 2460 } 2461 2462 /* Do not map writable pfn in the readonly memslot. */ 2463 if (writable && memslot_is_readonly(slot)) { 2464 *writable = false; 2465 writable = NULL; 2466 } 2467 2468 return hva_to_pfn(addr, atomic, async, write_fault, 2469 writable); 2470 } 2471 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2472 2473 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2474 bool *writable) 2475 { 2476 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2477 write_fault, writable, NULL); 2478 } 2479 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2480 2481 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 2482 { 2483 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2484 } 2485 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2486 2487 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 2488 { 2489 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2490 } 2491 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2492 2493 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2494 { 2495 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2496 } 2497 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2498 2499 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2500 { 2501 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2502 } 2503 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2504 2505 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2506 { 2507 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2508 } 2509 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2510 2511 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2512 struct page **pages, int nr_pages) 2513 { 2514 unsigned long addr; 2515 gfn_t entry = 0; 2516 2517 addr = gfn_to_hva_many(slot, gfn, &entry); 2518 if (kvm_is_error_hva(addr)) 2519 return -1; 2520 2521 if (entry < nr_pages) 2522 return 0; 2523 2524 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2525 } 2526 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2527 2528 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2529 { 2530 if (is_error_noslot_pfn(pfn)) 2531 return KVM_ERR_PTR_BAD_PAGE; 2532 2533 if (kvm_is_reserved_pfn(pfn)) { 2534 WARN_ON(1); 2535 return KVM_ERR_PTR_BAD_PAGE; 2536 } 2537 2538 return pfn_to_page(pfn); 2539 } 2540 2541 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2542 { 2543 kvm_pfn_t pfn; 2544 2545 pfn = gfn_to_pfn(kvm, gfn); 2546 2547 return kvm_pfn_to_page(pfn); 2548 } 2549 EXPORT_SYMBOL_GPL(gfn_to_page); 2550 2551 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) 2552 { 2553 if (pfn == 0) 2554 return; 2555 2556 if (cache) 2557 cache->pfn = cache->gfn = 0; 2558 2559 if (dirty) 2560 kvm_release_pfn_dirty(pfn); 2561 else 2562 kvm_release_pfn_clean(pfn); 2563 } 2564 2565 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, 2566 struct gfn_to_pfn_cache *cache, u64 gen) 2567 { 2568 kvm_release_pfn(cache->pfn, cache->dirty, cache); 2569 2570 cache->pfn = gfn_to_pfn_memslot(slot, gfn); 2571 cache->gfn = gfn; 2572 cache->dirty = false; 2573 cache->generation = gen; 2574 } 2575 2576 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, 2577 struct kvm_host_map *map, 2578 struct gfn_to_pfn_cache *cache, 2579 bool atomic) 2580 { 2581 kvm_pfn_t pfn; 2582 void *hva = NULL; 2583 struct page *page = KVM_UNMAPPED_PAGE; 2584 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); 2585 u64 gen = slots->generation; 2586 2587 if (!map) 2588 return -EINVAL; 2589 2590 if (cache) { 2591 if (!cache->pfn || cache->gfn != gfn || 2592 cache->generation != gen) { 2593 if (atomic) 2594 return -EAGAIN; 2595 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); 2596 } 2597 pfn = cache->pfn; 2598 } else { 2599 if (atomic) 2600 return -EAGAIN; 2601 pfn = gfn_to_pfn_memslot(slot, gfn); 2602 } 2603 if (is_error_noslot_pfn(pfn)) 2604 return -EINVAL; 2605 2606 if (pfn_valid(pfn)) { 2607 page = pfn_to_page(pfn); 2608 if (atomic) 2609 hva = kmap_atomic(page); 2610 else 2611 hva = kmap(page); 2612 #ifdef CONFIG_HAS_IOMEM 2613 } else if (!atomic) { 2614 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2615 } else { 2616 return -EINVAL; 2617 #endif 2618 } 2619 2620 if (!hva) 2621 return -EFAULT; 2622 2623 map->page = page; 2624 map->hva = hva; 2625 map->pfn = pfn; 2626 map->gfn = gfn; 2627 2628 return 0; 2629 } 2630 2631 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, 2632 struct gfn_to_pfn_cache *cache, bool atomic) 2633 { 2634 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, 2635 cache, atomic); 2636 } 2637 EXPORT_SYMBOL_GPL(kvm_map_gfn); 2638 2639 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2640 { 2641 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, 2642 NULL, false); 2643 } 2644 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2645 2646 static void __kvm_unmap_gfn(struct kvm *kvm, 2647 struct kvm_memory_slot *memslot, 2648 struct kvm_host_map *map, 2649 struct gfn_to_pfn_cache *cache, 2650 bool dirty, bool atomic) 2651 { 2652 if (!map) 2653 return; 2654 2655 if (!map->hva) 2656 return; 2657 2658 if (map->page != KVM_UNMAPPED_PAGE) { 2659 if (atomic) 2660 kunmap_atomic(map->hva); 2661 else 2662 kunmap(map->page); 2663 } 2664 #ifdef CONFIG_HAS_IOMEM 2665 else if (!atomic) 2666 memunmap(map->hva); 2667 else 2668 WARN_ONCE(1, "Unexpected unmapping in atomic context"); 2669 #endif 2670 2671 if (dirty) 2672 mark_page_dirty_in_slot(kvm, memslot, map->gfn); 2673 2674 if (cache) 2675 cache->dirty |= dirty; 2676 else 2677 kvm_release_pfn(map->pfn, dirty, NULL); 2678 2679 map->hva = NULL; 2680 map->page = NULL; 2681 } 2682 2683 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 2684 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) 2685 { 2686 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, 2687 cache, dirty, atomic); 2688 return 0; 2689 } 2690 EXPORT_SYMBOL_GPL(kvm_unmap_gfn); 2691 2692 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2693 { 2694 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), 2695 map, NULL, dirty, false); 2696 } 2697 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2698 2699 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2700 { 2701 kvm_pfn_t pfn; 2702 2703 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2704 2705 return kvm_pfn_to_page(pfn); 2706 } 2707 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2708 2709 void kvm_release_page_clean(struct page *page) 2710 { 2711 WARN_ON(is_error_page(page)); 2712 2713 kvm_release_pfn_clean(page_to_pfn(page)); 2714 } 2715 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2716 2717 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2718 { 2719 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2720 put_page(pfn_to_page(pfn)); 2721 } 2722 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2723 2724 void kvm_release_page_dirty(struct page *page) 2725 { 2726 WARN_ON(is_error_page(page)); 2727 2728 kvm_release_pfn_dirty(page_to_pfn(page)); 2729 } 2730 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2731 2732 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2733 { 2734 kvm_set_pfn_dirty(pfn); 2735 kvm_release_pfn_clean(pfn); 2736 } 2737 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2738 2739 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2740 { 2741 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2742 SetPageDirty(pfn_to_page(pfn)); 2743 } 2744 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2745 2746 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2747 { 2748 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2749 mark_page_accessed(pfn_to_page(pfn)); 2750 } 2751 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2752 2753 static int next_segment(unsigned long len, int offset) 2754 { 2755 if (len > PAGE_SIZE - offset) 2756 return PAGE_SIZE - offset; 2757 else 2758 return len; 2759 } 2760 2761 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2762 void *data, int offset, int len) 2763 { 2764 int r; 2765 unsigned long addr; 2766 2767 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2768 if (kvm_is_error_hva(addr)) 2769 return -EFAULT; 2770 r = __copy_from_user(data, (void __user *)addr + offset, len); 2771 if (r) 2772 return -EFAULT; 2773 return 0; 2774 } 2775 2776 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2777 int len) 2778 { 2779 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2780 2781 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2782 } 2783 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2784 2785 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2786 int offset, int len) 2787 { 2788 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2789 2790 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2791 } 2792 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2793 2794 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2795 { 2796 gfn_t gfn = gpa >> PAGE_SHIFT; 2797 int seg; 2798 int offset = offset_in_page(gpa); 2799 int ret; 2800 2801 while ((seg = next_segment(len, offset)) != 0) { 2802 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2803 if (ret < 0) 2804 return ret; 2805 offset = 0; 2806 len -= seg; 2807 data += seg; 2808 ++gfn; 2809 } 2810 return 0; 2811 } 2812 EXPORT_SYMBOL_GPL(kvm_read_guest); 2813 2814 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2815 { 2816 gfn_t gfn = gpa >> PAGE_SHIFT; 2817 int seg; 2818 int offset = offset_in_page(gpa); 2819 int ret; 2820 2821 while ((seg = next_segment(len, offset)) != 0) { 2822 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2823 if (ret < 0) 2824 return ret; 2825 offset = 0; 2826 len -= seg; 2827 data += seg; 2828 ++gfn; 2829 } 2830 return 0; 2831 } 2832 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2833 2834 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2835 void *data, int offset, unsigned long len) 2836 { 2837 int r; 2838 unsigned long addr; 2839 2840 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2841 if (kvm_is_error_hva(addr)) 2842 return -EFAULT; 2843 pagefault_disable(); 2844 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2845 pagefault_enable(); 2846 if (r) 2847 return -EFAULT; 2848 return 0; 2849 } 2850 2851 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2852 void *data, unsigned long len) 2853 { 2854 gfn_t gfn = gpa >> PAGE_SHIFT; 2855 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2856 int offset = offset_in_page(gpa); 2857 2858 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2859 } 2860 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2861 2862 static int __kvm_write_guest_page(struct kvm *kvm, 2863 struct kvm_memory_slot *memslot, gfn_t gfn, 2864 const void *data, int offset, int len) 2865 { 2866 int r; 2867 unsigned long addr; 2868 2869 addr = gfn_to_hva_memslot(memslot, gfn); 2870 if (kvm_is_error_hva(addr)) 2871 return -EFAULT; 2872 r = __copy_to_user((void __user *)addr + offset, data, len); 2873 if (r) 2874 return -EFAULT; 2875 mark_page_dirty_in_slot(kvm, memslot, gfn); 2876 return 0; 2877 } 2878 2879 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2880 const void *data, int offset, int len) 2881 { 2882 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2883 2884 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2885 } 2886 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2887 2888 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2889 const void *data, int offset, int len) 2890 { 2891 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2892 2893 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2894 } 2895 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2896 2897 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2898 unsigned long len) 2899 { 2900 gfn_t gfn = gpa >> PAGE_SHIFT; 2901 int seg; 2902 int offset = offset_in_page(gpa); 2903 int ret; 2904 2905 while ((seg = next_segment(len, offset)) != 0) { 2906 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2907 if (ret < 0) 2908 return ret; 2909 offset = 0; 2910 len -= seg; 2911 data += seg; 2912 ++gfn; 2913 } 2914 return 0; 2915 } 2916 EXPORT_SYMBOL_GPL(kvm_write_guest); 2917 2918 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2919 unsigned long len) 2920 { 2921 gfn_t gfn = gpa >> PAGE_SHIFT; 2922 int seg; 2923 int offset = offset_in_page(gpa); 2924 int ret; 2925 2926 while ((seg = next_segment(len, offset)) != 0) { 2927 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 2928 if (ret < 0) 2929 return ret; 2930 offset = 0; 2931 len -= seg; 2932 data += seg; 2933 ++gfn; 2934 } 2935 return 0; 2936 } 2937 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 2938 2939 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 2940 struct gfn_to_hva_cache *ghc, 2941 gpa_t gpa, unsigned long len) 2942 { 2943 int offset = offset_in_page(gpa); 2944 gfn_t start_gfn = gpa >> PAGE_SHIFT; 2945 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 2946 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 2947 gfn_t nr_pages_avail; 2948 2949 /* Update ghc->generation before performing any error checks. */ 2950 ghc->generation = slots->generation; 2951 2952 if (start_gfn > end_gfn) { 2953 ghc->hva = KVM_HVA_ERR_BAD; 2954 return -EINVAL; 2955 } 2956 2957 /* 2958 * If the requested region crosses two memslots, we still 2959 * verify that the entire region is valid here. 2960 */ 2961 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 2962 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 2963 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 2964 &nr_pages_avail); 2965 if (kvm_is_error_hva(ghc->hva)) 2966 return -EFAULT; 2967 } 2968 2969 /* Use the slow path for cross page reads and writes. */ 2970 if (nr_pages_needed == 1) 2971 ghc->hva += offset; 2972 else 2973 ghc->memslot = NULL; 2974 2975 ghc->gpa = gpa; 2976 ghc->len = len; 2977 return 0; 2978 } 2979 2980 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2981 gpa_t gpa, unsigned long len) 2982 { 2983 struct kvm_memslots *slots = kvm_memslots(kvm); 2984 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 2985 } 2986 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 2987 2988 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2989 void *data, unsigned int offset, 2990 unsigned long len) 2991 { 2992 struct kvm_memslots *slots = kvm_memslots(kvm); 2993 int r; 2994 gpa_t gpa = ghc->gpa + offset; 2995 2996 BUG_ON(len + offset > ghc->len); 2997 2998 if (slots->generation != ghc->generation) { 2999 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3000 return -EFAULT; 3001 } 3002 3003 if (kvm_is_error_hva(ghc->hva)) 3004 return -EFAULT; 3005 3006 if (unlikely(!ghc->memslot)) 3007 return kvm_write_guest(kvm, gpa, data, len); 3008 3009 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3010 if (r) 3011 return -EFAULT; 3012 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3013 3014 return 0; 3015 } 3016 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3017 3018 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3019 void *data, unsigned long len) 3020 { 3021 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3022 } 3023 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3024 3025 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3026 void *data, unsigned int offset, 3027 unsigned long len) 3028 { 3029 struct kvm_memslots *slots = kvm_memslots(kvm); 3030 int r; 3031 gpa_t gpa = ghc->gpa + offset; 3032 3033 BUG_ON(len + offset > ghc->len); 3034 3035 if (slots->generation != ghc->generation) { 3036 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3037 return -EFAULT; 3038 } 3039 3040 if (kvm_is_error_hva(ghc->hva)) 3041 return -EFAULT; 3042 3043 if (unlikely(!ghc->memslot)) 3044 return kvm_read_guest(kvm, gpa, data, len); 3045 3046 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3047 if (r) 3048 return -EFAULT; 3049 3050 return 0; 3051 } 3052 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3053 3054 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3055 void *data, unsigned long len) 3056 { 3057 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3058 } 3059 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3060 3061 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3062 { 3063 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3064 gfn_t gfn = gpa >> PAGE_SHIFT; 3065 int seg; 3066 int offset = offset_in_page(gpa); 3067 int ret; 3068 3069 while ((seg = next_segment(len, offset)) != 0) { 3070 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 3071 if (ret < 0) 3072 return ret; 3073 offset = 0; 3074 len -= seg; 3075 ++gfn; 3076 } 3077 return 0; 3078 } 3079 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3080 3081 void mark_page_dirty_in_slot(struct kvm *kvm, 3082 struct kvm_memory_slot *memslot, 3083 gfn_t gfn) 3084 { 3085 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3086 unsigned long rel_gfn = gfn - memslot->base_gfn; 3087 u32 slot = (memslot->as_id << 16) | memslot->id; 3088 3089 if (kvm->dirty_ring_size) 3090 kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), 3091 slot, rel_gfn); 3092 else 3093 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3094 } 3095 } 3096 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3097 3098 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3099 { 3100 struct kvm_memory_slot *memslot; 3101 3102 memslot = gfn_to_memslot(kvm, gfn); 3103 mark_page_dirty_in_slot(kvm, memslot, gfn); 3104 } 3105 EXPORT_SYMBOL_GPL(mark_page_dirty); 3106 3107 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3108 { 3109 struct kvm_memory_slot *memslot; 3110 3111 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3112 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3113 } 3114 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3115 3116 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3117 { 3118 if (!vcpu->sigset_active) 3119 return; 3120 3121 /* 3122 * This does a lockless modification of ->real_blocked, which is fine 3123 * because, only current can change ->real_blocked and all readers of 3124 * ->real_blocked don't care as long ->real_blocked is always a subset 3125 * of ->blocked. 3126 */ 3127 sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); 3128 } 3129 3130 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 3131 { 3132 if (!vcpu->sigset_active) 3133 return; 3134 3135 sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); 3136 sigemptyset(¤t->real_blocked); 3137 } 3138 3139 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 3140 { 3141 unsigned int old, val, grow, grow_start; 3142 3143 old = val = vcpu->halt_poll_ns; 3144 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3145 grow = READ_ONCE(halt_poll_ns_grow); 3146 if (!grow) 3147 goto out; 3148 3149 val *= grow; 3150 if (val < grow_start) 3151 val = grow_start; 3152 3153 if (val > vcpu->kvm->max_halt_poll_ns) 3154 val = vcpu->kvm->max_halt_poll_ns; 3155 3156 vcpu->halt_poll_ns = val; 3157 out: 3158 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 3159 } 3160 3161 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3162 { 3163 unsigned int old, val, shrink, grow_start; 3164 3165 old = val = vcpu->halt_poll_ns; 3166 shrink = READ_ONCE(halt_poll_ns_shrink); 3167 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3168 if (shrink == 0) 3169 val = 0; 3170 else 3171 val /= shrink; 3172 3173 if (val < grow_start) 3174 val = 0; 3175 3176 vcpu->halt_poll_ns = val; 3177 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 3178 } 3179 3180 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 3181 { 3182 int ret = -EINTR; 3183 int idx = srcu_read_lock(&vcpu->kvm->srcu); 3184 3185 if (kvm_arch_vcpu_runnable(vcpu)) { 3186 kvm_make_request(KVM_REQ_UNHALT, vcpu); 3187 goto out; 3188 } 3189 if (kvm_cpu_has_pending_timer(vcpu)) 3190 goto out; 3191 if (signal_pending(current)) 3192 goto out; 3193 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) 3194 goto out; 3195 3196 ret = 0; 3197 out: 3198 srcu_read_unlock(&vcpu->kvm->srcu, idx); 3199 return ret; 3200 } 3201 3202 static inline void 3203 update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) 3204 { 3205 if (waited) 3206 vcpu->stat.generic.halt_poll_fail_ns += poll_ns; 3207 else 3208 vcpu->stat.generic.halt_poll_success_ns += poll_ns; 3209 } 3210 3211 /* 3212 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 3213 */ 3214 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 3215 { 3216 ktime_t start, cur, poll_end; 3217 bool waited = false; 3218 u64 block_ns; 3219 3220 kvm_arch_vcpu_blocking(vcpu); 3221 3222 start = cur = poll_end = ktime_get(); 3223 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { 3224 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 3225 3226 ++vcpu->stat.generic.halt_attempted_poll; 3227 do { 3228 /* 3229 * This sets KVM_REQ_UNHALT if an interrupt 3230 * arrives. 3231 */ 3232 if (kvm_vcpu_check_block(vcpu) < 0) { 3233 ++vcpu->stat.generic.halt_successful_poll; 3234 if (!vcpu_valid_wakeup(vcpu)) 3235 ++vcpu->stat.generic.halt_poll_invalid; 3236 3237 KVM_STATS_LOG_HIST_UPDATE( 3238 vcpu->stat.generic.halt_poll_success_hist, 3239 ktime_to_ns(ktime_get()) - 3240 ktime_to_ns(start)); 3241 goto out; 3242 } 3243 cpu_relax(); 3244 poll_end = cur = ktime_get(); 3245 } while (kvm_vcpu_can_poll(cur, stop)); 3246 3247 KVM_STATS_LOG_HIST_UPDATE( 3248 vcpu->stat.generic.halt_poll_fail_hist, 3249 ktime_to_ns(ktime_get()) - ktime_to_ns(start)); 3250 } 3251 3252 3253 prepare_to_rcuwait(&vcpu->wait); 3254 for (;;) { 3255 set_current_state(TASK_INTERRUPTIBLE); 3256 3257 if (kvm_vcpu_check_block(vcpu) < 0) 3258 break; 3259 3260 waited = true; 3261 schedule(); 3262 } 3263 finish_rcuwait(&vcpu->wait); 3264 cur = ktime_get(); 3265 if (waited) { 3266 vcpu->stat.generic.halt_wait_ns += 3267 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3268 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3269 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3270 } 3271 out: 3272 kvm_arch_vcpu_unblocking(vcpu); 3273 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3274 3275 update_halt_poll_stats( 3276 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); 3277 3278 if (!kvm_arch_no_poll(vcpu)) { 3279 if (!vcpu_valid_wakeup(vcpu)) { 3280 shrink_halt_poll_ns(vcpu); 3281 } else if (vcpu->kvm->max_halt_poll_ns) { 3282 if (block_ns <= vcpu->halt_poll_ns) 3283 ; 3284 /* we had a long block, shrink polling */ 3285 else if (vcpu->halt_poll_ns && 3286 block_ns > vcpu->kvm->max_halt_poll_ns) 3287 shrink_halt_poll_ns(vcpu); 3288 /* we had a short halt and our poll time is too small */ 3289 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3290 block_ns < vcpu->kvm->max_halt_poll_ns) 3291 grow_halt_poll_ns(vcpu); 3292 } else { 3293 vcpu->halt_poll_ns = 0; 3294 } 3295 } 3296 3297 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 3298 kvm_arch_vcpu_block_finish(vcpu); 3299 } 3300 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 3301 3302 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3303 { 3304 struct rcuwait *waitp; 3305 3306 waitp = kvm_arch_vcpu_get_wait(vcpu); 3307 if (rcuwait_wake_up(waitp)) { 3308 WRITE_ONCE(vcpu->ready, true); 3309 ++vcpu->stat.generic.halt_wakeup; 3310 return true; 3311 } 3312 3313 return false; 3314 } 3315 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3316 3317 #ifndef CONFIG_S390 3318 /* 3319 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3320 */ 3321 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3322 { 3323 int me, cpu; 3324 3325 if (kvm_vcpu_wake_up(vcpu)) 3326 return; 3327 3328 /* 3329 * Note, the vCPU could get migrated to a different pCPU at any point 3330 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3331 * IPI to the previous pCPU. But, that's ok because the purpose of the 3332 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3333 * vCPU also requires it to leave IN_GUEST_MODE. 3334 */ 3335 me = get_cpu(); 3336 if (kvm_arch_vcpu_should_kick(vcpu)) { 3337 cpu = READ_ONCE(vcpu->cpu); 3338 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3339 smp_send_reschedule(cpu); 3340 } 3341 put_cpu(); 3342 } 3343 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3344 #endif /* !CONFIG_S390 */ 3345 3346 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3347 { 3348 struct pid *pid; 3349 struct task_struct *task = NULL; 3350 int ret = 0; 3351 3352 rcu_read_lock(); 3353 pid = rcu_dereference(target->pid); 3354 if (pid) 3355 task = get_pid_task(pid, PIDTYPE_PID); 3356 rcu_read_unlock(); 3357 if (!task) 3358 return ret; 3359 ret = yield_to(task, 1); 3360 put_task_struct(task); 3361 3362 return ret; 3363 } 3364 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3365 3366 /* 3367 * Helper that checks whether a VCPU is eligible for directed yield. 3368 * Most eligible candidate to yield is decided by following heuristics: 3369 * 3370 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3371 * (preempted lock holder), indicated by @in_spin_loop. 3372 * Set at the beginning and cleared at the end of interception/PLE handler. 3373 * 3374 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3375 * chance last time (mostly it has become eligible now since we have probably 3376 * yielded to lockholder in last iteration. This is done by toggling 3377 * @dy_eligible each time a VCPU checked for eligibility.) 3378 * 3379 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3380 * to preempted lock-holder could result in wrong VCPU selection and CPU 3381 * burning. Giving priority for a potential lock-holder increases lock 3382 * progress. 3383 * 3384 * Since algorithm is based on heuristics, accessing another VCPU data without 3385 * locking does not harm. It may result in trying to yield to same VCPU, fail 3386 * and continue with next VCPU and so on. 3387 */ 3388 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3389 { 3390 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3391 bool eligible; 3392 3393 eligible = !vcpu->spin_loop.in_spin_loop || 3394 vcpu->spin_loop.dy_eligible; 3395 3396 if (vcpu->spin_loop.in_spin_loop) 3397 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3398 3399 return eligible; 3400 #else 3401 return true; 3402 #endif 3403 } 3404 3405 /* 3406 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3407 * a vcpu_load/vcpu_put pair. However, for most architectures 3408 * kvm_arch_vcpu_runnable does not require vcpu_load. 3409 */ 3410 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3411 { 3412 return kvm_arch_vcpu_runnable(vcpu); 3413 } 3414 3415 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3416 { 3417 if (kvm_arch_dy_runnable(vcpu)) 3418 return true; 3419 3420 #ifdef CONFIG_KVM_ASYNC_PF 3421 if (!list_empty_careful(&vcpu->async_pf.done)) 3422 return true; 3423 #endif 3424 3425 return false; 3426 } 3427 3428 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3429 { 3430 return false; 3431 } 3432 3433 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3434 { 3435 struct kvm *kvm = me->kvm; 3436 struct kvm_vcpu *vcpu; 3437 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3438 int yielded = 0; 3439 int try = 3; 3440 int pass; 3441 int i; 3442 3443 kvm_vcpu_set_in_spin_loop(me, true); 3444 /* 3445 * We boost the priority of a VCPU that is runnable but not 3446 * currently running, because it got preempted by something 3447 * else and called schedule in __vcpu_run. Hopefully that 3448 * VCPU is holding the lock that we need and will release it. 3449 * We approximate round-robin by starting at the last boosted VCPU. 3450 */ 3451 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3452 kvm_for_each_vcpu(i, vcpu, kvm) { 3453 if (!pass && i <= last_boosted_vcpu) { 3454 i = last_boosted_vcpu; 3455 continue; 3456 } else if (pass && i > last_boosted_vcpu) 3457 break; 3458 if (!READ_ONCE(vcpu->ready)) 3459 continue; 3460 if (vcpu == me) 3461 continue; 3462 if (rcuwait_active(&vcpu->wait) && 3463 !vcpu_dy_runnable(vcpu)) 3464 continue; 3465 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3466 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3467 !kvm_arch_vcpu_in_kernel(vcpu)) 3468 continue; 3469 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3470 continue; 3471 3472 yielded = kvm_vcpu_yield_to(vcpu); 3473 if (yielded > 0) { 3474 kvm->last_boosted_vcpu = i; 3475 break; 3476 } else if (yielded < 0) { 3477 try--; 3478 if (!try) 3479 break; 3480 } 3481 } 3482 } 3483 kvm_vcpu_set_in_spin_loop(me, false); 3484 3485 /* Ensure vcpu is not eligible during next spinloop */ 3486 kvm_vcpu_set_dy_eligible(me, false); 3487 } 3488 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3489 3490 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3491 { 3492 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3493 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3494 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3495 kvm->dirty_ring_size / PAGE_SIZE); 3496 #else 3497 return false; 3498 #endif 3499 } 3500 3501 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3502 { 3503 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3504 struct page *page; 3505 3506 if (vmf->pgoff == 0) 3507 page = virt_to_page(vcpu->run); 3508 #ifdef CONFIG_X86 3509 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3510 page = virt_to_page(vcpu->arch.pio_data); 3511 #endif 3512 #ifdef CONFIG_KVM_MMIO 3513 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3514 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3515 #endif 3516 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3517 page = kvm_dirty_ring_get_page( 3518 &vcpu->dirty_ring, 3519 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3520 else 3521 return kvm_arch_vcpu_fault(vcpu, vmf); 3522 get_page(page); 3523 vmf->page = page; 3524 return 0; 3525 } 3526 3527 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3528 .fault = kvm_vcpu_fault, 3529 }; 3530 3531 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3532 { 3533 struct kvm_vcpu *vcpu = file->private_data; 3534 unsigned long pages = vma_pages(vma); 3535 3536 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3537 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3538 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3539 return -EINVAL; 3540 3541 vma->vm_ops = &kvm_vcpu_vm_ops; 3542 return 0; 3543 } 3544 3545 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3546 { 3547 struct kvm_vcpu *vcpu = filp->private_data; 3548 3549 kvm_put_kvm(vcpu->kvm); 3550 return 0; 3551 } 3552 3553 static struct file_operations kvm_vcpu_fops = { 3554 .release = kvm_vcpu_release, 3555 .unlocked_ioctl = kvm_vcpu_ioctl, 3556 .mmap = kvm_vcpu_mmap, 3557 .llseek = noop_llseek, 3558 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3559 }; 3560 3561 /* 3562 * Allocates an inode for the vcpu. 3563 */ 3564 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3565 { 3566 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3567 3568 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3569 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3570 } 3571 3572 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3573 { 3574 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3575 struct dentry *debugfs_dentry; 3576 char dir_name[ITOA_MAX_LEN * 2]; 3577 3578 if (!debugfs_initialized()) 3579 return; 3580 3581 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3582 debugfs_dentry = debugfs_create_dir(dir_name, 3583 vcpu->kvm->debugfs_dentry); 3584 3585 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3586 #endif 3587 } 3588 3589 /* 3590 * Creates some virtual cpus. Good luck creating more than one. 3591 */ 3592 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3593 { 3594 int r; 3595 struct kvm_vcpu *vcpu; 3596 struct page *page; 3597 3598 if (id >= KVM_MAX_VCPU_IDS) 3599 return -EINVAL; 3600 3601 mutex_lock(&kvm->lock); 3602 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3603 mutex_unlock(&kvm->lock); 3604 return -EINVAL; 3605 } 3606 3607 kvm->created_vcpus++; 3608 mutex_unlock(&kvm->lock); 3609 3610 r = kvm_arch_vcpu_precreate(kvm, id); 3611 if (r) 3612 goto vcpu_decrement; 3613 3614 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3615 if (!vcpu) { 3616 r = -ENOMEM; 3617 goto vcpu_decrement; 3618 } 3619 3620 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3621 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3622 if (!page) { 3623 r = -ENOMEM; 3624 goto vcpu_free; 3625 } 3626 vcpu->run = page_address(page); 3627 3628 kvm_vcpu_init(vcpu, kvm, id); 3629 3630 r = kvm_arch_vcpu_create(vcpu); 3631 if (r) 3632 goto vcpu_free_run_page; 3633 3634 if (kvm->dirty_ring_size) { 3635 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3636 id, kvm->dirty_ring_size); 3637 if (r) 3638 goto arch_vcpu_destroy; 3639 } 3640 3641 mutex_lock(&kvm->lock); 3642 if (kvm_get_vcpu_by_id(kvm, id)) { 3643 r = -EEXIST; 3644 goto unlock_vcpu_destroy; 3645 } 3646 3647 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3648 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); 3649 3650 /* Fill the stats id string for the vcpu */ 3651 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 3652 task_pid_nr(current), id); 3653 3654 /* Now it's all set up, let userspace reach it */ 3655 kvm_get_kvm(kvm); 3656 r = create_vcpu_fd(vcpu); 3657 if (r < 0) { 3658 kvm_put_kvm_no_destroy(kvm); 3659 goto unlock_vcpu_destroy; 3660 } 3661 3662 kvm->vcpus[vcpu->vcpu_idx] = vcpu; 3663 3664 /* 3665 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 3666 * before kvm->online_vcpu's incremented value. 3667 */ 3668 smp_wmb(); 3669 atomic_inc(&kvm->online_vcpus); 3670 3671 mutex_unlock(&kvm->lock); 3672 kvm_arch_vcpu_postcreate(vcpu); 3673 kvm_create_vcpu_debugfs(vcpu); 3674 return r; 3675 3676 unlock_vcpu_destroy: 3677 mutex_unlock(&kvm->lock); 3678 kvm_dirty_ring_free(&vcpu->dirty_ring); 3679 arch_vcpu_destroy: 3680 kvm_arch_vcpu_destroy(vcpu); 3681 vcpu_free_run_page: 3682 free_page((unsigned long)vcpu->run); 3683 vcpu_free: 3684 kmem_cache_free(kvm_vcpu_cache, vcpu); 3685 vcpu_decrement: 3686 mutex_lock(&kvm->lock); 3687 kvm->created_vcpus--; 3688 mutex_unlock(&kvm->lock); 3689 return r; 3690 } 3691 3692 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3693 { 3694 if (sigset) { 3695 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3696 vcpu->sigset_active = 1; 3697 vcpu->sigset = *sigset; 3698 } else 3699 vcpu->sigset_active = 0; 3700 return 0; 3701 } 3702 3703 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 3704 size_t size, loff_t *offset) 3705 { 3706 struct kvm_vcpu *vcpu = file->private_data; 3707 3708 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 3709 &kvm_vcpu_stats_desc[0], &vcpu->stat, 3710 sizeof(vcpu->stat), user_buffer, size, offset); 3711 } 3712 3713 static const struct file_operations kvm_vcpu_stats_fops = { 3714 .read = kvm_vcpu_stats_read, 3715 .llseek = noop_llseek, 3716 }; 3717 3718 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 3719 { 3720 int fd; 3721 struct file *file; 3722 char name[15 + ITOA_MAX_LEN + 1]; 3723 3724 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 3725 3726 fd = get_unused_fd_flags(O_CLOEXEC); 3727 if (fd < 0) 3728 return fd; 3729 3730 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 3731 if (IS_ERR(file)) { 3732 put_unused_fd(fd); 3733 return PTR_ERR(file); 3734 } 3735 file->f_mode |= FMODE_PREAD; 3736 fd_install(fd, file); 3737 3738 return fd; 3739 } 3740 3741 static long kvm_vcpu_ioctl(struct file *filp, 3742 unsigned int ioctl, unsigned long arg) 3743 { 3744 struct kvm_vcpu *vcpu = filp->private_data; 3745 void __user *argp = (void __user *)arg; 3746 int r; 3747 struct kvm_fpu *fpu = NULL; 3748 struct kvm_sregs *kvm_sregs = NULL; 3749 3750 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) 3751 return -EIO; 3752 3753 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3754 return -EINVAL; 3755 3756 /* 3757 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3758 * execution; mutex_lock() would break them. 3759 */ 3760 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3761 if (r != -ENOIOCTLCMD) 3762 return r; 3763 3764 if (mutex_lock_killable(&vcpu->mutex)) 3765 return -EINTR; 3766 switch (ioctl) { 3767 case KVM_RUN: { 3768 struct pid *oldpid; 3769 r = -EINVAL; 3770 if (arg) 3771 goto out; 3772 oldpid = rcu_access_pointer(vcpu->pid); 3773 if (unlikely(oldpid != task_pid(current))) { 3774 /* The thread running this VCPU changed. */ 3775 struct pid *newpid; 3776 3777 r = kvm_arch_vcpu_run_pid_change(vcpu); 3778 if (r) 3779 break; 3780 3781 newpid = get_task_pid(current, PIDTYPE_PID); 3782 rcu_assign_pointer(vcpu->pid, newpid); 3783 if (oldpid) 3784 synchronize_rcu(); 3785 put_pid(oldpid); 3786 } 3787 r = kvm_arch_vcpu_ioctl_run(vcpu); 3788 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3789 break; 3790 } 3791 case KVM_GET_REGS: { 3792 struct kvm_regs *kvm_regs; 3793 3794 r = -ENOMEM; 3795 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3796 if (!kvm_regs) 3797 goto out; 3798 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3799 if (r) 3800 goto out_free1; 3801 r = -EFAULT; 3802 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3803 goto out_free1; 3804 r = 0; 3805 out_free1: 3806 kfree(kvm_regs); 3807 break; 3808 } 3809 case KVM_SET_REGS: { 3810 struct kvm_regs *kvm_regs; 3811 3812 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3813 if (IS_ERR(kvm_regs)) { 3814 r = PTR_ERR(kvm_regs); 3815 goto out; 3816 } 3817 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3818 kfree(kvm_regs); 3819 break; 3820 } 3821 case KVM_GET_SREGS: { 3822 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3823 GFP_KERNEL_ACCOUNT); 3824 r = -ENOMEM; 3825 if (!kvm_sregs) 3826 goto out; 3827 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3828 if (r) 3829 goto out; 3830 r = -EFAULT; 3831 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3832 goto out; 3833 r = 0; 3834 break; 3835 } 3836 case KVM_SET_SREGS: { 3837 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3838 if (IS_ERR(kvm_sregs)) { 3839 r = PTR_ERR(kvm_sregs); 3840 kvm_sregs = NULL; 3841 goto out; 3842 } 3843 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3844 break; 3845 } 3846 case KVM_GET_MP_STATE: { 3847 struct kvm_mp_state mp_state; 3848 3849 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3850 if (r) 3851 goto out; 3852 r = -EFAULT; 3853 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3854 goto out; 3855 r = 0; 3856 break; 3857 } 3858 case KVM_SET_MP_STATE: { 3859 struct kvm_mp_state mp_state; 3860 3861 r = -EFAULT; 3862 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3863 goto out; 3864 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3865 break; 3866 } 3867 case KVM_TRANSLATE: { 3868 struct kvm_translation tr; 3869 3870 r = -EFAULT; 3871 if (copy_from_user(&tr, argp, sizeof(tr))) 3872 goto out; 3873 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 3874 if (r) 3875 goto out; 3876 r = -EFAULT; 3877 if (copy_to_user(argp, &tr, sizeof(tr))) 3878 goto out; 3879 r = 0; 3880 break; 3881 } 3882 case KVM_SET_GUEST_DEBUG: { 3883 struct kvm_guest_debug dbg; 3884 3885 r = -EFAULT; 3886 if (copy_from_user(&dbg, argp, sizeof(dbg))) 3887 goto out; 3888 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 3889 break; 3890 } 3891 case KVM_SET_SIGNAL_MASK: { 3892 struct kvm_signal_mask __user *sigmask_arg = argp; 3893 struct kvm_signal_mask kvm_sigmask; 3894 sigset_t sigset, *p; 3895 3896 p = NULL; 3897 if (argp) { 3898 r = -EFAULT; 3899 if (copy_from_user(&kvm_sigmask, argp, 3900 sizeof(kvm_sigmask))) 3901 goto out; 3902 r = -EINVAL; 3903 if (kvm_sigmask.len != sizeof(sigset)) 3904 goto out; 3905 r = -EFAULT; 3906 if (copy_from_user(&sigset, sigmask_arg->sigset, 3907 sizeof(sigset))) 3908 goto out; 3909 p = &sigset; 3910 } 3911 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 3912 break; 3913 } 3914 case KVM_GET_FPU: { 3915 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 3916 r = -ENOMEM; 3917 if (!fpu) 3918 goto out; 3919 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 3920 if (r) 3921 goto out; 3922 r = -EFAULT; 3923 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 3924 goto out; 3925 r = 0; 3926 break; 3927 } 3928 case KVM_SET_FPU: { 3929 fpu = memdup_user(argp, sizeof(*fpu)); 3930 if (IS_ERR(fpu)) { 3931 r = PTR_ERR(fpu); 3932 fpu = NULL; 3933 goto out; 3934 } 3935 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 3936 break; 3937 } 3938 case KVM_GET_STATS_FD: { 3939 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 3940 break; 3941 } 3942 default: 3943 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 3944 } 3945 out: 3946 mutex_unlock(&vcpu->mutex); 3947 kfree(fpu); 3948 kfree(kvm_sregs); 3949 return r; 3950 } 3951 3952 #ifdef CONFIG_KVM_COMPAT 3953 static long kvm_vcpu_compat_ioctl(struct file *filp, 3954 unsigned int ioctl, unsigned long arg) 3955 { 3956 struct kvm_vcpu *vcpu = filp->private_data; 3957 void __user *argp = compat_ptr(arg); 3958 int r; 3959 3960 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) 3961 return -EIO; 3962 3963 switch (ioctl) { 3964 case KVM_SET_SIGNAL_MASK: { 3965 struct kvm_signal_mask __user *sigmask_arg = argp; 3966 struct kvm_signal_mask kvm_sigmask; 3967 sigset_t sigset; 3968 3969 if (argp) { 3970 r = -EFAULT; 3971 if (copy_from_user(&kvm_sigmask, argp, 3972 sizeof(kvm_sigmask))) 3973 goto out; 3974 r = -EINVAL; 3975 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 3976 goto out; 3977 r = -EFAULT; 3978 if (get_compat_sigset(&sigset, 3979 (compat_sigset_t __user *)sigmask_arg->sigset)) 3980 goto out; 3981 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 3982 } else 3983 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 3984 break; 3985 } 3986 default: 3987 r = kvm_vcpu_ioctl(filp, ioctl, arg); 3988 } 3989 3990 out: 3991 return r; 3992 } 3993 #endif 3994 3995 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 3996 { 3997 struct kvm_device *dev = filp->private_data; 3998 3999 if (dev->ops->mmap) 4000 return dev->ops->mmap(dev, vma); 4001 4002 return -ENODEV; 4003 } 4004 4005 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4006 int (*accessor)(struct kvm_device *dev, 4007 struct kvm_device_attr *attr), 4008 unsigned long arg) 4009 { 4010 struct kvm_device_attr attr; 4011 4012 if (!accessor) 4013 return -EPERM; 4014 4015 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4016 return -EFAULT; 4017 4018 return accessor(dev, &attr); 4019 } 4020 4021 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4022 unsigned long arg) 4023 { 4024 struct kvm_device *dev = filp->private_data; 4025 4026 if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged) 4027 return -EIO; 4028 4029 switch (ioctl) { 4030 case KVM_SET_DEVICE_ATTR: 4031 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4032 case KVM_GET_DEVICE_ATTR: 4033 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4034 case KVM_HAS_DEVICE_ATTR: 4035 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4036 default: 4037 if (dev->ops->ioctl) 4038 return dev->ops->ioctl(dev, ioctl, arg); 4039 4040 return -ENOTTY; 4041 } 4042 } 4043 4044 static int kvm_device_release(struct inode *inode, struct file *filp) 4045 { 4046 struct kvm_device *dev = filp->private_data; 4047 struct kvm *kvm = dev->kvm; 4048 4049 if (dev->ops->release) { 4050 mutex_lock(&kvm->lock); 4051 list_del(&dev->vm_node); 4052 dev->ops->release(dev); 4053 mutex_unlock(&kvm->lock); 4054 } 4055 4056 kvm_put_kvm(kvm); 4057 return 0; 4058 } 4059 4060 static const struct file_operations kvm_device_fops = { 4061 .unlocked_ioctl = kvm_device_ioctl, 4062 .release = kvm_device_release, 4063 KVM_COMPAT(kvm_device_ioctl), 4064 .mmap = kvm_device_mmap, 4065 }; 4066 4067 struct kvm_device *kvm_device_from_filp(struct file *filp) 4068 { 4069 if (filp->f_op != &kvm_device_fops) 4070 return NULL; 4071 4072 return filp->private_data; 4073 } 4074 4075 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4076 #ifdef CONFIG_KVM_MPIC 4077 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4078 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4079 #endif 4080 }; 4081 4082 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4083 { 4084 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4085 return -ENOSPC; 4086 4087 if (kvm_device_ops_table[type] != NULL) 4088 return -EEXIST; 4089 4090 kvm_device_ops_table[type] = ops; 4091 return 0; 4092 } 4093 4094 void kvm_unregister_device_ops(u32 type) 4095 { 4096 if (kvm_device_ops_table[type] != NULL) 4097 kvm_device_ops_table[type] = NULL; 4098 } 4099 4100 static int kvm_ioctl_create_device(struct kvm *kvm, 4101 struct kvm_create_device *cd) 4102 { 4103 const struct kvm_device_ops *ops = NULL; 4104 struct kvm_device *dev; 4105 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4106 int type; 4107 int ret; 4108 4109 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4110 return -ENODEV; 4111 4112 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4113 ops = kvm_device_ops_table[type]; 4114 if (ops == NULL) 4115 return -ENODEV; 4116 4117 if (test) 4118 return 0; 4119 4120 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4121 if (!dev) 4122 return -ENOMEM; 4123 4124 dev->ops = ops; 4125 dev->kvm = kvm; 4126 4127 mutex_lock(&kvm->lock); 4128 ret = ops->create(dev, type); 4129 if (ret < 0) { 4130 mutex_unlock(&kvm->lock); 4131 kfree(dev); 4132 return ret; 4133 } 4134 list_add(&dev->vm_node, &kvm->devices); 4135 mutex_unlock(&kvm->lock); 4136 4137 if (ops->init) 4138 ops->init(dev); 4139 4140 kvm_get_kvm(kvm); 4141 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4142 if (ret < 0) { 4143 kvm_put_kvm_no_destroy(kvm); 4144 mutex_lock(&kvm->lock); 4145 list_del(&dev->vm_node); 4146 mutex_unlock(&kvm->lock); 4147 ops->destroy(dev); 4148 return ret; 4149 } 4150 4151 cd->fd = ret; 4152 return 0; 4153 } 4154 4155 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4156 { 4157 switch (arg) { 4158 case KVM_CAP_USER_MEMORY: 4159 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4160 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4161 case KVM_CAP_INTERNAL_ERROR_DATA: 4162 #ifdef CONFIG_HAVE_KVM_MSI 4163 case KVM_CAP_SIGNAL_MSI: 4164 #endif 4165 #ifdef CONFIG_HAVE_KVM_IRQFD 4166 case KVM_CAP_IRQFD: 4167 case KVM_CAP_IRQFD_RESAMPLE: 4168 #endif 4169 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4170 case KVM_CAP_CHECK_EXTENSION_VM: 4171 case KVM_CAP_ENABLE_CAP_VM: 4172 case KVM_CAP_HALT_POLL: 4173 return 1; 4174 #ifdef CONFIG_KVM_MMIO 4175 case KVM_CAP_COALESCED_MMIO: 4176 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4177 case KVM_CAP_COALESCED_PIO: 4178 return 1; 4179 #endif 4180 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4181 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4182 return KVM_DIRTY_LOG_MANUAL_CAPS; 4183 #endif 4184 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4185 case KVM_CAP_IRQ_ROUTING: 4186 return KVM_MAX_IRQ_ROUTES; 4187 #endif 4188 #if KVM_ADDRESS_SPACE_NUM > 1 4189 case KVM_CAP_MULTI_ADDRESS_SPACE: 4190 return KVM_ADDRESS_SPACE_NUM; 4191 #endif 4192 case KVM_CAP_NR_MEMSLOTS: 4193 return KVM_USER_MEM_SLOTS; 4194 case KVM_CAP_DIRTY_LOG_RING: 4195 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 4196 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4197 #else 4198 return 0; 4199 #endif 4200 case KVM_CAP_BINARY_STATS_FD: 4201 return 1; 4202 default: 4203 break; 4204 } 4205 return kvm_vm_ioctl_check_extension(kvm, arg); 4206 } 4207 4208 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4209 { 4210 int r; 4211 4212 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4213 return -EINVAL; 4214 4215 /* the size should be power of 2 */ 4216 if (!size || (size & (size - 1))) 4217 return -EINVAL; 4218 4219 /* Should be bigger to keep the reserved entries, or a page */ 4220 if (size < kvm_dirty_ring_get_rsvd_entries() * 4221 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4222 return -EINVAL; 4223 4224 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4225 sizeof(struct kvm_dirty_gfn)) 4226 return -E2BIG; 4227 4228 /* We only allow it to set once */ 4229 if (kvm->dirty_ring_size) 4230 return -EINVAL; 4231 4232 mutex_lock(&kvm->lock); 4233 4234 if (kvm->created_vcpus) { 4235 /* We don't allow to change this value after vcpu created */ 4236 r = -EINVAL; 4237 } else { 4238 kvm->dirty_ring_size = size; 4239 r = 0; 4240 } 4241 4242 mutex_unlock(&kvm->lock); 4243 return r; 4244 } 4245 4246 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4247 { 4248 int i; 4249 struct kvm_vcpu *vcpu; 4250 int cleared = 0; 4251 4252 if (!kvm->dirty_ring_size) 4253 return -EINVAL; 4254 4255 mutex_lock(&kvm->slots_lock); 4256 4257 kvm_for_each_vcpu(i, vcpu, kvm) 4258 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4259 4260 mutex_unlock(&kvm->slots_lock); 4261 4262 if (cleared) 4263 kvm_flush_remote_tlbs(kvm); 4264 4265 return cleared; 4266 } 4267 4268 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4269 struct kvm_enable_cap *cap) 4270 { 4271 return -EINVAL; 4272 } 4273 4274 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4275 struct kvm_enable_cap *cap) 4276 { 4277 switch (cap->cap) { 4278 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4279 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4280 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4281 4282 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4283 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4284 4285 if (cap->flags || (cap->args[0] & ~allowed_options)) 4286 return -EINVAL; 4287 kvm->manual_dirty_log_protect = cap->args[0]; 4288 return 0; 4289 } 4290 #endif 4291 case KVM_CAP_HALT_POLL: { 4292 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4293 return -EINVAL; 4294 4295 kvm->max_halt_poll_ns = cap->args[0]; 4296 return 0; 4297 } 4298 case KVM_CAP_DIRTY_LOG_RING: 4299 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4300 default: 4301 return kvm_vm_ioctl_enable_cap(kvm, cap); 4302 } 4303 } 4304 4305 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4306 size_t size, loff_t *offset) 4307 { 4308 struct kvm *kvm = file->private_data; 4309 4310 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4311 &kvm_vm_stats_desc[0], &kvm->stat, 4312 sizeof(kvm->stat), user_buffer, size, offset); 4313 } 4314 4315 static const struct file_operations kvm_vm_stats_fops = { 4316 .read = kvm_vm_stats_read, 4317 .llseek = noop_llseek, 4318 }; 4319 4320 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4321 { 4322 int fd; 4323 struct file *file; 4324 4325 fd = get_unused_fd_flags(O_CLOEXEC); 4326 if (fd < 0) 4327 return fd; 4328 4329 file = anon_inode_getfile("kvm-vm-stats", 4330 &kvm_vm_stats_fops, kvm, O_RDONLY); 4331 if (IS_ERR(file)) { 4332 put_unused_fd(fd); 4333 return PTR_ERR(file); 4334 } 4335 file->f_mode |= FMODE_PREAD; 4336 fd_install(fd, file); 4337 4338 return fd; 4339 } 4340 4341 static long kvm_vm_ioctl(struct file *filp, 4342 unsigned int ioctl, unsigned long arg) 4343 { 4344 struct kvm *kvm = filp->private_data; 4345 void __user *argp = (void __user *)arg; 4346 int r; 4347 4348 if (kvm->mm != current->mm || kvm->vm_bugged) 4349 return -EIO; 4350 switch (ioctl) { 4351 case KVM_CREATE_VCPU: 4352 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4353 break; 4354 case KVM_ENABLE_CAP: { 4355 struct kvm_enable_cap cap; 4356 4357 r = -EFAULT; 4358 if (copy_from_user(&cap, argp, sizeof(cap))) 4359 goto out; 4360 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4361 break; 4362 } 4363 case KVM_SET_USER_MEMORY_REGION: { 4364 struct kvm_userspace_memory_region kvm_userspace_mem; 4365 4366 r = -EFAULT; 4367 if (copy_from_user(&kvm_userspace_mem, argp, 4368 sizeof(kvm_userspace_mem))) 4369 goto out; 4370 4371 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4372 break; 4373 } 4374 case KVM_GET_DIRTY_LOG: { 4375 struct kvm_dirty_log log; 4376 4377 r = -EFAULT; 4378 if (copy_from_user(&log, argp, sizeof(log))) 4379 goto out; 4380 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4381 break; 4382 } 4383 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4384 case KVM_CLEAR_DIRTY_LOG: { 4385 struct kvm_clear_dirty_log log; 4386 4387 r = -EFAULT; 4388 if (copy_from_user(&log, argp, sizeof(log))) 4389 goto out; 4390 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4391 break; 4392 } 4393 #endif 4394 #ifdef CONFIG_KVM_MMIO 4395 case KVM_REGISTER_COALESCED_MMIO: { 4396 struct kvm_coalesced_mmio_zone zone; 4397 4398 r = -EFAULT; 4399 if (copy_from_user(&zone, argp, sizeof(zone))) 4400 goto out; 4401 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4402 break; 4403 } 4404 case KVM_UNREGISTER_COALESCED_MMIO: { 4405 struct kvm_coalesced_mmio_zone zone; 4406 4407 r = -EFAULT; 4408 if (copy_from_user(&zone, argp, sizeof(zone))) 4409 goto out; 4410 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4411 break; 4412 } 4413 #endif 4414 case KVM_IRQFD: { 4415 struct kvm_irqfd data; 4416 4417 r = -EFAULT; 4418 if (copy_from_user(&data, argp, sizeof(data))) 4419 goto out; 4420 r = kvm_irqfd(kvm, &data); 4421 break; 4422 } 4423 case KVM_IOEVENTFD: { 4424 struct kvm_ioeventfd data; 4425 4426 r = -EFAULT; 4427 if (copy_from_user(&data, argp, sizeof(data))) 4428 goto out; 4429 r = kvm_ioeventfd(kvm, &data); 4430 break; 4431 } 4432 #ifdef CONFIG_HAVE_KVM_MSI 4433 case KVM_SIGNAL_MSI: { 4434 struct kvm_msi msi; 4435 4436 r = -EFAULT; 4437 if (copy_from_user(&msi, argp, sizeof(msi))) 4438 goto out; 4439 r = kvm_send_userspace_msi(kvm, &msi); 4440 break; 4441 } 4442 #endif 4443 #ifdef __KVM_HAVE_IRQ_LINE 4444 case KVM_IRQ_LINE_STATUS: 4445 case KVM_IRQ_LINE: { 4446 struct kvm_irq_level irq_event; 4447 4448 r = -EFAULT; 4449 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4450 goto out; 4451 4452 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4453 ioctl == KVM_IRQ_LINE_STATUS); 4454 if (r) 4455 goto out; 4456 4457 r = -EFAULT; 4458 if (ioctl == KVM_IRQ_LINE_STATUS) { 4459 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4460 goto out; 4461 } 4462 4463 r = 0; 4464 break; 4465 } 4466 #endif 4467 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4468 case KVM_SET_GSI_ROUTING: { 4469 struct kvm_irq_routing routing; 4470 struct kvm_irq_routing __user *urouting; 4471 struct kvm_irq_routing_entry *entries = NULL; 4472 4473 r = -EFAULT; 4474 if (copy_from_user(&routing, argp, sizeof(routing))) 4475 goto out; 4476 r = -EINVAL; 4477 if (!kvm_arch_can_set_irq_routing(kvm)) 4478 goto out; 4479 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4480 goto out; 4481 if (routing.flags) 4482 goto out; 4483 if (routing.nr) { 4484 urouting = argp; 4485 entries = vmemdup_user(urouting->entries, 4486 array_size(sizeof(*entries), 4487 routing.nr)); 4488 if (IS_ERR(entries)) { 4489 r = PTR_ERR(entries); 4490 goto out; 4491 } 4492 } 4493 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4494 routing.flags); 4495 kvfree(entries); 4496 break; 4497 } 4498 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4499 case KVM_CREATE_DEVICE: { 4500 struct kvm_create_device cd; 4501 4502 r = -EFAULT; 4503 if (copy_from_user(&cd, argp, sizeof(cd))) 4504 goto out; 4505 4506 r = kvm_ioctl_create_device(kvm, &cd); 4507 if (r) 4508 goto out; 4509 4510 r = -EFAULT; 4511 if (copy_to_user(argp, &cd, sizeof(cd))) 4512 goto out; 4513 4514 r = 0; 4515 break; 4516 } 4517 case KVM_CHECK_EXTENSION: 4518 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4519 break; 4520 case KVM_RESET_DIRTY_RINGS: 4521 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4522 break; 4523 case KVM_GET_STATS_FD: 4524 r = kvm_vm_ioctl_get_stats_fd(kvm); 4525 break; 4526 default: 4527 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4528 } 4529 out: 4530 return r; 4531 } 4532 4533 #ifdef CONFIG_KVM_COMPAT 4534 struct compat_kvm_dirty_log { 4535 __u32 slot; 4536 __u32 padding1; 4537 union { 4538 compat_uptr_t dirty_bitmap; /* one bit per page */ 4539 __u64 padding2; 4540 }; 4541 }; 4542 4543 struct compat_kvm_clear_dirty_log { 4544 __u32 slot; 4545 __u32 num_pages; 4546 __u64 first_page; 4547 union { 4548 compat_uptr_t dirty_bitmap; /* one bit per page */ 4549 __u64 padding2; 4550 }; 4551 }; 4552 4553 static long kvm_vm_compat_ioctl(struct file *filp, 4554 unsigned int ioctl, unsigned long arg) 4555 { 4556 struct kvm *kvm = filp->private_data; 4557 int r; 4558 4559 if (kvm->mm != current->mm || kvm->vm_bugged) 4560 return -EIO; 4561 switch (ioctl) { 4562 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4563 case KVM_CLEAR_DIRTY_LOG: { 4564 struct compat_kvm_clear_dirty_log compat_log; 4565 struct kvm_clear_dirty_log log; 4566 4567 if (copy_from_user(&compat_log, (void __user *)arg, 4568 sizeof(compat_log))) 4569 return -EFAULT; 4570 log.slot = compat_log.slot; 4571 log.num_pages = compat_log.num_pages; 4572 log.first_page = compat_log.first_page; 4573 log.padding2 = compat_log.padding2; 4574 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4575 4576 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4577 break; 4578 } 4579 #endif 4580 case KVM_GET_DIRTY_LOG: { 4581 struct compat_kvm_dirty_log compat_log; 4582 struct kvm_dirty_log log; 4583 4584 if (copy_from_user(&compat_log, (void __user *)arg, 4585 sizeof(compat_log))) 4586 return -EFAULT; 4587 log.slot = compat_log.slot; 4588 log.padding1 = compat_log.padding1; 4589 log.padding2 = compat_log.padding2; 4590 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4591 4592 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4593 break; 4594 } 4595 default: 4596 r = kvm_vm_ioctl(filp, ioctl, arg); 4597 } 4598 return r; 4599 } 4600 #endif 4601 4602 static struct file_operations kvm_vm_fops = { 4603 .release = kvm_vm_release, 4604 .unlocked_ioctl = kvm_vm_ioctl, 4605 .llseek = noop_llseek, 4606 KVM_COMPAT(kvm_vm_compat_ioctl), 4607 }; 4608 4609 bool file_is_kvm(struct file *file) 4610 { 4611 return file && file->f_op == &kvm_vm_fops; 4612 } 4613 EXPORT_SYMBOL_GPL(file_is_kvm); 4614 4615 static int kvm_dev_ioctl_create_vm(unsigned long type) 4616 { 4617 int r; 4618 struct kvm *kvm; 4619 struct file *file; 4620 4621 kvm = kvm_create_vm(type); 4622 if (IS_ERR(kvm)) 4623 return PTR_ERR(kvm); 4624 #ifdef CONFIG_KVM_MMIO 4625 r = kvm_coalesced_mmio_init(kvm); 4626 if (r < 0) 4627 goto put_kvm; 4628 #endif 4629 r = get_unused_fd_flags(O_CLOEXEC); 4630 if (r < 0) 4631 goto put_kvm; 4632 4633 snprintf(kvm->stats_id, sizeof(kvm->stats_id), 4634 "kvm-%d", task_pid_nr(current)); 4635 4636 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4637 if (IS_ERR(file)) { 4638 put_unused_fd(r); 4639 r = PTR_ERR(file); 4640 goto put_kvm; 4641 } 4642 4643 /* 4644 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4645 * already set, with ->release() being kvm_vm_release(). In error 4646 * cases it will be called by the final fput(file) and will take 4647 * care of doing kvm_put_kvm(kvm). 4648 */ 4649 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4650 put_unused_fd(r); 4651 fput(file); 4652 return -ENOMEM; 4653 } 4654 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4655 4656 fd_install(r, file); 4657 return r; 4658 4659 put_kvm: 4660 kvm_put_kvm(kvm); 4661 return r; 4662 } 4663 4664 static long kvm_dev_ioctl(struct file *filp, 4665 unsigned int ioctl, unsigned long arg) 4666 { 4667 long r = -EINVAL; 4668 4669 switch (ioctl) { 4670 case KVM_GET_API_VERSION: 4671 if (arg) 4672 goto out; 4673 r = KVM_API_VERSION; 4674 break; 4675 case KVM_CREATE_VM: 4676 r = kvm_dev_ioctl_create_vm(arg); 4677 break; 4678 case KVM_CHECK_EXTENSION: 4679 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4680 break; 4681 case KVM_GET_VCPU_MMAP_SIZE: 4682 if (arg) 4683 goto out; 4684 r = PAGE_SIZE; /* struct kvm_run */ 4685 #ifdef CONFIG_X86 4686 r += PAGE_SIZE; /* pio data page */ 4687 #endif 4688 #ifdef CONFIG_KVM_MMIO 4689 r += PAGE_SIZE; /* coalesced mmio ring page */ 4690 #endif 4691 break; 4692 case KVM_TRACE_ENABLE: 4693 case KVM_TRACE_PAUSE: 4694 case KVM_TRACE_DISABLE: 4695 r = -EOPNOTSUPP; 4696 break; 4697 default: 4698 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4699 } 4700 out: 4701 return r; 4702 } 4703 4704 static struct file_operations kvm_chardev_ops = { 4705 .unlocked_ioctl = kvm_dev_ioctl, 4706 .llseek = noop_llseek, 4707 KVM_COMPAT(kvm_dev_ioctl), 4708 }; 4709 4710 static struct miscdevice kvm_dev = { 4711 KVM_MINOR, 4712 "kvm", 4713 &kvm_chardev_ops, 4714 }; 4715 4716 static void hardware_enable_nolock(void *junk) 4717 { 4718 int cpu = raw_smp_processor_id(); 4719 int r; 4720 4721 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4722 return; 4723 4724 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4725 4726 r = kvm_arch_hardware_enable(); 4727 4728 if (r) { 4729 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4730 atomic_inc(&hardware_enable_failed); 4731 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4732 } 4733 } 4734 4735 static int kvm_starting_cpu(unsigned int cpu) 4736 { 4737 raw_spin_lock(&kvm_count_lock); 4738 if (kvm_usage_count) 4739 hardware_enable_nolock(NULL); 4740 raw_spin_unlock(&kvm_count_lock); 4741 return 0; 4742 } 4743 4744 static void hardware_disable_nolock(void *junk) 4745 { 4746 int cpu = raw_smp_processor_id(); 4747 4748 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4749 return; 4750 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4751 kvm_arch_hardware_disable(); 4752 } 4753 4754 static int kvm_dying_cpu(unsigned int cpu) 4755 { 4756 raw_spin_lock(&kvm_count_lock); 4757 if (kvm_usage_count) 4758 hardware_disable_nolock(NULL); 4759 raw_spin_unlock(&kvm_count_lock); 4760 return 0; 4761 } 4762 4763 static void hardware_disable_all_nolock(void) 4764 { 4765 BUG_ON(!kvm_usage_count); 4766 4767 kvm_usage_count--; 4768 if (!kvm_usage_count) 4769 on_each_cpu(hardware_disable_nolock, NULL, 1); 4770 } 4771 4772 static void hardware_disable_all(void) 4773 { 4774 raw_spin_lock(&kvm_count_lock); 4775 hardware_disable_all_nolock(); 4776 raw_spin_unlock(&kvm_count_lock); 4777 } 4778 4779 static int hardware_enable_all(void) 4780 { 4781 int r = 0; 4782 4783 raw_spin_lock(&kvm_count_lock); 4784 4785 kvm_usage_count++; 4786 if (kvm_usage_count == 1) { 4787 atomic_set(&hardware_enable_failed, 0); 4788 on_each_cpu(hardware_enable_nolock, NULL, 1); 4789 4790 if (atomic_read(&hardware_enable_failed)) { 4791 hardware_disable_all_nolock(); 4792 r = -EBUSY; 4793 } 4794 } 4795 4796 raw_spin_unlock(&kvm_count_lock); 4797 4798 return r; 4799 } 4800 4801 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4802 void *v) 4803 { 4804 /* 4805 * Some (well, at least mine) BIOSes hang on reboot if 4806 * in vmx root mode. 4807 * 4808 * And Intel TXT required VMX off for all cpu when system shutdown. 4809 */ 4810 pr_info("kvm: exiting hardware virtualization\n"); 4811 kvm_rebooting = true; 4812 on_each_cpu(hardware_disable_nolock, NULL, 1); 4813 return NOTIFY_OK; 4814 } 4815 4816 static struct notifier_block kvm_reboot_notifier = { 4817 .notifier_call = kvm_reboot, 4818 .priority = 0, 4819 }; 4820 4821 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4822 { 4823 int i; 4824 4825 for (i = 0; i < bus->dev_count; i++) { 4826 struct kvm_io_device *pos = bus->range[i].dev; 4827 4828 kvm_iodevice_destructor(pos); 4829 } 4830 kfree(bus); 4831 } 4832 4833 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4834 const struct kvm_io_range *r2) 4835 { 4836 gpa_t addr1 = r1->addr; 4837 gpa_t addr2 = r2->addr; 4838 4839 if (addr1 < addr2) 4840 return -1; 4841 4842 /* If r2->len == 0, match the exact address. If r2->len != 0, 4843 * accept any overlapping write. Any order is acceptable for 4844 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4845 * we process all of them. 4846 */ 4847 if (r2->len) { 4848 addr1 += r1->len; 4849 addr2 += r2->len; 4850 } 4851 4852 if (addr1 > addr2) 4853 return 1; 4854 4855 return 0; 4856 } 4857 4858 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4859 { 4860 return kvm_io_bus_cmp(p1, p2); 4861 } 4862 4863 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4864 gpa_t addr, int len) 4865 { 4866 struct kvm_io_range *range, key; 4867 int off; 4868 4869 key = (struct kvm_io_range) { 4870 .addr = addr, 4871 .len = len, 4872 }; 4873 4874 range = bsearch(&key, bus->range, bus->dev_count, 4875 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 4876 if (range == NULL) 4877 return -ENOENT; 4878 4879 off = range - bus->range; 4880 4881 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 4882 off--; 4883 4884 return off; 4885 } 4886 4887 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4888 struct kvm_io_range *range, const void *val) 4889 { 4890 int idx; 4891 4892 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4893 if (idx < 0) 4894 return -EOPNOTSUPP; 4895 4896 while (idx < bus->dev_count && 4897 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4898 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 4899 range->len, val)) 4900 return idx; 4901 idx++; 4902 } 4903 4904 return -EOPNOTSUPP; 4905 } 4906 4907 /* kvm_io_bus_write - called under kvm->slots_lock */ 4908 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4909 int len, const void *val) 4910 { 4911 struct kvm_io_bus *bus; 4912 struct kvm_io_range range; 4913 int r; 4914 4915 range = (struct kvm_io_range) { 4916 .addr = addr, 4917 .len = len, 4918 }; 4919 4920 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4921 if (!bus) 4922 return -ENOMEM; 4923 r = __kvm_io_bus_write(vcpu, bus, &range, val); 4924 return r < 0 ? r : 0; 4925 } 4926 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 4927 4928 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 4929 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 4930 gpa_t addr, int len, const void *val, long cookie) 4931 { 4932 struct kvm_io_bus *bus; 4933 struct kvm_io_range range; 4934 4935 range = (struct kvm_io_range) { 4936 .addr = addr, 4937 .len = len, 4938 }; 4939 4940 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4941 if (!bus) 4942 return -ENOMEM; 4943 4944 /* First try the device referenced by cookie. */ 4945 if ((cookie >= 0) && (cookie < bus->dev_count) && 4946 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 4947 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 4948 val)) 4949 return cookie; 4950 4951 /* 4952 * cookie contained garbage; fall back to search and return the 4953 * correct cookie value. 4954 */ 4955 return __kvm_io_bus_write(vcpu, bus, &range, val); 4956 } 4957 4958 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4959 struct kvm_io_range *range, void *val) 4960 { 4961 int idx; 4962 4963 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4964 if (idx < 0) 4965 return -EOPNOTSUPP; 4966 4967 while (idx < bus->dev_count && 4968 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4969 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 4970 range->len, val)) 4971 return idx; 4972 idx++; 4973 } 4974 4975 return -EOPNOTSUPP; 4976 } 4977 4978 /* kvm_io_bus_read - called under kvm->slots_lock */ 4979 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4980 int len, void *val) 4981 { 4982 struct kvm_io_bus *bus; 4983 struct kvm_io_range range; 4984 int r; 4985 4986 range = (struct kvm_io_range) { 4987 .addr = addr, 4988 .len = len, 4989 }; 4990 4991 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4992 if (!bus) 4993 return -ENOMEM; 4994 r = __kvm_io_bus_read(vcpu, bus, &range, val); 4995 return r < 0 ? r : 0; 4996 } 4997 4998 /* Caller must hold slots_lock. */ 4999 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5000 int len, struct kvm_io_device *dev) 5001 { 5002 int i; 5003 struct kvm_io_bus *new_bus, *bus; 5004 struct kvm_io_range range; 5005 5006 bus = kvm_get_bus(kvm, bus_idx); 5007 if (!bus) 5008 return -ENOMEM; 5009 5010 /* exclude ioeventfd which is limited by maximum fd */ 5011 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5012 return -ENOSPC; 5013 5014 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5015 GFP_KERNEL_ACCOUNT); 5016 if (!new_bus) 5017 return -ENOMEM; 5018 5019 range = (struct kvm_io_range) { 5020 .addr = addr, 5021 .len = len, 5022 .dev = dev, 5023 }; 5024 5025 for (i = 0; i < bus->dev_count; i++) 5026 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5027 break; 5028 5029 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5030 new_bus->dev_count++; 5031 new_bus->range[i] = range; 5032 memcpy(new_bus->range + i + 1, bus->range + i, 5033 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5034 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5035 synchronize_srcu_expedited(&kvm->srcu); 5036 kfree(bus); 5037 5038 return 0; 5039 } 5040 5041 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5042 struct kvm_io_device *dev) 5043 { 5044 int i, j; 5045 struct kvm_io_bus *new_bus, *bus; 5046 5047 lockdep_assert_held(&kvm->slots_lock); 5048 5049 bus = kvm_get_bus(kvm, bus_idx); 5050 if (!bus) 5051 return 0; 5052 5053 for (i = 0; i < bus->dev_count; i++) { 5054 if (bus->range[i].dev == dev) { 5055 break; 5056 } 5057 } 5058 5059 if (i == bus->dev_count) 5060 return 0; 5061 5062 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5063 GFP_KERNEL_ACCOUNT); 5064 if (new_bus) { 5065 memcpy(new_bus, bus, struct_size(bus, range, i)); 5066 new_bus->dev_count--; 5067 memcpy(new_bus->range + i, bus->range + i + 1, 5068 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5069 } 5070 5071 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5072 synchronize_srcu_expedited(&kvm->srcu); 5073 5074 /* Destroy the old bus _after_ installing the (null) bus. */ 5075 if (!new_bus) { 5076 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5077 for (j = 0; j < bus->dev_count; j++) { 5078 if (j == i) 5079 continue; 5080 kvm_iodevice_destructor(bus->range[j].dev); 5081 } 5082 } 5083 5084 kfree(bus); 5085 return new_bus ? 0 : -ENOMEM; 5086 } 5087 5088 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5089 gpa_t addr) 5090 { 5091 struct kvm_io_bus *bus; 5092 int dev_idx, srcu_idx; 5093 struct kvm_io_device *iodev = NULL; 5094 5095 srcu_idx = srcu_read_lock(&kvm->srcu); 5096 5097 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5098 if (!bus) 5099 goto out_unlock; 5100 5101 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5102 if (dev_idx < 0) 5103 goto out_unlock; 5104 5105 iodev = bus->range[dev_idx].dev; 5106 5107 out_unlock: 5108 srcu_read_unlock(&kvm->srcu, srcu_idx); 5109 5110 return iodev; 5111 } 5112 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5113 5114 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5115 int (*get)(void *, u64 *), int (*set)(void *, u64), 5116 const char *fmt) 5117 { 5118 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5119 inode->i_private; 5120 5121 /* 5122 * The debugfs files are a reference to the kvm struct which 5123 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe 5124 * avoids the race between open and the removal of the debugfs directory. 5125 */ 5126 if (!kvm_get_kvm_safe(stat_data->kvm)) 5127 return -ENOENT; 5128 5129 if (simple_attr_open(inode, file, get, 5130 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5131 ? set : NULL, 5132 fmt)) { 5133 kvm_put_kvm(stat_data->kvm); 5134 return -ENOMEM; 5135 } 5136 5137 return 0; 5138 } 5139 5140 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5141 { 5142 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5143 inode->i_private; 5144 5145 simple_attr_release(inode, file); 5146 kvm_put_kvm(stat_data->kvm); 5147 5148 return 0; 5149 } 5150 5151 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5152 { 5153 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5154 5155 return 0; 5156 } 5157 5158 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5159 { 5160 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5161 5162 return 0; 5163 } 5164 5165 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5166 { 5167 int i; 5168 struct kvm_vcpu *vcpu; 5169 5170 *val = 0; 5171 5172 kvm_for_each_vcpu(i, vcpu, kvm) 5173 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5174 5175 return 0; 5176 } 5177 5178 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5179 { 5180 int i; 5181 struct kvm_vcpu *vcpu; 5182 5183 kvm_for_each_vcpu(i, vcpu, kvm) 5184 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5185 5186 return 0; 5187 } 5188 5189 static int kvm_stat_data_get(void *data, u64 *val) 5190 { 5191 int r = -EFAULT; 5192 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5193 5194 switch (stat_data->kind) { 5195 case KVM_STAT_VM: 5196 r = kvm_get_stat_per_vm(stat_data->kvm, 5197 stat_data->desc->desc.offset, val); 5198 break; 5199 case KVM_STAT_VCPU: 5200 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5201 stat_data->desc->desc.offset, val); 5202 break; 5203 } 5204 5205 return r; 5206 } 5207 5208 static int kvm_stat_data_clear(void *data, u64 val) 5209 { 5210 int r = -EFAULT; 5211 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5212 5213 if (val) 5214 return -EINVAL; 5215 5216 switch (stat_data->kind) { 5217 case KVM_STAT_VM: 5218 r = kvm_clear_stat_per_vm(stat_data->kvm, 5219 stat_data->desc->desc.offset); 5220 break; 5221 case KVM_STAT_VCPU: 5222 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5223 stat_data->desc->desc.offset); 5224 break; 5225 } 5226 5227 return r; 5228 } 5229 5230 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5231 { 5232 __simple_attr_check_format("%llu\n", 0ull); 5233 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5234 kvm_stat_data_clear, "%llu\n"); 5235 } 5236 5237 static const struct file_operations stat_fops_per_vm = { 5238 .owner = THIS_MODULE, 5239 .open = kvm_stat_data_open, 5240 .release = kvm_debugfs_release, 5241 .read = simple_attr_read, 5242 .write = simple_attr_write, 5243 .llseek = no_llseek, 5244 }; 5245 5246 static int vm_stat_get(void *_offset, u64 *val) 5247 { 5248 unsigned offset = (long)_offset; 5249 struct kvm *kvm; 5250 u64 tmp_val; 5251 5252 *val = 0; 5253 mutex_lock(&kvm_lock); 5254 list_for_each_entry(kvm, &vm_list, vm_list) { 5255 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5256 *val += tmp_val; 5257 } 5258 mutex_unlock(&kvm_lock); 5259 return 0; 5260 } 5261 5262 static int vm_stat_clear(void *_offset, u64 val) 5263 { 5264 unsigned offset = (long)_offset; 5265 struct kvm *kvm; 5266 5267 if (val) 5268 return -EINVAL; 5269 5270 mutex_lock(&kvm_lock); 5271 list_for_each_entry(kvm, &vm_list, vm_list) { 5272 kvm_clear_stat_per_vm(kvm, offset); 5273 } 5274 mutex_unlock(&kvm_lock); 5275 5276 return 0; 5277 } 5278 5279 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5280 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5281 5282 static int vcpu_stat_get(void *_offset, u64 *val) 5283 { 5284 unsigned offset = (long)_offset; 5285 struct kvm *kvm; 5286 u64 tmp_val; 5287 5288 *val = 0; 5289 mutex_lock(&kvm_lock); 5290 list_for_each_entry(kvm, &vm_list, vm_list) { 5291 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5292 *val += tmp_val; 5293 } 5294 mutex_unlock(&kvm_lock); 5295 return 0; 5296 } 5297 5298 static int vcpu_stat_clear(void *_offset, u64 val) 5299 { 5300 unsigned offset = (long)_offset; 5301 struct kvm *kvm; 5302 5303 if (val) 5304 return -EINVAL; 5305 5306 mutex_lock(&kvm_lock); 5307 list_for_each_entry(kvm, &vm_list, vm_list) { 5308 kvm_clear_stat_per_vcpu(kvm, offset); 5309 } 5310 mutex_unlock(&kvm_lock); 5311 5312 return 0; 5313 } 5314 5315 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5316 "%llu\n"); 5317 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5318 5319 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5320 { 5321 struct kobj_uevent_env *env; 5322 unsigned long long created, active; 5323 5324 if (!kvm_dev.this_device || !kvm) 5325 return; 5326 5327 mutex_lock(&kvm_lock); 5328 if (type == KVM_EVENT_CREATE_VM) { 5329 kvm_createvm_count++; 5330 kvm_active_vms++; 5331 } else if (type == KVM_EVENT_DESTROY_VM) { 5332 kvm_active_vms--; 5333 } 5334 created = kvm_createvm_count; 5335 active = kvm_active_vms; 5336 mutex_unlock(&kvm_lock); 5337 5338 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5339 if (!env) 5340 return; 5341 5342 add_uevent_var(env, "CREATED=%llu", created); 5343 add_uevent_var(env, "COUNT=%llu", active); 5344 5345 if (type == KVM_EVENT_CREATE_VM) { 5346 add_uevent_var(env, "EVENT=create"); 5347 kvm->userspace_pid = task_pid_nr(current); 5348 } else if (type == KVM_EVENT_DESTROY_VM) { 5349 add_uevent_var(env, "EVENT=destroy"); 5350 } 5351 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5352 5353 if (kvm->debugfs_dentry) { 5354 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5355 5356 if (p) { 5357 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5358 if (!IS_ERR(tmp)) 5359 add_uevent_var(env, "STATS_PATH=%s", tmp); 5360 kfree(p); 5361 } 5362 } 5363 /* no need for checks, since we are adding at most only 5 keys */ 5364 env->envp[env->envp_idx++] = NULL; 5365 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5366 kfree(env); 5367 } 5368 5369 static void kvm_init_debug(void) 5370 { 5371 const struct file_operations *fops; 5372 const struct _kvm_stats_desc *pdesc; 5373 int i; 5374 5375 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5376 5377 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5378 pdesc = &kvm_vm_stats_desc[i]; 5379 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5380 fops = &vm_stat_fops; 5381 else 5382 fops = &vm_stat_readonly_fops; 5383 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5384 kvm_debugfs_dir, 5385 (void *)(long)pdesc->desc.offset, fops); 5386 } 5387 5388 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5389 pdesc = &kvm_vcpu_stats_desc[i]; 5390 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5391 fops = &vcpu_stat_fops; 5392 else 5393 fops = &vcpu_stat_readonly_fops; 5394 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5395 kvm_debugfs_dir, 5396 (void *)(long)pdesc->desc.offset, fops); 5397 } 5398 } 5399 5400 static int kvm_suspend(void) 5401 { 5402 if (kvm_usage_count) 5403 hardware_disable_nolock(NULL); 5404 return 0; 5405 } 5406 5407 static void kvm_resume(void) 5408 { 5409 if (kvm_usage_count) { 5410 #ifdef CONFIG_LOCKDEP 5411 WARN_ON(lockdep_is_held(&kvm_count_lock)); 5412 #endif 5413 hardware_enable_nolock(NULL); 5414 } 5415 } 5416 5417 static struct syscore_ops kvm_syscore_ops = { 5418 .suspend = kvm_suspend, 5419 .resume = kvm_resume, 5420 }; 5421 5422 static inline 5423 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5424 { 5425 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5426 } 5427 5428 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5429 { 5430 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5431 5432 WRITE_ONCE(vcpu->preempted, false); 5433 WRITE_ONCE(vcpu->ready, false); 5434 5435 __this_cpu_write(kvm_running_vcpu, vcpu); 5436 kvm_arch_sched_in(vcpu, cpu); 5437 kvm_arch_vcpu_load(vcpu, cpu); 5438 } 5439 5440 static void kvm_sched_out(struct preempt_notifier *pn, 5441 struct task_struct *next) 5442 { 5443 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5444 5445 if (current->on_rq) { 5446 WRITE_ONCE(vcpu->preempted, true); 5447 WRITE_ONCE(vcpu->ready, true); 5448 } 5449 kvm_arch_vcpu_put(vcpu); 5450 __this_cpu_write(kvm_running_vcpu, NULL); 5451 } 5452 5453 /** 5454 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5455 * 5456 * We can disable preemption locally around accessing the per-CPU variable, 5457 * and use the resolved vcpu pointer after enabling preemption again, 5458 * because even if the current thread is migrated to another CPU, reading 5459 * the per-CPU value later will give us the same value as we update the 5460 * per-CPU variable in the preempt notifier handlers. 5461 */ 5462 struct kvm_vcpu *kvm_get_running_vcpu(void) 5463 { 5464 struct kvm_vcpu *vcpu; 5465 5466 preempt_disable(); 5467 vcpu = __this_cpu_read(kvm_running_vcpu); 5468 preempt_enable(); 5469 5470 return vcpu; 5471 } 5472 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5473 5474 /** 5475 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5476 */ 5477 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5478 { 5479 return &kvm_running_vcpu; 5480 } 5481 5482 struct kvm_cpu_compat_check { 5483 void *opaque; 5484 int *ret; 5485 }; 5486 5487 static void check_processor_compat(void *data) 5488 { 5489 struct kvm_cpu_compat_check *c = data; 5490 5491 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5492 } 5493 5494 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5495 struct module *module) 5496 { 5497 struct kvm_cpu_compat_check c; 5498 int r; 5499 int cpu; 5500 5501 r = kvm_arch_init(opaque); 5502 if (r) 5503 goto out_fail; 5504 5505 /* 5506 * kvm_arch_init makes sure there's at most one caller 5507 * for architectures that support multiple implementations, 5508 * like intel and amd on x86. 5509 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5510 * conflicts in case kvm is already setup for another implementation. 5511 */ 5512 r = kvm_irqfd_init(); 5513 if (r) 5514 goto out_irqfd; 5515 5516 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5517 r = -ENOMEM; 5518 goto out_free_0; 5519 } 5520 5521 r = kvm_arch_hardware_setup(opaque); 5522 if (r < 0) 5523 goto out_free_1; 5524 5525 c.ret = &r; 5526 c.opaque = opaque; 5527 for_each_online_cpu(cpu) { 5528 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5529 if (r < 0) 5530 goto out_free_2; 5531 } 5532 5533 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5534 kvm_starting_cpu, kvm_dying_cpu); 5535 if (r) 5536 goto out_free_2; 5537 register_reboot_notifier(&kvm_reboot_notifier); 5538 5539 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 5540 if (!vcpu_align) 5541 vcpu_align = __alignof__(struct kvm_vcpu); 5542 kvm_vcpu_cache = 5543 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5544 SLAB_ACCOUNT, 5545 offsetof(struct kvm_vcpu, arch), 5546 offsetofend(struct kvm_vcpu, stats_id) 5547 - offsetof(struct kvm_vcpu, arch), 5548 NULL); 5549 if (!kvm_vcpu_cache) { 5550 r = -ENOMEM; 5551 goto out_free_3; 5552 } 5553 5554 for_each_possible_cpu(cpu) { 5555 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 5556 GFP_KERNEL, cpu_to_node(cpu))) { 5557 r = -ENOMEM; 5558 goto out_free_4; 5559 } 5560 } 5561 5562 r = kvm_async_pf_init(); 5563 if (r) 5564 goto out_free_5; 5565 5566 kvm_chardev_ops.owner = module; 5567 kvm_vm_fops.owner = module; 5568 kvm_vcpu_fops.owner = module; 5569 5570 r = misc_register(&kvm_dev); 5571 if (r) { 5572 pr_err("kvm: misc device register failed\n"); 5573 goto out_unreg; 5574 } 5575 5576 register_syscore_ops(&kvm_syscore_ops); 5577 5578 kvm_preempt_ops.sched_in = kvm_sched_in; 5579 kvm_preempt_ops.sched_out = kvm_sched_out; 5580 5581 kvm_init_debug(); 5582 5583 r = kvm_vfio_ops_init(); 5584 WARN_ON(r); 5585 5586 return 0; 5587 5588 out_unreg: 5589 kvm_async_pf_deinit(); 5590 out_free_5: 5591 for_each_possible_cpu(cpu) 5592 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5593 out_free_4: 5594 kmem_cache_destroy(kvm_vcpu_cache); 5595 out_free_3: 5596 unregister_reboot_notifier(&kvm_reboot_notifier); 5597 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5598 out_free_2: 5599 kvm_arch_hardware_unsetup(); 5600 out_free_1: 5601 free_cpumask_var(cpus_hardware_enabled); 5602 out_free_0: 5603 kvm_irqfd_exit(); 5604 out_irqfd: 5605 kvm_arch_exit(); 5606 out_fail: 5607 return r; 5608 } 5609 EXPORT_SYMBOL_GPL(kvm_init); 5610 5611 void kvm_exit(void) 5612 { 5613 int cpu; 5614 5615 debugfs_remove_recursive(kvm_debugfs_dir); 5616 misc_deregister(&kvm_dev); 5617 for_each_possible_cpu(cpu) 5618 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5619 kmem_cache_destroy(kvm_vcpu_cache); 5620 kvm_async_pf_deinit(); 5621 unregister_syscore_ops(&kvm_syscore_ops); 5622 unregister_reboot_notifier(&kvm_reboot_notifier); 5623 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5624 on_each_cpu(hardware_disable_nolock, NULL, 1); 5625 kvm_arch_hardware_unsetup(); 5626 kvm_arch_exit(); 5627 kvm_irqfd_exit(); 5628 free_cpumask_var(cpus_hardware_enabled); 5629 kvm_vfio_ops_exit(); 5630 } 5631 EXPORT_SYMBOL_GPL(kvm_exit); 5632 5633 struct kvm_vm_worker_thread_context { 5634 struct kvm *kvm; 5635 struct task_struct *parent; 5636 struct completion init_done; 5637 kvm_vm_thread_fn_t thread_fn; 5638 uintptr_t data; 5639 int err; 5640 }; 5641 5642 static int kvm_vm_worker_thread(void *context) 5643 { 5644 /* 5645 * The init_context is allocated on the stack of the parent thread, so 5646 * we have to locally copy anything that is needed beyond initialization 5647 */ 5648 struct kvm_vm_worker_thread_context *init_context = context; 5649 struct kvm *kvm = init_context->kvm; 5650 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5651 uintptr_t data = init_context->data; 5652 int err; 5653 5654 err = kthread_park(current); 5655 /* kthread_park(current) is never supposed to return an error */ 5656 WARN_ON(err != 0); 5657 if (err) 5658 goto init_complete; 5659 5660 err = cgroup_attach_task_all(init_context->parent, current); 5661 if (err) { 5662 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5663 __func__, err); 5664 goto init_complete; 5665 } 5666 5667 set_user_nice(current, task_nice(init_context->parent)); 5668 5669 init_complete: 5670 init_context->err = err; 5671 complete(&init_context->init_done); 5672 init_context = NULL; 5673 5674 if (err) 5675 return err; 5676 5677 /* Wait to be woken up by the spawner before proceeding. */ 5678 kthread_parkme(); 5679 5680 if (!kthread_should_stop()) 5681 err = thread_fn(kvm, data); 5682 5683 return err; 5684 } 5685 5686 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5687 uintptr_t data, const char *name, 5688 struct task_struct **thread_ptr) 5689 { 5690 struct kvm_vm_worker_thread_context init_context = {}; 5691 struct task_struct *thread; 5692 5693 *thread_ptr = NULL; 5694 init_context.kvm = kvm; 5695 init_context.parent = current; 5696 init_context.thread_fn = thread_fn; 5697 init_context.data = data; 5698 init_completion(&init_context.init_done); 5699 5700 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5701 "%s-%d", name, task_pid_nr(current)); 5702 if (IS_ERR(thread)) 5703 return PTR_ERR(thread); 5704 5705 /* kthread_run is never supposed to return NULL */ 5706 WARN_ON(thread == NULL); 5707 5708 wait_for_completion(&init_context.init_done); 5709 5710 if (!init_context.err) 5711 *thread_ptr = thread; 5712 5713 return init_context.err; 5714 } 5715