Lines Matching refs:kvm

151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) in kvm_arch_guest_memory_reclaimed() argument
290 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
304 vcpu = kvm_get_vcpu(kvm, i); in kvm_make_vcpus_request_mask()
316 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, in kvm_make_all_cpus_request_except() argument
330 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request_except()
342 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
344 return kvm_make_all_cpus_request_except(kvm, req, NULL); in kvm_make_all_cpus_request()
348 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
350 ++kvm->stat.generic.remote_tlb_flush_requests; in kvm_flush_remote_tlbs()
363 if (!kvm_arch_flush_remote_tlbs(kvm) in kvm_flush_remote_tlbs()
364 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
365 ++kvm->stat.generic.remote_tlb_flush; in kvm_flush_remote_tlbs()
369 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument
371 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range()
379 kvm_flush_remote_tlbs(kvm); in kvm_flush_remote_tlbs_range()
382 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, in kvm_flush_remote_tlbs_memslot() argument
392 lockdep_assert_held(&kvm->slots_lock); in kvm_flush_remote_tlbs_memslot()
393 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); in kvm_flush_remote_tlbs_memslot()
396 static void kvm_flush_shadow_all(struct kvm *kvm) in kvm_flush_shadow_all() argument
398 kvm_arch_flush_shadow_all(kvm); in kvm_flush_shadow_all()
399 kvm_arch_guest_memory_reclaimed(kvm); in kvm_flush_shadow_all()
484 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
488 vcpu->kvm = kvm; in kvm_vcpu_init()
524 void kvm_destroy_vcpus(struct kvm *kvm) in kvm_destroy_vcpus() argument
529 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_destroy_vcpus()
531 xa_erase(&kvm->vcpu_array, i); in kvm_destroy_vcpus()
534 atomic_set(&kvm->online_vcpus, 0); in kvm_destroy_vcpus()
539 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
541 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
544 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
546 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
549 typedef void (*on_unlock_fn_t)(struct kvm *kvm);
583 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, in __kvm_handle_hva_range() argument
600 idx = srcu_read_lock(&kvm->srcu); in __kvm_handle_hva_range()
605 slots = __kvm_memslots(kvm, i); in __kvm_handle_hva_range()
634 KVM_MMU_LOCK(kvm); in __kvm_handle_hva_range()
636 range->on_lock(kvm, range->start, range->end); in __kvm_handle_hva_range()
640 ret |= range->handler(kvm, &gfn_range); in __kvm_handle_hva_range()
645 kvm_flush_remote_tlbs(kvm); in __kvm_handle_hva_range()
648 KVM_MMU_UNLOCK(kvm); in __kvm_handle_hva_range()
650 range->on_unlock(kvm); in __kvm_handle_hva_range()
653 srcu_read_unlock(&kvm->srcu, idx); in __kvm_handle_hva_range()
665 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_handle_hva_range() local
677 return __kvm_handle_hva_range(kvm, &range); in kvm_handle_hva_range()
685 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_handle_hva_range_no_flush() local
696 return __kvm_handle_hva_range(kvm, &range); in kvm_handle_hva_range_no_flush()
699 static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_change_spte_gfn() argument
708 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); in kvm_change_spte_gfn()
713 return kvm_set_spte_gfn(kvm, range); in kvm_change_spte_gfn()
721 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
734 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); in kvm_mmu_notifier_change_pte()
735 if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) in kvm_mmu_notifier_change_pte()
741 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, in kvm_mmu_invalidate_begin() argument
749 kvm->mmu_invalidate_in_progress++; in kvm_mmu_invalidate_begin()
750 if (likely(kvm->mmu_invalidate_in_progress == 1)) { in kvm_mmu_invalidate_begin()
751 kvm->mmu_invalidate_range_start = start; in kvm_mmu_invalidate_begin()
752 kvm->mmu_invalidate_range_end = end; in kvm_mmu_invalidate_begin()
763 kvm->mmu_invalidate_range_start = in kvm_mmu_invalidate_begin()
764 min(kvm->mmu_invalidate_range_start, start); in kvm_mmu_invalidate_begin()
765 kvm->mmu_invalidate_range_end = in kvm_mmu_invalidate_begin()
766 max(kvm->mmu_invalidate_range_end, end); in kvm_mmu_invalidate_begin()
773 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
794 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
795 kvm->mn_active_invalidate_count++; in kvm_mmu_notifier_invalidate_range_start()
796 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
808 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, in kvm_mmu_notifier_invalidate_range_start()
811 __kvm_handle_hva_range(kvm, &hva_range); in kvm_mmu_notifier_invalidate_range_start()
816 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, in kvm_mmu_invalidate_end() argument
824 kvm->mmu_invalidate_seq++; in kvm_mmu_invalidate_end()
831 kvm->mmu_invalidate_in_progress--; in kvm_mmu_invalidate_end()
837 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
849 __kvm_handle_hva_range(kvm, &hva_range); in kvm_mmu_notifier_invalidate_range_end()
852 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
853 wake = (--kvm->mn_active_invalidate_count == 0); in kvm_mmu_notifier_invalidate_range_end()
854 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
861 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); in kvm_mmu_notifier_invalidate_range_end()
863 BUG_ON(kvm->mmu_invalidate_in_progress < 0); in kvm_mmu_notifier_invalidate_range_end()
913 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
916 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
917 kvm_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
918 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
931 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
933 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
934 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
939 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
951 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); in kvm_pm_notifier_call() local
953 return kvm_arch_pm_notifier(kvm, state); in kvm_pm_notifier_call()
956 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
958 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; in kvm_init_pm_notifier()
960 kvm->pm_notifier.priority = INT_MAX; in kvm_init_pm_notifier()
961 register_pm_notifier(&kvm->pm_notifier); in kvm_init_pm_notifier()
964 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
966 unregister_pm_notifier(&kvm->pm_notifier); in kvm_destroy_pm_notifier()
969 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
973 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
988 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_free_memslot() argument
992 kvm_arch_free_memslot(kvm, slot); in kvm_free_memslot()
997 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
1013 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
1029 static void kvm_destroy_vm_debugfs(struct kvm *kvm) in kvm_destroy_vm_debugfs() argument
1035 if (IS_ERR(kvm->debugfs_dentry)) in kvm_destroy_vm_debugfs()
1038 debugfs_remove_recursive(kvm->debugfs_dentry); in kvm_destroy_vm_debugfs()
1040 if (kvm->debugfs_stat_data) { in kvm_destroy_vm_debugfs()
1042 kfree(kvm->debugfs_stat_data[i]); in kvm_destroy_vm_debugfs()
1043 kfree(kvm->debugfs_stat_data); in kvm_destroy_vm_debugfs()
1047 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname) in kvm_create_vm_debugfs() argument
1075 kvm->debugfs_dentry = dent; in kvm_create_vm_debugfs()
1076 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, in kvm_create_vm_debugfs()
1077 sizeof(*kvm->debugfs_stat_data), in kvm_create_vm_debugfs()
1079 if (!kvm->debugfs_stat_data) in kvm_create_vm_debugfs()
1088 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
1091 kvm->debugfs_stat_data[i] = stat_data; in kvm_create_vm_debugfs()
1093 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
1103 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
1106 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; in kvm_create_vm_debugfs()
1108 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
1112 ret = kvm_arch_create_vm_debugfs(kvm); in kvm_create_vm_debugfs()
1118 kvm_destroy_vm_debugfs(kvm); in kvm_create_vm_debugfs()
1126 int __weak kvm_arch_post_init_vm(struct kvm *kvm) in kvm_arch_post_init_vm() argument
1135 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) in kvm_arch_pre_destroy_vm() argument
1145 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) in kvm_arch_create_vm_debugfs() argument
1150 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) in kvm_create_vm()
1152 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
1157 if (!kvm) in kvm_create_vm()
1163 KVM_MMU_LOCK_INIT(kvm); in kvm_create_vm()
1165 kvm->mm = current->mm; in kvm_create_vm()
1166 kvm_eventfd_init(kvm); in kvm_create_vm()
1167 mutex_init(&kvm->lock); in kvm_create_vm()
1168 mutex_init(&kvm->irq_lock); in kvm_create_vm()
1169 mutex_init(&kvm->slots_lock); in kvm_create_vm()
1170 mutex_init(&kvm->slots_arch_lock); in kvm_create_vm()
1171 spin_lock_init(&kvm->mn_invalidate_lock); in kvm_create_vm()
1172 rcuwait_init(&kvm->mn_memslots_update_rcuwait); in kvm_create_vm()
1173 xa_init(&kvm->vcpu_array); in kvm_create_vm()
1175 INIT_LIST_HEAD(&kvm->gpc_list); in kvm_create_vm()
1176 spin_lock_init(&kvm->gpc_lock); in kvm_create_vm()
1178 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
1179 kvm->max_vcpus = KVM_MAX_VCPUS; in kvm_create_vm()
1187 kvm->debugfs_dentry = ERR_PTR(-ENOENT); in kvm_create_vm()
1189 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d", in kvm_create_vm()
1192 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
1194 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
1197 refcount_set(&kvm->users_count, 1); in kvm_create_vm()
1200 slots = &kvm->__memslots[i][j]; in kvm_create_vm()
1212 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); in kvm_create_vm()
1216 rcu_assign_pointer(kvm->buses[i], in kvm_create_vm()
1218 if (!kvm->buses[i]) in kvm_create_vm()
1222 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
1231 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
1234 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
1238 r = kvm_coalesced_mmio_init(kvm); in kvm_create_vm()
1242 r = kvm_create_vm_debugfs(kvm, fdname); in kvm_create_vm()
1246 r = kvm_arch_post_init_vm(kvm); in kvm_create_vm()
1251 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
1255 kvm_init_pm_notifier(kvm); in kvm_create_vm()
1257 return kvm; in kvm_create_vm()
1260 kvm_destroy_vm_debugfs(kvm); in kvm_create_vm()
1262 kvm_coalesced_mmio_free(kvm); in kvm_create_vm()
1265 if (kvm->mmu_notifier.ops) in kvm_create_vm()
1266 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); in kvm_create_vm()
1271 kvm_arch_destroy_vm(kvm); in kvm_create_vm()
1273 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); in kvm_create_vm()
1275 kfree(kvm_get_bus(kvm, i)); in kvm_create_vm()
1276 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
1278 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
1280 kvm_arch_free_vm(kvm); in kvm_create_vm()
1286 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
1295 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { in kvm_destroy_devices()
1301 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
1304 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
1306 kvm_destroy_pm_notifier(kvm); in kvm_destroy_vm()
1307 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); in kvm_destroy_vm()
1308 kvm_destroy_vm_debugfs(kvm); in kvm_destroy_vm()
1309 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
1311 list_del(&kvm->vm_list); in kvm_destroy_vm()
1313 kvm_arch_pre_destroy_vm(kvm); in kvm_destroy_vm()
1315 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
1317 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); in kvm_destroy_vm()
1321 kvm->buses[i] = NULL; in kvm_destroy_vm()
1323 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
1325 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
1334 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); in kvm_destroy_vm()
1335 kvm->mn_active_invalidate_count = 0; in kvm_destroy_vm()
1337 kvm_flush_shadow_all(kvm); in kvm_destroy_vm()
1339 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
1340 kvm_destroy_devices(kvm); in kvm_destroy_vm()
1342 kvm_free_memslots(kvm, &kvm->__memslots[i][0]); in kvm_destroy_vm()
1343 kvm_free_memslots(kvm, &kvm->__memslots[i][1]); in kvm_destroy_vm()
1345 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
1346 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
1347 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
1354 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
1356 refcount_inc(&kvm->users_count); in kvm_get_kvm()
1364 bool kvm_get_kvm_safe(struct kvm *kvm) in kvm_get_kvm_safe() argument
1366 return refcount_inc_not_zero(&kvm->users_count); in kvm_get_kvm_safe()
1370 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
1372 if (refcount_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
1373 kvm_destroy_vm(kvm); in kvm_put_kvm()
1384 void kvm_put_kvm_no_destroy(struct kvm *kvm) in kvm_put_kvm_no_destroy() argument
1386 WARN_ON(refcount_dec_and_test(&kvm->users_count)); in kvm_put_kvm_no_destroy()
1392 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
1394 kvm_irqfd_release(kvm); in kvm_vm_release()
1396 kvm_put_kvm(kvm); in kvm_vm_release()
1415 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) in kvm_get_inactive_memslots() argument
1417 struct kvm_memslots *active = __kvm_memslots(kvm, as_id); in kvm_get_inactive_memslots()
1420 return &kvm->__memslots[as_id][node_idx_inactive]; in kvm_get_inactive_memslots()
1495 static void kvm_replace_memslot(struct kvm *kvm, in kvm_replace_memslot() argument
1500 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); in kvm_replace_memslot()
1562 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) in kvm_swap_active_memslots() argument
1564 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); in kvm_swap_active_memslots()
1567 u64 gen = __kvm_memslots(kvm, as_id)->generation; in kvm_swap_active_memslots()
1577 spin_lock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1578 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); in kvm_swap_active_memslots()
1579 while (kvm->mn_active_invalidate_count) { in kvm_swap_active_memslots()
1581 spin_unlock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1583 spin_lock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1585 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); in kvm_swap_active_memslots()
1586 rcu_assign_pointer(kvm->memslots[as_id], slots); in kvm_swap_active_memslots()
1587 spin_unlock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1594 mutex_unlock(&kvm->slots_arch_lock); in kvm_swap_active_memslots()
1596 synchronize_srcu_expedited(&kvm->srcu); in kvm_swap_active_memslots()
1615 kvm_arch_memslots_updated(kvm, gen); in kvm_swap_active_memslots()
1620 static int kvm_prepare_memory_region(struct kvm *kvm, in kvm_prepare_memory_region() argument
1639 else if (kvm_use_dirty_bitmap(kvm)) { in kvm_prepare_memory_region()
1644 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) in kvm_prepare_memory_region()
1649 r = kvm_arch_prepare_memory_region(kvm, old, new, change); in kvm_prepare_memory_region()
1658 static void kvm_commit_memory_region(struct kvm *kvm, in kvm_commit_memory_region() argument
1670 kvm->nr_memslot_pages -= old->npages; in kvm_commit_memory_region()
1672 kvm->nr_memslot_pages += new->npages; in kvm_commit_memory_region()
1676 atomic_set(&kvm->nr_memslots_dirty_logging, in kvm_commit_memory_region()
1677 atomic_read(&kvm->nr_memslots_dirty_logging) + change); in kvm_commit_memory_region()
1680 kvm_arch_commit_memory_region(kvm, old, new, change); in kvm_commit_memory_region()
1688 kvm_free_memslot(kvm, old); in kvm_commit_memory_region()
1720 static void kvm_activate_memslot(struct kvm *kvm, in kvm_activate_memslot() argument
1726 kvm_swap_active_memslots(kvm, as_id); in kvm_activate_memslot()
1729 kvm_replace_memslot(kvm, old, new); in kvm_activate_memslot()
1745 static void kvm_invalidate_memslot(struct kvm *kvm, in kvm_invalidate_memslot() argument
1756 kvm_replace_memslot(kvm, old, invalid_slot); in kvm_invalidate_memslot()
1763 kvm_swap_active_memslots(kvm, old->as_id); in kvm_invalidate_memslot()
1771 kvm_arch_flush_shadow_memslot(kvm, old); in kvm_invalidate_memslot()
1772 kvm_arch_guest_memory_reclaimed(kvm); in kvm_invalidate_memslot()
1775 mutex_lock(&kvm->slots_arch_lock); in kvm_invalidate_memslot()
1787 static void kvm_create_memslot(struct kvm *kvm, in kvm_create_memslot() argument
1791 kvm_replace_memslot(kvm, NULL, new); in kvm_create_memslot()
1792 kvm_activate_memslot(kvm, NULL, new); in kvm_create_memslot()
1795 static void kvm_delete_memslot(struct kvm *kvm, in kvm_delete_memslot() argument
1803 kvm_replace_memslot(kvm, old, NULL); in kvm_delete_memslot()
1804 kvm_activate_memslot(kvm, invalid_slot, NULL); in kvm_delete_memslot()
1807 static void kvm_move_memslot(struct kvm *kvm, in kvm_move_memslot() argument
1816 kvm_replace_memslot(kvm, old, new); in kvm_move_memslot()
1817 kvm_activate_memslot(kvm, invalid_slot, new); in kvm_move_memslot()
1820 static void kvm_update_flags_memslot(struct kvm *kvm, in kvm_update_flags_memslot() argument
1829 kvm_replace_memslot(kvm, old, new); in kvm_update_flags_memslot()
1830 kvm_activate_memslot(kvm, old, new); in kvm_update_flags_memslot()
1833 static int kvm_set_memslot(struct kvm *kvm, in kvm_set_memslot() argument
1855 mutex_lock(&kvm->slots_arch_lock); in kvm_set_memslot()
1873 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1876 kvm_invalidate_memslot(kvm, old, invalid_slot); in kvm_set_memslot()
1879 r = kvm_prepare_memory_region(kvm, old, new, change); in kvm_set_memslot()
1888 kvm_activate_memslot(kvm, invalid_slot, old); in kvm_set_memslot()
1891 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1904 kvm_create_memslot(kvm, new); in kvm_set_memslot()
1906 kvm_delete_memslot(kvm, old, invalid_slot); in kvm_set_memslot()
1908 kvm_move_memslot(kvm, old, new, invalid_slot); in kvm_set_memslot()
1910 kvm_update_flags_memslot(kvm, old, new); in kvm_set_memslot()
1923 kvm_commit_memory_region(kvm, old, new, change); in kvm_set_memslot()
1949 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
1986 slots = __kvm_memslots(kvm, as_id); in __kvm_set_memory_region()
1998 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) in __kvm_set_memory_region()
2001 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); in __kvm_set_memory_region()
2014 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) in __kvm_set_memory_region()
2046 r = kvm_set_memslot(kvm, old, new, change); in __kvm_set_memory_region()
2053 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
2058 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
2059 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
2060 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
2065 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
2071 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
2082 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, in kvm_get_dirty_log() argument
2091 if (!kvm_use_dirty_bitmap(kvm)) in kvm_get_dirty_log()
2102 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
2107 kvm_arch_sync_dirty_log(kvm, *memslot); in kvm_get_dirty_log()
2145 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_get_dirty_log_protect() argument
2156 if (!kvm_use_dirty_bitmap(kvm)) in kvm_get_dirty_log_protect()
2164 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
2171 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_get_dirty_log_protect()
2175 if (kvm->manual_dirty_log_protect) { in kvm_get_dirty_log_protect()
2189 KVM_MMU_LOCK(kvm); in kvm_get_dirty_log_protect()
2202 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
2205 KVM_MMU_UNLOCK(kvm); in kvm_get_dirty_log_protect()
2209 kvm_flush_remote_tlbs_memslot(kvm, memslot); in kvm_get_dirty_log_protect()
2236 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
2241 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
2243 r = kvm_get_dirty_log_protect(kvm, log); in kvm_vm_ioctl_get_dirty_log()
2245 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
2255 static int kvm_clear_dirty_log_protect(struct kvm *kvm, in kvm_clear_dirty_log_protect() argument
2268 if (!kvm_use_dirty_bitmap(kvm)) in kvm_clear_dirty_log_protect()
2279 slots = __kvm_memslots(kvm, as_id); in kvm_clear_dirty_log_protect()
2293 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_clear_dirty_log_protect()
2300 KVM_MMU_LOCK(kvm); in kvm_clear_dirty_log_protect()
2319 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_clear_dirty_log_protect()
2323 KVM_MMU_UNLOCK(kvm); in kvm_clear_dirty_log_protect()
2326 kvm_flush_remote_tlbs_memslot(kvm, memslot); in kvm_clear_dirty_log_protect()
2331 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_clear_dirty_log() argument
2336 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
2338 r = kvm_clear_dirty_log_protect(kvm, log); in kvm_vm_ioctl_clear_dirty_log()
2340 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
2345 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2347 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2384 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2386 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2457 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
2459 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2488 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
2490 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
2778 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
2781 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false, in gfn_to_pfn_prot()
2806 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
2808 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
2841 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
2846 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
2876 pfn = gfn_to_pfn(vcpu->kvm, gfn); in kvm_vcpu_map()
3043 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
3046 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
3061 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
3069 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
3129 static int __kvm_write_guest_page(struct kvm *kvm, in __kvm_write_guest_page() argument
3142 mark_page_dirty_in_slot(kvm, memslot, gfn); in __kvm_write_guest_page()
3146 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
3149 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
3151 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); in kvm_write_guest_page()
3160 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
3164 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
3173 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
3247 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
3250 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
3255 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_offset_cached() argument
3259 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached()
3275 return kvm_write_guest(kvm, gpa, data, len); in kvm_write_guest_offset_cached()
3280 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); in kvm_write_guest_offset_cached()
3286 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
3289 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_write_guest_cached()
3293 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_offset_cached() argument
3297 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_offset_cached()
3313 return kvm_read_guest(kvm, gpa, data, len); in kvm_read_guest_offset_cached()
3323 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
3326 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_read_guest_cached()
3330 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
3339 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest()
3350 void mark_page_dirty_in_slot(struct kvm *kvm, in mark_page_dirty_in_slot() argument
3357 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) in mark_page_dirty_in_slot()
3360 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm)); in mark_page_dirty_in_slot()
3367 if (kvm->dirty_ring_size && vcpu) in mark_page_dirty_in_slot()
3375 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
3379 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
3380 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
3389 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
3457 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
3470 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
3535 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_max_halt_poll_ns() local
3537 if (kvm->override_halt_poll_ns) { in kvm_vcpu_max_halt_poll_ns()
3545 return READ_ONCE(kvm->max_halt_poll_ns); in kvm_vcpu_max_halt_poll_ns()
3773 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
3781 last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu); in kvm_vcpu_on_spin()
3791 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
3812 WRITE_ONCE(kvm->last_boosted_vcpu, i); in kvm_vcpu_on_spin()
3828 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) in kvm_page_in_dirty_ring() argument
3833 kvm->dirty_ring_size / PAGE_SIZE); in kvm_page_in_dirty_ring()
3852 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
3854 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) in kvm_vcpu_fault()
3874 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || in kvm_vcpu_mmap()
3875 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && in kvm_vcpu_mmap()
3887 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
3933 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
3944 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
3953 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3954 if (kvm->created_vcpus >= kvm->max_vcpus) { in kvm_vm_ioctl_create_vcpu()
3955 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3959 r = kvm_arch_vcpu_precreate(kvm, id); in kvm_vm_ioctl_create_vcpu()
3961 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3965 kvm->created_vcpus++; in kvm_vm_ioctl_create_vcpu()
3966 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3982 kvm_vcpu_init(vcpu, kvm, id); in kvm_vm_ioctl_create_vcpu()
3988 if (kvm->dirty_ring_size) { in kvm_vm_ioctl_create_vcpu()
3990 id, kvm->dirty_ring_size); in kvm_vm_ioctl_create_vcpu()
3995 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4003 if (kvm_get_vcpu_by_id(kvm, id)) { in kvm_vm_ioctl_create_vcpu()
4008 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
4009 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT); in kvm_vm_ioctl_create_vcpu()
4014 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
4019 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) { in kvm_vm_ioctl_create_vcpu()
4029 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
4031 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4037 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_create_vcpu()
4038 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx); in kvm_vm_ioctl_create_vcpu()
4040 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4049 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4050 kvm->created_vcpus--; in kvm_vm_ioctl_create_vcpu()
4051 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4080 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_stats_release()
4108 kvm_get_kvm(vcpu->kvm); in kvm_vcpu_ioctl_get_stats_fd()
4125 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_ioctl()
4335 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_compat_ioctl()
4401 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) in kvm_device_ioctl()
4422 struct kvm *kvm = dev->kvm; in kvm_device_release() local
4425 mutex_lock(&kvm->lock); in kvm_device_release()
4428 mutex_unlock(&kvm->lock); in kvm_device_release()
4431 kvm_put_kvm(kvm); in kvm_device_release()
4475 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
4500 dev->kvm = kvm; in kvm_ioctl_create_device()
4502 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4505 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4509 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
4510 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4515 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
4518 kvm_put_kvm_no_destroy(kvm); in kvm_ioctl_create_device()
4519 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4523 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4533 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
4592 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
4595 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) in kvm_vm_ioctl_enable_dirty_log_ring() argument
4616 if (kvm->dirty_ring_size) in kvm_vm_ioctl_enable_dirty_log_ring()
4619 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4621 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_dirty_log_ring()
4625 kvm->dirty_ring_size = size; in kvm_vm_ioctl_enable_dirty_log_ring()
4629 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4633 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) in kvm_vm_ioctl_reset_dirty_pages() argument
4639 if (!kvm->dirty_ring_size) in kvm_vm_ioctl_reset_dirty_pages()
4642 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4644 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_vm_ioctl_reset_dirty_pages()
4645 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); in kvm_vm_ioctl_reset_dirty_pages()
4647 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4650 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_reset_dirty_pages()
4655 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, in kvm_vm_ioctl_enable_cap() argument
4661 bool kvm_are_all_memslots_empty(struct kvm *kvm) in kvm_are_all_memslots_empty() argument
4665 lockdep_assert_held(&kvm->slots_lock); in kvm_are_all_memslots_empty()
4668 if (!kvm_memslots_empty(__kvm_memslots(kvm, i))) in kvm_are_all_memslots_empty()
4676 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, in kvm_vm_ioctl_enable_cap_generic() argument
4689 kvm->manual_dirty_log_protect = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
4697 kvm->max_halt_poll_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
4706 kvm->override_halt_poll_ns = true; in kvm_vm_ioctl_enable_cap_generic()
4712 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) in kvm_vm_ioctl_enable_cap_generic()
4715 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap_generic()
4720 !kvm->dirty_ring_size || cap->flags) in kvm_vm_ioctl_enable_cap_generic()
4723 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap_generic()
4730 if (kvm_are_all_memslots_empty(kvm)) { in kvm_vm_ioctl_enable_cap_generic()
4731 kvm->dirty_ring_with_bitmap = true; in kvm_vm_ioctl_enable_cap_generic()
4735 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap_generic()
4740 return kvm_vm_ioctl_enable_cap(kvm, cap); in kvm_vm_ioctl_enable_cap_generic()
4747 struct kvm *kvm = file->private_data; in kvm_vm_stats_read() local
4749 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, in kvm_vm_stats_read()
4750 &kvm_vm_stats_desc[0], &kvm->stat, in kvm_vm_stats_read()
4751 sizeof(kvm->stat), user_buffer, size, offset); in kvm_vm_stats_read()
4756 struct kvm *kvm = file->private_data; in kvm_vm_stats_release() local
4758 kvm_put_kvm(kvm); in kvm_vm_stats_release()
4768 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) in kvm_vm_ioctl_get_stats_fd() argument
4778 &kvm_vm_stats_fops, kvm, O_RDONLY); in kvm_vm_ioctl_get_stats_fd()
4784 kvm_get_kvm(kvm); in kvm_vm_ioctl_get_stats_fd()
4795 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
4799 if (kvm->mm != current->mm || kvm->vm_dead) in kvm_vm_ioctl()
4803 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
4811 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); in kvm_vm_ioctl()
4822 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
4831 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
4841 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_ioctl()
4852 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
4861 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
4871 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
4880 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
4890 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
4903 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
4928 if (!kvm_arch_can_set_irq_routing(kvm)) in kvm_vm_ioctl()
4944 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
4957 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
4969 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
4972 r = kvm_vm_ioctl_reset_dirty_pages(kvm); in kvm_vm_ioctl()
4975 r = kvm_vm_ioctl_get_stats_fd(kvm); in kvm_vm_ioctl()
5013 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
5016 if (kvm->mm != current->mm || kvm->vm_dead) in kvm_vm_compat_ioctl()
5038 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
5054 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
5081 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
5090 kvm = kvm_create_vm(type, fdname); in kvm_dev_ioctl_create_vm()
5091 if (IS_ERR(kvm)) { in kvm_dev_ioctl_create_vm()
5092 r = PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
5096 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); in kvm_dev_ioctl_create_vm()
5108 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); in kvm_dev_ioctl_create_vm()
5114 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
5472 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
5492 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
5543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
5551 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
5558 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_register_dev()
5586 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
5587 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
5593 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
5599 lockdep_assert_held(&kvm->slots_lock); in kvm_io_bus_unregister_dev()
5601 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_unregister_dev()
5623 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
5624 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
5641 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_get_dev() argument
5648 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_io_bus_get_dev()
5650 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); in kvm_io_bus_get_dev()
5661 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_io_bus_get_dev()
5679 if (!kvm_get_kvm_safe(stat_data->kvm)) in kvm_debugfs_open()
5686 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_open()
5696 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_release()
5701 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vm() argument
5703 *val = *(u64 *)((void *)(&kvm->stat) + offset); in kvm_get_stat_per_vm()
5708 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vm() argument
5710 *(u64 *)((void *)(&kvm->stat) + offset) = 0; in kvm_clear_stat_per_vm()
5715 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vcpu() argument
5722 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_stat_per_vcpu()
5728 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vcpu() argument
5733 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_clear_stat_per_vcpu()
5746 r = kvm_get_stat_per_vm(stat_data->kvm, in kvm_stat_data_get()
5750 r = kvm_get_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_get()
5768 r = kvm_clear_stat_per_vm(stat_data->kvm, in kvm_stat_data_clear()
5772 r = kvm_clear_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_clear()
5799 struct kvm *kvm; in vm_stat_get() local
5804 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_get()
5805 kvm_get_stat_per_vm(kvm, offset, &tmp_val); in vm_stat_get()
5815 struct kvm *kvm; in vm_stat_clear() local
5821 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_clear()
5822 kvm_clear_stat_per_vm(kvm, offset); in vm_stat_clear()
5835 struct kvm *kvm; in vcpu_stat_get() local
5840 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_get()
5841 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); in vcpu_stat_get()
5851 struct kvm *kvm; in vcpu_stat_clear() local
5857 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_clear()
5858 kvm_clear_stat_per_vcpu(kvm, offset); in vcpu_stat_clear()
5869 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) in kvm_uevent_notify_change() argument
5874 if (!kvm_dev.this_device || !kvm) in kvm_uevent_notify_change()
5897 kvm->userspace_pid = task_pid_nr(current); in kvm_uevent_notify_change()
5901 add_uevent_var(env, "PID=%d", kvm->userspace_pid); in kvm_uevent_notify_change()
5903 if (!IS_ERR(kvm->debugfs_dentry)) { in kvm_uevent_notify_change()
5907 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); in kvm_uevent_notify_change()
6168 struct kvm *kvm; member
6184 struct kvm *kvm = init_context->kvm; in kvm_vm_worker_thread() local
6216 err = thread_fn(kvm, data); in kvm_vm_worker_thread()
6240 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, in kvm_vm_create_worker_thread() argument
6248 init_context.kvm = kvm; in kvm_vm_create_worker_thread()
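
The two usage patterns that recur throughout the references above are the VM refcount pair kvm_get_kvm_safe()/kvm_put_kvm() and the guest-memory accessors kvm_read_guest()/kvm_write_guest(). The sketch below ties them together in a hypothetical helper: example_poke_guest() is invented purely for illustration and is not part of kvm_main.c, while the kvm_* symbols are the ones whose signatures appear in the listing (declared via <linux/kvm_host.h>).

	#include <linux/kvm_host.h>	/* struct kvm, gpa_t, kvm_read_guest(), ... */

	/*
	 * Hypothetical helper, not from kvm_main.c: bump a 32-bit value in guest
	 * memory while holding a reference on the VM so it cannot be destroyed
	 * underneath us.
	 */
	static int example_poke_guest(struct kvm *kvm, gpa_t gpa)
	{
		u32 val;
		int r;

		if (!kvm_get_kvm_safe(kvm))	/* users_count already hit zero: VM is dying */
			return -ENOENT;

		r = kvm_read_guest(kvm, gpa, &val, sizeof(val));
		if (!r) {
			val++;
			r = kvm_write_guest(kvm, gpa, &val, sizeof(val));
		}

		kvm_put_kvm(kvm);		/* dropping the last reference runs kvm_destroy_vm() */
		return r;
	}

kvm_get_kvm() would be the non-failing variant for callers that already hold a live reference, for example code running on behalf of a vCPU, which itself pins the VM.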