Lines Matching +full:- +full:kvm

// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 */

#include <linux/kvm.h>
#include <trace/events/kvm.h>
#include <kvm/iodev.h>
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
/* in irqfd_inject(): */
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
/* in irqfd_resampler_notify(): */
        list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
                                 srcu_read_lock_held(&resampler->kvm->irq_srcu))
                eventfd_signal(irqfd->resamplefd, 1);
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
/* in irqfd_resampler_ack(): */
        struct kvm *kvm;

        kvm = resampler->kvm;

        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_resampler_notify(resampler);
        srcu_read_unlock(&kvm->irq_srcu, idx);
/* in irqfd_resampler_shutdown(): */
        struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);

        if (list_empty(&resampler->list)) {
                list_del_rcu(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                /*
                 * synchronize_srcu(&kvm->irq_srcu) already called
                 * in kvm_unregister_irq_ack_notifier().
                 */
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
/*
 * Race-free decouple logic (ordering is critical)
 */
/* in irqfd_shutdown(): */
        struct kvm *kvm = irqfd->kvm;

        synchronize_srcu(&kvm->irq_srcu);

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        flush_work(&irqfd->inject);

        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }

        irq_bypass_unregister_consumer(&irqfd->consumer);
        eventfd_ctx_put(irqfd->eventfd);
/* assumes kvm->irqfds.lock is held */
/* in irqfd_is_active(): */
        return list_empty(&irqfd->list) ? false : true;
/* assumes kvm->irqfds.lock is held */
/* in irqfd_deactivate(): */
        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
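/*
 * Hedged sketch (not from this file) of the deferred-destruction pattern
 * irqfd_deactivate() relies on: unlink under the spinlock, then do the
 * sleeping teardown from a workqueue. All names below are illustrative.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *cleanup_wq;     /* stand-in for irqfd_cleanup_wq */

struct obj {
        struct list_head list;
        struct work_struct shutdown;
};

static void obj_shutdown(struct work_struct *work)
{
        struct obj *obj = container_of(work, struct obj, shutdown);

        /* runs in process context: free to sleep, flush, and kfree */
        kfree(obj);
}

static void obj_deactivate(struct obj *obj)     /* caller holds the list lock */
{
        list_del_init(&obj->list);
        queue_work(cleanup_wq, &obj->shutdown);
}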
/* in kvm_arch_set_irq_inatomic(): */
                            struct kvm *kvm, int irq_source_id,

        return -EWOULDBLOCK;
/*
 * Called with wqh->lock held and interrupts disabled
 */
/* in irqfd_wakeup(): */
        struct kvm *kvm = irqfd->kvm;

        eventfd_ctx_do_read(irqfd->eventfd, &cnt);

        idx = srcu_read_lock(&kvm->irq_srcu);
        do {
                seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                irq = irqfd->irq_entry;
        } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));

        if (kvm_arch_set_irq_inatomic(&irq, kvm,
                                      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                      false) == -EWOULDBLOCK)
                schedule_work(&irqfd->inject);
        srcu_read_unlock(&kvm->irq_srcu, idx);

        /* The eventfd is closing, detach from KVM */
        spin_lock_irqsave(&kvm->irqfds.lock, iflags);

        /*
         * The irqfd may already have been deactivated from the KVM side
         * before it is unhooked from the wait-queue.  If so, we can simply
         * return: we cannot race with the irqfd going away, since the
         * other side is required to acquire wqh->lock, which we hold.
         */

        spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
/* in irqfd_ptable_queue_proc(): */
        add_wait_queue_priority(wqh, &irqfd->wait);
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        if (n_entries == 1)
                irqfd->irq_entry = *e;
        else
                irqfd->irq_entry.type = 0;

        write_seqcount_end(&irqfd->irq_entry_sc);
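/*
 * Hedged sketch of the seqcount protocol paired between irqfd_update()
 * (writer above) and irqfd_wakeup() (lockless reader): the writer brackets
 * its update, the reader snapshots and retries if a write raced with it.
 * new_entry/seq/irq are illustrative locals, not from this file.
 */
        /* writer, serialized by kvm->irqfds.lock */
        write_seqcount_begin(&irqfd->irq_entry_sc);
        irqfd->irq_entry = new_entry;
        write_seqcount_end(&irqfd->irq_entry_sc);

        /* lockless reader */
        do {
                seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                irq = irqfd->irq_entry;
        } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));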
/* in kvm_arch_update_irqfd_routing(): */
                               struct kvm *kvm, unsigned int host_irq,
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)

        if (!kvm_arch_intc_initialized(kvm))
                return -EAGAIN;

        if (!kvm_arch_irqfd_allowed(kvm, args))
                return -EINVAL;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
        seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        irqfd->eventfd = eventfd;
        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
                struct kvm_kernel_irqfd_resampler *resampler;

                resamplefd = eventfd_ctx_fdget(args->resamplefd);

                irqfd->resamplefd = resamplefd;
                INIT_LIST_HEAD(&irqfd->resampler_link);

                mutex_lock(&kvm->irqfds.resampler_lock);

                list_for_each_entry(resampler,
                                    &kvm->irqfds.resampler_list, link) {
                        if (resampler->notifier.gsi == irqfd->gsi) {
                                irqfd->resampler = resampler;
                                break;
                        }
                }

                if (!irqfd->resampler) {
                        resampler = kzalloc(sizeof(*resampler),
                                            GFP_KERNEL_ACCOUNT);
                        if (!resampler) {
                                ret = -ENOMEM;
                                mutex_unlock(&kvm->irqfds.resampler_lock);
                                goto fail;
                        }

                        resampler->kvm = kvm;
                        INIT_LIST_HEAD(&resampler->list);
                        resampler->notifier.gsi = irqfd->gsi;
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);

                        list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
                }

                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
                synchronize_srcu(&kvm->irq_srcu);

                mutex_unlock(&kvm->irqfds.resampler_lock);
        }
        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
        list_add_tail(&irqfd->list, &kvm->irqfds.items);

        spin_unlock_irq(&kvm->irqfds.lock);

        /* trigger an event that may have been pending before registration */
        events = vfs_poll(f.file, &irqfd->pt);
        if (events & EPOLLIN)
                schedule_work(&irqfd->inject);
        irqfd->consumer.token = (void *)irqfd->eventfd;
        irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
        irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
        irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
        irqfd->consumer.start = kvm_arch_irq_bypass_start;
        ret = irq_bypass_register_consumer(&irqfd->consumer);
        if (ret)
                pr_info("irq bypass consumer (token %p) registration fails: %d\n",
                        irqfd->consumer.token, ret);

        srcu_read_unlock(&kvm->irq_srcu, idx);

fail:
        if (irqfd->resampler)
                irqfd_resampler_shutdown(irqfd);
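/*
 * Hedged userspace sketch of the resample flow configured above: pair the
 * irqfd with a second eventfd that KVM signals when the guest EOIs the
 * level-triggered interrupt. vm_fd and gsi are assumed to exist; error
 * handling is trimmed.
 */
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int wire_resample_irqfd(int vm_fd, uint32_t gsi, int *resamplefd)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        int rfd = eventfd(0, EFD_CLOEXEC);
        struct kvm_irqfd req = {
                .fd         = (uint32_t)efd,
                .gsi        = gsi,
                .flags      = KVM_IRQFD_FLAG_RESAMPLE,
                .resamplefd = (uint32_t)rfd,
        };

        if (ioctl(vm_fd, KVM_IRQFD, &req) < 0)
                return -1;
        *resamplefd = rfd;      /* readable after guest EOI: re-check device */
        return efd;             /* write 1 here to assert the level IRQ */
}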
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
                                          link, srcu_read_lock_held(&kvm->irq_srcu))
                        if (kian->gsi == gsi) {
                                srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }

        srcu_read_unlock(&kvm->irq_srcu, idx);
        return false;
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)

        hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
                                  link, srcu_read_lock_held(&kvm->irq_srcu))
                if (kian->gsi == gsi)
                        kian->irq_acked(kian);
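/*
 * Hedged in-kernel sketch: registering for the EOI callbacks that
 * kvm_notify_acked_gsi() delivers, mirroring the resampler setup in
 * kvm_irqfd_assign(). my_acked, my_setup and the GSI are illustrative.
 */
static void my_acked(struct kvm_irq_ack_notifier *kian)
{
        /* the guest just acked/EOIed kian->gsi */
}

static struct kvm_irq_ack_notifier my_notifier = {
        .gsi       = 5,                 /* illustrative GSI */
        .irq_acked = my_acked,
};

static void my_setup(struct kvm *kvm)
{
        /* publish under kvm->irq_lock; walkable under SRCU afterwards */
        kvm_register_irq_ack_notifier(kvm, &my_notifier);
}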
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)

        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)

        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
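/*
 * Hedged sketch of the unpublish/retire ordering the unregister path above
 * relies on: after hlist_del_init_rcu() no new SRCU reader can find the
 * entry, and synchronize_srcu() waits out every reader that already did.
 */
        hlist_del_init_rcu(&kian->link);        /* unpublish from the list */
        synchronize_srcu(&kvm->irq_srcu);       /* drain in-flight readers */
        /* nothing can still hold a pointer obtained from the list */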
kvm_eventfd_init(struct kvm *kvm)

        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);

        INIT_LIST_HEAD(&kvm->ioeventfds);
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)

        eventfd = eventfd_ctx_fdget(args->fd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        write_seqcount_begin(&irqfd->irq_entry_sc);
                        irqfd->irq_entry.type = 0;
                        write_seqcount_end(&irqfd->irq_entry_sc);
                        irqfd_deactivate(irqfd);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)

        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
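/*
 * Hedged userspace sketch of the ioctl this dispatcher implements: bind an
 * eventfd to a GSI, inject by writing the eventfd, then deassign. vm_fd is
 * assumed to be an open KVM VM descriptor; error handling is trimmed.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pulse_gsi(int vm_fd, uint32_t gsi)
{
        uint64_t one = 1;
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_irqfd req = { .fd = (uint32_t)efd, .gsi = gsi };

        if (ioctl(vm_fd, KVM_IRQFD, &req) < 0)
                return -1;
        write(efd, &one, sizeof(one));          /* ends up in irqfd_wakeup() */

        req.flags = KVM_IRQFD_FLAG_DEASSIGN;    /* unbind again */
        ioctl(vm_fd, KVM_IRQFD, &req);
        close(efd);
        return 0;
}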
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open.
 */
kvm_irqfd_release(struct kvm *kvm)

        spin_lock_irq(&kvm->irqfds.lock);
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);
        spin_unlock_irq(&kvm->irqfds.lock);

        /* block until outstanding shutdowns complete; we hold no kvm* ref */
/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
                struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;

                irqfd_update(kvm, irqfd);

                if (irqfd->producer &&
                    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
                        int ret = kvm_arch_update_irqfd_routing(
                                        irqfd->kvm, irqfd->producer->irq,
                                        irqfd->gsi, 1);
                        WARN_ON(ret);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
                                unsigned irqchip, unsigned pin)

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1) {
                list_for_each_entry_srcu(resampler,
                                         &kvm->irqfds.resampler_list, link,
                                         srcu_read_lock_held(&kvm->irq_srcu)) {
                        if (resampler->notifier.gsi == gsi) {
                                irqfd_resampler_notify(resampler);
                                srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }
                }
        }
        srcu_read_unlock(&kvm->irq_srcu, idx);
        return false;
/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 */
/* in kvm_irqfd_init(): */
        irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 * --------------------------------------------------------------------
 */
/* in ioeventfd_release(): */
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
/* in ioeventfd_in_range(): */
        if (addr != p->addr)
                return false;
        if (!p->length)
                return true;    /* length 0 matches an access of any width */
        if (len != p->length)
                /* address-range must be precise for a hit */
                return false;
        if (p->wildcard)
                return true;

        return _val == p->datamatch;
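/*
 * Hedged restatement of the matching rule above as a standalone,
 * userspace-testable predicate (names are illustrative, not from this
 * file). E.g. a registration {addr=0x100, len=4, datamatch=1} hits a
 * 4-byte write of 1 to 0x100, but not a 2-byte write or a write of 2.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ioeventfd_hit(uint64_t reg_addr, uint32_t reg_len, bool wildcard,
                          uint64_t datamatch, uint64_t addr, uint32_t len,
                          uint64_t val)
{
        if (addr != reg_addr)
                return false;           /* address must match exactly */
        if (!reg_len)
                return true;            /* length 0: any width counts */
        if (len != reg_len)
                return false;           /* width must match exactly */
        return wildcard || val == datamatch;
}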
/* in ioeventfd_write(): */
        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;
        eventfd_signal(p->eventfd, 1);
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
/* assumes kvm->slots_lock held */
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr &&
                    (!_p->length || !p->length ||
                     (_p->length == p->length &&
                      (_p->wildcard || p->wildcard ||
                       _p->datamatch == p->datamatch))))
                        return true;

        return false;
static int kvm_assign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                                    struct kvm_ioeventfd *args)

        eventfd = eventfd_ctx_fdget(args->fd);

        p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr    = args->addr;
        p->bus_idx = bus_idx;
        p->length  = args->len;
        p->eventfd = eventfd;

        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

unlock_fail:
        mutex_unlock(&kvm->slots_lock);
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                           struct kvm_ioeventfd *args)

        int ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);

        wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry(p, &kvm->ioeventfds, list) {
                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd ||
                    p->addr != args->addr ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                bus = kvm_get_bus(kvm, bus_idx);
                if (bus)
                        bus->ioeventfd_count--;
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);
static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

        enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
        int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

        if (!args->len && bus_idx == KVM_MMIO_BUS)
                kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

        return ret;
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

        bus_idx = ioeventfd_bus_from_flags(args->flags);

        /* must be natural-word sized, or 0 to ignore length */
        switch (args->len) {
        case 0: case 1: case 2: case 4: case 8:
                break;
        default:
                return -EINVAL;
        }
        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        /* ioeventfd with no length can't be combined with DATAMATCH */
        if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
                return -EINVAL;

        ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);

        /* zero-length MMIO eventfds also go on the fast MMIO bus */
        if (!args->len && bus_idx == KVM_MMIO_BUS) {
                ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
                if (ret < 0)
                        kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
        }
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
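/*
 * Hedged userspace sketch of the ioctl handled above: ask KVM to signal an
 * eventfd on a 4-byte guest write of 1 to an MMIO doorbell instead of
 * exiting to userspace. The address and value are illustrative.
 */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int watch_doorbell(int vm_fd)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_ioeventfd req = {
                .datamatch = 1,
                .addr      = 0xfe003000,        /* illustrative MMIO address */
                .len       = 4,
                .fd        = efd,
                .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
        };

        if (ioctl(vm_fd, KVM_IOEVENTFD, &req) < 0)
                return -1;
        return efd;     /* poll/read this fd to observe doorbell writes */
}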