xref: /openbmc/linux/virt/kvm/eventfd.c (revision e8dbf195)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		/* Edge-triggered: pulse the line (assert, then de-assert). */
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else {
		/*
		 * Level-triggered: assert only; irqfd_resampler_ack() will
		 * de-assert the line once the guest acks the interrupt.
		 */
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
	}
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
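
/*
 * Illustrative userspace sketch (not part of this file): wiring up a
 * resampling irqfd for a level-triggered interrupt.  "vmfd" is assumed
 * to come from KVM_CREATE_VM with an in-kernel irqchip set up, and the
 * GSI value is an arbitrary example.
 *
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int trigger = eventfd(0, EFD_CLOEXEC);
 *	int resample = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd req = {
 *		.fd         = trigger,
 *		.gsi        = 24,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = resample,
 *	};
 *	ioctl(vmfd, KVM_IRQFD, &req);
 *
 * Writing a counter value to "trigger" asserts the GSI; when the guest
 * acks it, irqfd_resampler_ack() above de-asserts the line and signals
 * "resample", so userspace can re-check the device and re-trigger if
 * the level is still high.
 */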

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;
	int ret = 0;

	if (flags & EPOLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
		ret = 1;
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return ret;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		/*
		 * Zero or multiple routing entries for this GSI: clear the
		 * cached entry (type == 0) so that irqfd_wakeup() falls back
		 * to the slow injection path via schedule_work().
		 */
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * Do not drop the file until the irqfd is fully initialized;
	 * otherwise we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}
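
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the assign path above is reached via the KVM_IRQFD ioctl on a VM fd.
 * "vmfd" is assumed to come from KVM_CREATE_VM; the GSI is an arbitrary
 * example.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd req = { .fd = efd, .gsi = 5 };
 *
 *	if (ioctl(vmfd, KVM_IRQFD, &req))	// lands in kvm_irqfd_assign()
 *		perror("KVM_IRQFD");
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));		// fires irqfd_wakeup() -> inject
 */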

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
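
/*
 * A matching deassign from userspace is the same ioctl with the
 * DEASSIGN flag set (a sketch; "vmfd", "efd" and the GSI follow the
 * example after kvm_irqfd_assign() above):
 *
 *	struct kvm_irqfd req = {
 *		.fd    = efd,
 *		.gsi   = 5,
 *		.flags = KVM_IRQFD_FLAG_DEASSIGN,
 *	};
 *	ioctl(vmfd, KVM_IRQFD, &req);
 */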

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}
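
/*
 * Illustrative sketch of what reaches this path from userspace:
 * replacing the GSI routing table with KVM_SET_GSI_ROUTING while irqfds
 * are active.  Field values are arbitrary examples.
 *
 *	struct {
 *		struct kvm_irq_routing table;
 *		struct kvm_irq_routing_entry entries[1];
 *	} r = { .table = { .nr = 1 } };
 *
 *	r.entries[0].gsi  = 5;
 *	r.entries[0].type = KVM_IRQ_ROUTING_MSI;
 *	r.entries[0].u.msi.address_lo = 0xfee00000;
 *	r.entries[0].u.msi.data       = 0x4041;
 *
 *	ioctl(vmfd, KVM_SET_GSI_ROUTING, &r);
 *
 * Each active irqfd then has its cached routing entry refreshed via
 * irqfd_update() under irqfds.lock.
 */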

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
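
/*
 * Illustrative userspace sketch (not part of this file): registering an
 * ioeventfd so that a 2-byte guest write of 0x1 to MMIO address
 * 0xd0000000 signals an eventfd instead of bouncing out to userspace.
 * "vmfd" is assumed to come from KVM_CREATE_VM; the address and value
 * are arbitrary examples.
 *
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd req = {
 *		.datamatch = 0x1,
 *		.addr      = 0xd0000000,
 *		.len       = 2,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *	ioctl(vmfd, KVM_IOEVENTFD, &req);
 *
 * An I/O thread can then poll()/read() "efd" and see its count bump
 * each time ioeventfd_write() below finds a match.
 */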

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	struct kvm_io_bus	 *bus;
	int                       ret = -ENOENT;
	bool                      wildcard;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/*
	 * When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}
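
/*
 * Illustrative sketch: a length-0 MMIO registration, which the code
 * above also places on KVM_FAST_MMIO_BUS.  This is the pattern
 * virtio-style doorbells typically use, since the hot path then needs
 * neither a data nor a length check ("vmfd" and "efd" as in the earlier
 * examples; the address is arbitrary):
 *
 *	struct kvm_ioeventfd req = {
 *		.addr = 0xd0000000,	// doorbell address (example)
 *		.len  = 0,		// address-only match
 *		.fd   = efd,
 *	};
 *	ioctl(vmfd, KVM_IOEVENTFD, &req);
 */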

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}
967