xref: /openbmc/linux/virt/kvm/eventfd.c (revision b5020a8e)
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

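/*
 * Workqueue handler that runs when the irqfd's eventfd was signalled and the
 * interrupt could not be injected directly from the wait-queue callback.
 * For an ordinary irqfd we pulse the GSI (assert then de-assert); for a
 * resampling irqfd we only assert it, and irqfd_resampler_ack() de-asserts
 * it when the guest acknowledges the interrupt.
 */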
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

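/*
 * Detach an irqfd from its resampler.  If it was the last user of the
 * resampler for this GSI, unregister the ack notifier, leave the line
 * de-asserted and free the resampler itself.
 */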
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in the assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

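/*
 * Weak default for architectures that cannot inject an interrupt from
 * atomic (wait-queue callback) context: returning -EWOULDBLOCK makes
 * irqfd_wakeup() fall back to the irqfd_inject() work item.
 */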
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

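	/*
	 * On EPOLLIN, take a stable snapshot of the cached routing entry
	 * under the seqcount and try to inject without sleeping; if the
	 * architecture cannot do that (-EWOULDBLOCK), defer to the
	 * irqfd_inject() work item.
	 */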
	if (flags & EPOLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

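/*
 * poll_table callback: invoked by the eventfd's poll implementation when
 * kvm_irqfd_assign() polls the file, and simply hooks irqfd->wait (whose
 * wake-up function is irqfd_wakeup) onto the eventfd's wait queue.
 */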
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int  __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

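/*
 * Register an eventfd to inject interrupts on a GSI: allocate the irqfd,
 * optionally tie it to a resampler, hook it onto the eventfd's wait queue,
 * cache the current routing entry and catch any event that was already
 * pending before registration.
 */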
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

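	/*
	 * KVM_IRQFD_FLAG_RESAMPLE is intended for level-triggered interrupts:
	 * the irqfd only asserts the GSI, and when the guest acknowledges the
	 * interrupt the resampler de-asserts it and signals resamplefd so
	 * userspace can re-queue the interrupt if it is still pending.
	 */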
	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

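/*
 * Typically invoked from the irqchip EOI paths when the guest acknowledges
 * an interrupt on the given chip/pin; maps it to a GSI and runs the
 * registered ack notifiers (e.g. irqfd_resampler_ack()).
 */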
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
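
/*
 * Illustrative userspace sketch (not part of this file): pairing an eventfd
 * with a GSI via the KVM_IRQFD ioctl on a VM fd typically looks like:
 *
 *	struct kvm_irqfd irqfd = {
 *		.fd    = event_fd,
 *		.gsi   = gsi,
 *		.flags = 0,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 * Passing KVM_IRQFD_FLAG_DEASSIGN with the same fd/gsi later removes the
 * binding again.
 */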

/*
 * This function is called as the kvm VM fd is being released.  Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

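/*
 * Per-registration state for one ioeventfd: the guest-physical (or port)
 * address and length it watches, the eventfd to signal, and either a
 * datamatch value or wildcard == true, meaning any written value triggers
 * the event.
 */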
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

743  * This function is called as KVM is completely shutting down.  We do not
744  * need to worry about locking just nuke anything we have as quickly as possible
745  */
746 static void
747 ioeventfd_destructor(struct kvm_io_device *this)
748 {
749 	struct _ioeventfd *p = to_ioeventfd(this);
750 
751 	ioeventfd_release(p);
752 }
753 
754 static const struct kvm_io_device_ops ioeventfd_ops = {
755 	.write      = ioeventfd_write,
756 	.destructor = ioeventfd_destructor,
757 };
758 
759 /* assumes kvm->slots_lock held */
760 static bool
761 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
762 {
763 	struct _ioeventfd *_p;
764 
765 	list_for_each_entry(_p, &kvm->ioeventfds, list)
766 		if (_p->bus_idx == p->bus_idx &&
767 		    _p->addr == p->addr &&
768 		    (!_p->length || !p->length ||
769 		     (_p->length == p->length &&
770 		      (_p->wildcard || p->wildcard ||
771 		       _p->datamatch == p->datamatch))))
772 			return true;
773 
774 	return false;
775 }
776 
777 static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
778 {
779 	if (flags & KVM_IOEVENTFD_FLAG_PIO)
780 		return KVM_PIO_BUS;
781 	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
782 		return KVM_VIRTIO_CCW_NOTIFY_BUS;
783 	return KVM_MMIO_BUS;
784 }
785 
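/*
 * Register one ioeventfd on the given bus: reject duplicates via
 * ioeventfd_check_collision(), then add the device to the bus so that
 * guest writes to the range are turned into eventfd signals.
 */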
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

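/*
 * Remove a previously registered ioeventfd; the caller's bus, address,
 * length, eventfd and datamatch/wildcard setting must all match the
 * original registration.
 */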
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	struct kvm_io_bus	 *bus;
	int                       ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

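/*
 * Illustrative userspace sketch (not part of this file): registering a
 * 4-byte MMIO ioeventfd with a datamatch via the KVM_IOEVENTFD ioctl on a
 * VM fd typically looks like:
 *
 *	struct kvm_ioeventfd io = {
 *		.addr      = mmio_addr,
 *		.len       = 4,
 *		.fd        = event_fd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = value,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * Setting KVM_IOEVENTFD_FLAG_DEASSIGN with the same parameters removes it.
 */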
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}