// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

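/*
 * Per-interrupt context.  INTx uses a single context at ctx[0]; MSI and
 * MSI-X allocate one per vector.  @trigger is the eventfd signaled to
 * notify userspace, @unmask and @mask hold optional user-supplied
 * virqfds for the loopback unmask/mask paths (the mask eventfd path is
 * currently unimplemented below), @masked tracks the virtual mask state
 * under vdev->irqlock, and @producer allows an IRQ bypass consumer
 * (e.g. KVM posted interrupts) to attach directly to the trigger.
 */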
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

/*
 * INTx
 */
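/*
 * INTx is level triggered, so a fired interrupt must stay masked, via
 * DisINTx or disable_irq(), until userspace has serviced the device:
 * the handler below masks and signals the trigger eventfd, and delivery
 * is re-enabled through the unmask paths.
 */
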
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
		masked_changed = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else {
			enable_irq(pdev->irq);
		}

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

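/*
 * Ioctl and config-write path: here it's safe to signal the eventfd
 * directly when the unmask handler asks for it.
 */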
void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

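/*
 * The line may be shared for PCI 2.3 devices (IRQF_SHARED), so claim
 * the interrupt only if this device asserted it, masking in hardware
 * via DisINTx.  Non-PCI-2.3 devices get an exclusive line and must be
 * masked at the irqchip instead.
 */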
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked && /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level here;
	 * non-PCI-2.3 devices will have to wait until the interrupt
	 * is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

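/*
 * Attach, replace, or tear down the user's trigger eventfd; fd < 0
 * releases any current trigger and frees the irq.
 */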
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
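/*
 * MSI/MSI-X vectors are never shared; the eventfd itself is used as the
 * dev_id cookie, so the handler can signal userspace directly.
 */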
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* Return the number of supported vectors if we can't get them all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max MSI vectors -
		 * it is the log base 2 of the number of vectors, rounded
		 * up.  For example, nvec == 3 gives fls(5) - 1 == 2,
		 * advertising a maximum of 2^2 == 4 vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

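/*
 * Set up or tear down the trigger for a single vector.  Memory decode
 * is forced on around free_irq()/request_irq() since these paths may
 * touch the MSI-X vector table, which lives in device memory.
 */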
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSI-X vector table resides in device memory which may be
	 * cleared via backdoor resets.  We don't allow direct access to
	 * the vector table, so even if a userspace driver attempts to
	 * save/restore around such a reset it would be unsuccessful.  To
	 * avoid this, restore the cached value of the message prior to
	 * enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;

		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

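	/*
	 * On failure, unwind from the vector that failed back to @start;
	 * fd == -1 tears down a configured vector and is a no-op for one
	 * that was never set up.
	 */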
	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
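/*
 * These handlers back the VFIO_DEVICE_SET_IRQS ioctl.  For illustration
 * only, a userspace sketch (not part of this driver; device_fd is
 * assumed to be an open VFIO device fd) attaching an eventfd as the
 * INTx trigger might look like:
 *
 *	int32_t fd = eventfd(0, EFD_CLOEXEC);
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *
 *	irq_set->argsz = sizeof(buf);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(&irq_set->data, &fd, sizeof(fd));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 */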
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

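/*
 * Manage a single eventfd_ctx (err_trigger/req_trigger below):
 * DATA_EVENTFD installs or clears the eventfd, while DATA_NONE and
 * DATA_BOOL let the user fire it for loopback testing.
 */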
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

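/*
 * Entry point for VFIO_DEVICE_SET_IRQS: select a handler based on the
 * IRQ index and requested action, then hand off validation of the
 * start/count range and data payload to it.
 */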
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}