xref: /openbmc/qemu/hw/vfio/pci.c (revision db432672)
1 /*
2  * vfio based device assignment support
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include <linux/vfio.h>
23 #include <sys/ioctl.h>
24 
25 #include "hw/pci/msi.h"
26 #include "hw/pci/msix.h"
27 #include "hw/pci/pci_bridge.h"
28 #include "qemu/error-report.h"
29 #include "qemu/range.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/sysemu.h"
32 #include "pci.h"
33 #include "trace.h"
34 #include "qapi/error.h"
35 
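/*
 * Length of the MSI-X capability structure in config space: capability ID,
 * next pointer, message control, table offset/BIR and PBA offset/BIR.
 */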
36 #define MSIX_CAP_LENGTH 12
37 
38 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
39 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
40 
41 /*
42  * Disabling BAR mmapping can be slow, but toggling it around INTx can
43  * also be a huge overhead.  We try to get the best of both worlds by
44  * waiting until an interrupt to disable mmaps (subsequent transitions
45  * to the same state are effectively no overhead).  If the interrupt has
46  * been serviced and the time gap is long enough, we re-enable mmaps for
47  * performance.  This works well for things like graphics cards, which
48  * may not use their interrupt at all and are penalized to an unusable
49  * level by read/write BAR traps.  Other devices, like NICs, have more
50  * regular interrupts and see much better latency by staying in non-mmap
51  * mode.  We therefore set the default mmap_timeout such that a ping
52  * is just enough to keep the mmap disabled.  Users can experiment with
53  * other options with the x-intx-mmap-timeout-ms parameter (a value of
54  * zero disables the timer).
55  */
56 static void vfio_intx_mmap_enable(void *opaque)
57 {
58     VFIOPCIDevice *vdev = opaque;
59 
60     if (vdev->intx.pending) {
61         timer_mod(vdev->intx.mmap_timer,
62                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
63         return;
64     }
65 
66     vfio_mmap_set_enabled(vdev, true);
67 }
68 
69 static void vfio_intx_interrupt(void *opaque)
70 {
71     VFIOPCIDevice *vdev = opaque;
72 
73     if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
74         return;
75     }
76 
77     trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
78 
79     vdev->intx.pending = true;
80     pci_irq_assert(&vdev->pdev);
81     vfio_mmap_set_enabled(vdev, false);
82     if (vdev->intx.mmap_timeout) {
83         timer_mod(vdev->intx.mmap_timer,
84                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
85     }
86 }
87 
88 static void vfio_intx_eoi(VFIODevice *vbasedev)
89 {
90     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
91 
92     if (!vdev->intx.pending) {
93         return;
94     }
95 
96     trace_vfio_intx_eoi(vbasedev->name);
97 
98     vdev->intx.pending = false;
99     pci_irq_deassert(&vdev->pdev);
100     vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
101 }
102 
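/*
 * Accelerate INTx through KVM: the VFIO interrupt eventfd is registered with
 * KVM as a resampling irqfd and the resample eventfd is handed to VFIO as the
 * unmask notifier, so level-triggered interrupts are injected and re-enabled
 * without a round trip through QEMU on every assertion.
 */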
103 static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
104 {
105 #ifdef CONFIG_KVM
106     struct kvm_irqfd irqfd = {
107         .fd = event_notifier_get_fd(&vdev->intx.interrupt),
108         .gsi = vdev->intx.route.irq,
109         .flags = KVM_IRQFD_FLAG_RESAMPLE,
110     };
111     struct vfio_irq_set *irq_set;
112     int ret, argsz;
113     int32_t *pfd;
114 
115     if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
116         vdev->intx.route.mode != PCI_INTX_ENABLED ||
117         !kvm_resamplefds_enabled()) {
118         return;
119     }
120 
121     /* Get to a known interrupt state */
122     qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
123     vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
124     vdev->intx.pending = false;
125     pci_irq_deassert(&vdev->pdev);
126 
127     /* Get an eventfd for resample/unmask */
128     if (event_notifier_init(&vdev->intx.unmask, 0)) {
129         error_setg(errp, "event_notifier_init failed eoi");
130         goto fail;
131     }
132 
133     /* KVM triggers it, VFIO listens for it */
134     irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
135 
136     if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
137         error_setg_errno(errp, errno, "failed to setup resample irqfd");
138         goto fail_irqfd;
139     }
140 
141     argsz = sizeof(*irq_set) + sizeof(*pfd);
142 
143     irq_set = g_malloc0(argsz);
144     irq_set->argsz = argsz;
145     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
146     irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
147     irq_set->start = 0;
148     irq_set->count = 1;
149     pfd = (int32_t *)&irq_set->data;
150 
151     *pfd = irqfd.resamplefd;
152 
153     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
154     g_free(irq_set);
155     if (ret) {
156         error_setg_errno(errp, -ret, "failed to setup INTx unmask fd");
157         goto fail_vfio;
158     }
159 
160     /* Let'em rip */
161     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
162 
163     vdev->intx.kvm_accel = true;
164 
165     trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
166 
167     return;
168 
169 fail_vfio:
170     irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
171     kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
172 fail_irqfd:
173     event_notifier_cleanup(&vdev->intx.unmask);
174 fail:
175     qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
176     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
177 #endif
178 }
179 
180 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
181 {
182 #ifdef CONFIG_KVM
183     struct kvm_irqfd irqfd = {
184         .fd = event_notifier_get_fd(&vdev->intx.interrupt),
185         .gsi = vdev->intx.route.irq,
186         .flags = KVM_IRQFD_FLAG_DEASSIGN,
187     };
188 
189     if (!vdev->intx.kvm_accel) {
190         return;
191     }
192 
193     /*
194      * Get to a known state, hardware masked, QEMU ready to accept new
195      * interrupts, QEMU IRQ de-asserted.
196      */
197     vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
198     vdev->intx.pending = false;
199     pci_irq_deassert(&vdev->pdev);
200 
201     /* Tell KVM to stop listening for an INTx irqfd */
202     if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
203         error_report("vfio: Error: Failed to disable INTx irqfd: %m");
204     }
205 
206     /* We only need to close the eventfd for VFIO to clean up the kernel side */
207     event_notifier_cleanup(&vdev->intx.unmask);
208 
209     /* QEMU starts listening for interrupt events. */
210     qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
211 
212     vdev->intx.kvm_accel = false;
213 
214     /* If we've missed an event, let it re-fire through QEMU */
215     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
216 
217     trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
218 #endif
219 }
220 
221 static void vfio_intx_update(PCIDevice *pdev)
222 {
223     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
224     PCIINTxRoute route;
225     Error *err = NULL;
226 
227     if (vdev->interrupt != VFIO_INT_INTx) {
228         return;
229     }
230 
231     route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
232 
233     if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
234         return; /* Nothing changed */
235     }
236 
237     trace_vfio_intx_update(vdev->vbasedev.name,
238                            vdev->intx.route.irq, route.irq);
239 
240     vfio_intx_disable_kvm(vdev);
241 
242     vdev->intx.route = route;
243 
244     if (route.mode != PCI_INTX_ENABLED) {
245         return;
246     }
247 
248     vfio_intx_enable_kvm(vdev, &err);
249     if (err) {
250         error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
251     }
252 
253     /* Re-enable the interrupt in case we missed an EOI */
254     vfio_intx_eoi(&vdev->vbasedev);
255 }
256 
257 static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
258 {
259     uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
260     int ret, argsz, retval = 0;
261     struct vfio_irq_set *irq_set;
262     int32_t *pfd;
263     Error *err = NULL;
264 
265     if (!pin) {
266         return 0;
267     }
268 
269     vfio_disable_interrupts(vdev);
270 
271     vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
272     pci_config_set_interrupt_pin(vdev->pdev.config, pin);
273 
274 #ifdef CONFIG_KVM
275     /*
276      * This is conditional only to avoid generating error messages on platforms
277      * where we won't actually use the result anyway.
278      */
279     if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
280         vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
281                                                         vdev->intx.pin);
282     }
283 #endif
284 
285     ret = event_notifier_init(&vdev->intx.interrupt, 0);
286     if (ret) {
287         error_setg_errno(errp, -ret, "event_notifier_init failed");
288         return ret;
289     }
290 
291     argsz = sizeof(*irq_set) + sizeof(*pfd);
292 
293     irq_set = g_malloc0(argsz);
294     irq_set->argsz = argsz;
295     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
296     irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
297     irq_set->start = 0;
298     irq_set->count = 1;
299     pfd = (int32_t *)&irq_set->data;
300 
301     *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
302     qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
303 
304     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
305     if (ret) {
306         error_setg_errno(errp, -ret, "failed to setup INTx fd");
307         qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
308         event_notifier_cleanup(&vdev->intx.interrupt);
309         retval = -errno;
310         goto cleanup;
311     }
312 
313     vfio_intx_enable_kvm(vdev, &err);
314     if (err) {
315         error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
316     }
317 
318     vdev->interrupt = VFIO_INT_INTx;
319 
320     trace_vfio_intx_enable(vdev->vbasedev.name);
321 
322 cleanup:
323     g_free(irq_set);
324 
325     return retval;
326 }
327 
328 static void vfio_intx_disable(VFIOPCIDevice *vdev)
329 {
330     int fd;
331 
332     timer_del(vdev->intx.mmap_timer);
333     vfio_intx_disable_kvm(vdev);
334     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
335     vdev->intx.pending = false;
336     pci_irq_deassert(&vdev->pdev);
337     vfio_mmap_set_enabled(vdev, true);
338 
339     fd = event_notifier_get_fd(&vdev->intx.interrupt);
340     qemu_set_fd_handler(fd, NULL, NULL, vdev);
341     event_notifier_cleanup(&vdev->intx.interrupt);
342 
343     vdev->interrupt = VFIO_INT_NONE;
344 
345     trace_vfio_intx_disable(vdev->vbasedev.name);
346 }
347 
348 /*
349  * MSI/X
350  */
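/*
 * Handler for the QEMU-owned per-vector eventfd, used whenever VFIO signals
 * QEMU rather than a KVM irqfd.  It re-injects the interrupt through QEMU's
 * MSI/MSI-X emulation and tracks the PBA for masked MSI-X vectors.
 */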
351 static void vfio_msi_interrupt(void *opaque)
352 {
353     VFIOMSIVector *vector = opaque;
354     VFIOPCIDevice *vdev = vector->vdev;
355     MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
356     void (*notify)(PCIDevice *dev, unsigned vector);
357     MSIMessage msg;
358     int nr = vector - vdev->msi_vectors;
359 
360     if (!event_notifier_test_and_clear(&vector->interrupt)) {
361         return;
362     }
363 
364     if (vdev->interrupt == VFIO_INT_MSIX) {
365         get_msg = msix_get_message;
366         notify = msix_notify;
367 
368         /* A masked vector firing needs to use the PBA, enable it */
369         if (msix_is_masked(&vdev->pdev, nr)) {
370             set_bit(nr, vdev->msix->pending);
371             memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
372             trace_vfio_msix_pba_enable(vdev->vbasedev.name);
373         }
374     } else if (vdev->interrupt == VFIO_INT_MSI) {
375         get_msg = msi_get_message;
376         notify = msi_notify;
377     } else {
378         abort();
379     }
380 
381     msg = get_msg(&vdev->pdev, nr);
382     trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
383     notify(&vdev->pdev, nr);
384 }
385 
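/*
 * Program the eventfds of all allocated vectors into VFIO with a single
 * VFIO_DEVICE_SET_IRQS call.  Vectors routed through KVM use their
 * kvm_interrupt eventfd, the others fall back to the QEMU-handled interrupt
 * eventfd, and unused vectors are left at -1.
 */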
386 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
387 {
388     struct vfio_irq_set *irq_set;
389     int ret = 0, i, argsz;
390     int32_t *fds;
391 
392     argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
393 
394     irq_set = g_malloc0(argsz);
395     irq_set->argsz = argsz;
396     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
397     irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
398     irq_set->start = 0;
399     irq_set->count = vdev->nr_vectors;
400     fds = (int32_t *)&irq_set->data;
401 
402     for (i = 0; i < vdev->nr_vectors; i++) {
403         int fd = -1;
404 
405         /*
406          * MSI vs MSI-X - The guest has direct access to MSI mask and pending
407          * bits, therefore we always use the KVM signaling path when setup.
408          * MSI-X mask and pending bits are emulated, so we want to use the
409          * KVM signaling path only when configured and unmasked.
410          */
411         if (vdev->msi_vectors[i].use) {
412             if (vdev->msi_vectors[i].virq < 0 ||
413                 (msix && msix_is_masked(&vdev->pdev, i))) {
414                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
415             } else {
416                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
417             }
418         }
419 
420         fds[i] = fd;
421     }
422 
423     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
424 
425     g_free(irq_set);
426 
427     return ret;
428 }
429 
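/*
 * Set up a KVM bypass path for one MSI/MSI-X vector: allocate a KVM MSI
 * route (virq) and attach the kvm_interrupt eventfd to it as an irqfd so
 * the device can signal the guest without a QEMU exit.  Any failure falls
 * back silently to userspace delivery.
 */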
430 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
431                                   int vector_n, bool msix)
432 {
433     int virq;
434 
435     if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
436         return;
437     }
438 
439     if (event_notifier_init(&vector->kvm_interrupt, 0)) {
440         return;
441     }
442 
443     virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
444     if (virq < 0) {
445         event_notifier_cleanup(&vector->kvm_interrupt);
446         return;
447     }
448 
449     if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
450                                        NULL, virq) < 0) {
451         kvm_irqchip_release_virq(kvm_state, virq);
452         event_notifier_cleanup(&vector->kvm_interrupt);
453         return;
454     }
455 
456     vector->virq = virq;
457 }
458 
459 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
460 {
461     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
462                                           vector->virq);
463     kvm_irqchip_release_virq(kvm_state, vector->virq);
464     vector->virq = -1;
465     event_notifier_cleanup(&vector->kvm_interrupt);
466 }
467 
468 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
469                                      PCIDevice *pdev)
470 {
471     kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
472     kvm_irqchip_commit_routes(kvm_state);
473 }
474 
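/*
 * Per-vector use callback from the MSI-X emulation core (also called
 * directly when enabling MSI-X).  Allocates the vector on first use,
 * chooses between the KVM and userspace signaling paths, and re-programs
 * VFIO with the corresponding eventfd.
 */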
475 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
476                                    MSIMessage *msg, IOHandler *handler)
477 {
478     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
479     VFIOMSIVector *vector;
480     int ret;
481 
482     trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
483 
484     vector = &vdev->msi_vectors[nr];
485 
486     if (!vector->use) {
487         vector->vdev = vdev;
488         vector->virq = -1;
489         if (event_notifier_init(&vector->interrupt, 0)) {
490             error_report("vfio: Error: event_notifier_init failed");
491         }
492         vector->use = true;
493         msix_vector_use(pdev, nr);
494     }
495 
496     qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
497                         handler, NULL, vector);
498 
499     /*
500      * Attempt to enable route through KVM irqchip,
501      * default to userspace handling if unavailable.
502      */
503     if (vector->virq >= 0) {
504         if (!msg) {
505             vfio_remove_kvm_msi_virq(vector);
506         } else {
507             vfio_update_kvm_msi_virq(vector, *msg, pdev);
508         }
509     } else {
510         if (msg) {
511             vfio_add_kvm_msi_virq(vdev, vector, nr, true);
512         }
513     }
514 
515     /*
516      * We don't want to have the host allocate all possible MSI vectors
517      * for a device if they're not in use, so we shut down and incrementally
518      * increase them as needed.
519      */
520     if (vdev->nr_vectors < nr + 1) {
521         vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
522         vdev->nr_vectors = nr + 1;
523         ret = vfio_enable_vectors(vdev, true);
524         if (ret) {
525             error_report("vfio: failed to enable vectors, %d", ret);
526         }
527     } else {
528         int argsz;
529         struct vfio_irq_set *irq_set;
530         int32_t *pfd;
531 
532         argsz = sizeof(*irq_set) + sizeof(*pfd);
533 
534         irq_set = g_malloc0(argsz);
535         irq_set->argsz = argsz;
536         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
537                          VFIO_IRQ_SET_ACTION_TRIGGER;
538         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
539         irq_set->start = nr;
540         irq_set->count = 1;
541         pfd = (int32_t *)&irq_set->data;
542 
543         if (vector->virq >= 0) {
544             *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
545         } else {
546             *pfd = event_notifier_get_fd(&vector->interrupt);
547         }
548 
549         ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
550         g_free(irq_set);
551         if (ret) {
552             error_report("vfio: failed to modify vector, %d", ret);
553         }
554     }
555 
556     /* Disable PBA emulation when nothing more is pending. */
557     clear_bit(nr, vdev->msix->pending);
558     if (find_first_bit(vdev->msix->pending,
559                        vdev->nr_vectors) == vdev->nr_vectors) {
560         memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
561         trace_vfio_msix_pba_disable(vdev->vbasedev.name);
562     }
563 
564     return 0;
565 }
566 
567 static int vfio_msix_vector_use(PCIDevice *pdev,
568                                 unsigned int nr, MSIMessage msg)
569 {
570     return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
571 }
572 
573 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
574 {
575     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
576     VFIOMSIVector *vector = &vdev->msi_vectors[nr];
577 
578     trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
579 
580     /*
581      * There are still old guests that mask and unmask vectors on every
582      * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
583      * the KVM setup in place, simply switch VFIO to use the non-bypass
584      * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
585      * core will mask the interrupt and set pending bits, allowing it to
586      * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
587      */
588     if (vector->virq >= 0) {
589         int argsz;
590         struct vfio_irq_set *irq_set;
591         int32_t *pfd;
592 
593         argsz = sizeof(*irq_set) + sizeof(*pfd);
594 
595         irq_set = g_malloc0(argsz);
596         irq_set->argsz = argsz;
597         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
598                          VFIO_IRQ_SET_ACTION_TRIGGER;
599         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
600         irq_set->start = nr;
601         irq_set->count = 1;
602         pfd = (int32_t *)&irq_set->data;
603 
604         *pfd = event_notifier_get_fd(&vector->interrupt);
605 
606         ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
607 
608         g_free(irq_set);
609     }
610 }
611 
612 static void vfio_msix_enable(VFIOPCIDevice *vdev)
613 {
614     vfio_disable_interrupts(vdev);
615 
616     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
617 
618     vdev->interrupt = VFIO_INT_MSIX;
619 
620     /*
621      * Some communication channels between VF & PF or PF & fw rely on the
622      * physical state of the device and expect that enabling MSI-X from the
623      * guest enables the same on the host.  When our guest is Linux, the
624      * guest driver call to pci_enable_msix() sets the enabling bit in the
625      * MSI-X capability, but leaves the vector table masked.  We therefore
626      * can't rely on a vector_use callback (from request_irq() in the guest)
627      * to switch the physical device into MSI-X mode because that may come a
628      * long time after pci_enable_msix().  This code enables vector 0 with
629      * triggering to userspace, then immediately releases the vector, leaving
630      * the physical device with no vectors enabled, but MSI-X enabled, just
631      * like the guest view.
632      */
633     vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
634     vfio_msix_vector_release(&vdev->pdev, 0);
635 
636     if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
637                                   vfio_msix_vector_release, NULL)) {
638         error_report("vfio: msix_set_vector_notifiers failed");
639     }
640 
641     trace_vfio_msix_enable(vdev->vbasedev.name);
642 }
643 
644 static void vfio_msi_enable(VFIOPCIDevice *vdev)
645 {
646     int ret, i;
647 
648     vfio_disable_interrupts(vdev);
649 
650     vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
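    /* We come back here with a reduced count if the host grants fewer vectors than requested */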
651 retry:
652     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
653 
654     for (i = 0; i < vdev->nr_vectors; i++) {
655         VFIOMSIVector *vector = &vdev->msi_vectors[i];
656 
657         vector->vdev = vdev;
658         vector->virq = -1;
659         vector->use = true;
660 
661         if (event_notifier_init(&vector->interrupt, 0)) {
662             error_report("vfio: Error: event_notifier_init failed");
663         }
664 
665         qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
666                             vfio_msi_interrupt, NULL, vector);
667 
668         /*
669          * Attempt to enable route through KVM irqchip,
670          * default to userspace handling if unavailable.
671          */
672         vfio_add_kvm_msi_virq(vdev, vector, i, false);
673     }
674 
675     /* Set interrupt type prior to possible interrupts */
676     vdev->interrupt = VFIO_INT_MSI;
677 
678     ret = vfio_enable_vectors(vdev, false);
679     if (ret) {
680         if (ret < 0) {
681             error_report("vfio: Error: Failed to setup MSI fds: %m");
682         } else if (ret != vdev->nr_vectors) {
683             error_report("vfio: Error: Failed to enable %d "
684                          "MSI vectors, retry with %d", vdev->nr_vectors, ret);
685         }
686 
687         for (i = 0; i < vdev->nr_vectors; i++) {
688             VFIOMSIVector *vector = &vdev->msi_vectors[i];
689             if (vector->virq >= 0) {
690                 vfio_remove_kvm_msi_virq(vector);
691             }
692             qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
693                                 NULL, NULL, NULL);
694             event_notifier_cleanup(&vector->interrupt);
695         }
696 
697         g_free(vdev->msi_vectors);
698 
699         if (ret > 0 && ret != vdev->nr_vectors) {
700             vdev->nr_vectors = ret;
701             goto retry;
702         }
703         vdev->nr_vectors = 0;
704 
705         /*
706          * Failing to set up MSI doesn't really fall within any specification.
707          * Let's try leaving interrupts disabled and hope the guest figures
708          * out to fall back to INTx for this device.
709          */
710         error_report("vfio: Error: Failed to enable MSI");
711         vdev->interrupt = VFIO_INT_NONE;
712 
713         return;
714     }
715 
716     trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
717 }
718 
719 static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
720 {
721     Error *err = NULL;
722     int i;
723 
724     for (i = 0; i < vdev->nr_vectors; i++) {
725         VFIOMSIVector *vector = &vdev->msi_vectors[i];
726         if (vdev->msi_vectors[i].use) {
727             if (vector->virq >= 0) {
728                 vfio_remove_kvm_msi_virq(vector);
729             }
730             qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
731                                 NULL, NULL, NULL);
732             event_notifier_cleanup(&vector->interrupt);
733         }
734     }
735 
736     g_free(vdev->msi_vectors);
737     vdev->msi_vectors = NULL;
738     vdev->nr_vectors = 0;
739     vdev->interrupt = VFIO_INT_NONE;
740 
741     vfio_intx_enable(vdev, &err);
742     if (err) {
743         error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
744     }
745 }
746 
747 static void vfio_msix_disable(VFIOPCIDevice *vdev)
748 {
749     int i;
750 
751     msix_unset_vector_notifiers(&vdev->pdev);
752 
753     /*
754      * MSI-X will only release vectors if MSI-X is still enabled on the
755      * device, so check through the rest and release them ourselves if necessary.
756      */
757     for (i = 0; i < vdev->nr_vectors; i++) {
758         if (vdev->msi_vectors[i].use) {
759             vfio_msix_vector_release(&vdev->pdev, i);
760             msix_vector_unuse(&vdev->pdev, i);
761         }
762     }
763 
764     if (vdev->nr_vectors) {
765         vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
766     }
767 
768     vfio_msi_disable_common(vdev);
769 
770     memset(vdev->msix->pending, 0,
771            BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
772 
773     trace_vfio_msix_disable(vdev->vbasedev.name);
774 }
775 
776 static void vfio_msi_disable(VFIOPCIDevice *vdev)
777 {
778     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
779     vfio_msi_disable_common(vdev);
780 
781     trace_vfio_msi_disable(vdev->vbasedev.name);
782 }
783 
784 static void vfio_update_msi(VFIOPCIDevice *vdev)
785 {
786     int i;
787 
788     for (i = 0; i < vdev->nr_vectors; i++) {
789         VFIOMSIVector *vector = &vdev->msi_vectors[i];
790         MSIMessage msg;
791 
792         if (!vector->use || vector->virq < 0) {
793             continue;
794         }
795 
796         msg = msi_get_message(&vdev->pdev, i);
797         vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
798     }
799 }
800 
801 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
802 {
803     struct vfio_region_info *reg_info;
804     uint64_t size;
805     off_t off = 0;
806     ssize_t bytes;
807 
808     if (vfio_get_region_info(&vdev->vbasedev,
809                              VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
810         error_report("vfio: Error getting ROM info: %m");
811         return;
812     }
813 
814     trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
815                             (unsigned long)reg_info->offset,
816                             (unsigned long)reg_info->flags);
817 
818     vdev->rom_size = size = reg_info->size;
819     vdev->rom_offset = reg_info->offset;
820 
821     g_free(reg_info);
822 
823     if (!vdev->rom_size) {
824         vdev->rom_read_failed = true;
825         error_report("vfio-pci: Cannot read device rom at "
826                     "%s", vdev->vbasedev.name);
827         error_printf("Device option ROM contents are probably invalid "
828                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
829                     "or load from file with romfile=\n");
830         return;
831     }
832 
833     vdev->rom = g_malloc(size);
834     memset(vdev->rom, 0xff, size);
835 
836     while (size) {
837         bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
838                       size, vdev->rom_offset + off);
839         if (bytes == 0) {
840             break;
841         } else if (bytes > 0) {
842             off += bytes;
843             size -= bytes;
844         } else {
845             if (errno == EINTR || errno == EAGAIN) {
846                 continue;
847             }
848             error_report("vfio: Error reading device ROM: %m");
849             break;
850         }
851     }
852 
853     /*
854      * Test the ROM signature against our device; if the vendor is correct
855      * but the device ID doesn't match, store the correct device ID and
856      * recompute the checksum.  Intel IGD devices need this and are known
857      * to have bogus checksums so we can't simply adjust the checksum.
858      */
859     if (pci_get_word(vdev->rom) == 0xaa55 &&
860         pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
861         !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
862         uint16_t vid, did;
863 
864         vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
865         did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
866 
867         if (vid == vdev->vendor_id && did != vdev->device_id) {
868             int i;
869             uint8_t csum, *data = vdev->rom;
870 
871             pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
872                          vdev->device_id);
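            /*
             * Zero this byte first, sum the whole image, then store the
             * negated sum here so that the image checksums to zero.
             */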
873             data[6] = 0;
874 
875             for (csum = 0, i = 0; i < vdev->rom_size; i++) {
876                 csum += data[i];
877             }
878 
879             data[6] = -csum;
880         }
881     }
882 }
883 
884 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
885 {
886     VFIOPCIDevice *vdev = opaque;
887     union {
888         uint8_t byte;
889         uint16_t word;
890         uint32_t dword;
891         uint64_t qword;
892     } val;
893     uint64_t data = 0;
894 
895     /* Load the ROM lazily when the guest tries to read it */
896     if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
897         vfio_pci_load_rom(vdev);
898     }
899 
900     memcpy(&val, vdev->rom + addr,
901            (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
902 
903     switch (size) {
904     case 1:
905         data = val.byte;
906         break;
907     case 2:
908         data = le16_to_cpu(val.word);
909         break;
910     case 4:
911         data = le32_to_cpu(val.dword);
912         break;
913     default:
914         hw_error("vfio: unsupported read size, %d bytes\n", size);
915         break;
916     }
917 
918     trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
919 
920     return data;
921 }
922 
923 static void vfio_rom_write(void *opaque, hwaddr addr,
924                            uint64_t data, unsigned size)
925 {
926 }
927 
928 static const MemoryRegionOps vfio_rom_ops = {
929     .read = vfio_rom_read,
930     .write = vfio_rom_write,
931     .endianness = DEVICE_LITTLE_ENDIAN,
932 };
933 
934 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
935 {
936     uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
937     off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
938     DeviceState *dev = DEVICE(vdev);
939     char *name;
940     int fd = vdev->vbasedev.fd;
941 
942     if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
943         /* Since pci handles romfile, just print a message and return */
944         if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
945             error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
946                          vdev->vbasedev.name);
947         }
948         return;
949     }
950 
951     /*
952      * Use the same size ROM BAR as the physical device.  The contents
953      * will get filled in later when the guest tries to read it.
954      */
955     if (pread(fd, &orig, 4, offset) != 4 ||
956         pwrite(fd, &size, 4, offset) != 4 ||
957         pread(fd, &size, 4, offset) != 4 ||
958         pwrite(fd, &orig, 4, offset) != 4) {
959         error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
960         return;
961     }
962 
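    /* Standard PCI BAR sizing: mask off the flag bits, invert and add one */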
963     size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
964 
965     if (!size) {
966         return;
967     }
968 
969     if (vfio_blacklist_opt_rom(vdev)) {
970         if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
971             error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
972                          vdev->vbasedev.name);
973         } else {
974             error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
975                          vdev->vbasedev.name);
976             return;
977         }
978     }
979 
980     trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
981 
982     name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
983 
984     memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
985                           &vfio_rom_ops, vdev, name, size);
986     g_free(name);
987 
988     pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
989                      PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
990 
991     vdev->pdev.has_rom = true;
992     vdev->rom_read_failed = false;
993 }
994 
995 void vfio_vga_write(void *opaque, hwaddr addr,
996                            uint64_t data, unsigned size)
997 {
998     VFIOVGARegion *region = opaque;
999     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1000     union {
1001         uint8_t byte;
1002         uint16_t word;
1003         uint32_t dword;
1004         uint64_t qword;
1005     } buf;
1006     off_t offset = vga->fd_offset + region->offset + addr;
1007 
1008     switch (size) {
1009     case 1:
1010         buf.byte = data;
1011         break;
1012     case 2:
1013         buf.word = cpu_to_le16(data);
1014         break;
1015     case 4:
1016         buf.dword = cpu_to_le32(data);
1017         break;
1018     default:
1019         hw_error("vfio: unsupported write size, %d bytes", size);
1020         break;
1021     }
1022 
1023     if (pwrite(vga->fd, &buf, size, offset) != size) {
1024         error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1025                      __func__, region->offset + addr, data, size);
1026     }
1027 
1028     trace_vfio_vga_write(region->offset + addr, data, size);
1029 }
1030 
1031 uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1032 {
1033     VFIOVGARegion *region = opaque;
1034     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1035     union {
1036         uint8_t byte;
1037         uint16_t word;
1038         uint32_t dword;
1039         uint64_t qword;
1040     } buf;
1041     uint64_t data = 0;
1042     off_t offset = vga->fd_offset + region->offset + addr;
1043 
1044     if (pread(vga->fd, &buf, size, offset) != size) {
1045         error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1046                      __func__, region->offset + addr, size);
1047         return (uint64_t)-1;
1048     }
1049 
1050     switch (size) {
1051     case 1:
1052         data = buf.byte;
1053         break;
1054     case 2:
1055         data = le16_to_cpu(buf.word);
1056         break;
1057     case 4:
1058         data = le32_to_cpu(buf.dword);
1059         break;
1060     default:
1061         hw_error("vfio: unsupported read size, %d bytes", size);
1062         break;
1063     }
1064 
1065     trace_vfio_vga_read(region->offset + addr, size, data);
1066 
1067     return data;
1068 }
1069 
1070 static const MemoryRegionOps vfio_vga_ops = {
1071     .read = vfio_vga_read,
1072     .write = vfio_vga_write,
1073     .endianness = DEVICE_LITTLE_ENDIAN,
1074 };
1075 
1076 /*
1077  * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to the
1078  * host page size when the BAR occupies an exclusive page on the host, so
1079  * that we can mmap this BAR into the guest.  The sub-page BAR may not
1080  * occupy an exclusive page in the guest, however, so the expanded memory
1081  * region is given priority zero in case it overlaps BARs that share the
1082  * same guest page with the sub-page BAR.  We also restore the original
1083  * size of this sub-page BAR when its guest base address is changed and is
1084  * no longer page aligned.
1085  */
1086 static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
1087 {
1088     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1089     VFIORegion *region = &vdev->bars[bar].region;
1090     MemoryRegion *mmap_mr, *region_mr, *base_mr;
1091     PCIIORegion *r;
1092     pcibus_t bar_addr;
1093     uint64_t size = region->size;
1094 
1095     /* Make sure that the whole region is allowed to be mmapped */
1096     if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
1097         region->mmaps[0].size != region->size) {
1098         return;
1099     }
1100 
1101     r = &pdev->io_regions[bar];
1102     bar_addr = r->addr;
1103     base_mr = vdev->bars[bar].mr;
1104     region_mr = region->mem;
1105     mmap_mr = &region->mmaps[0].mem;
1106 
1107     /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
1108     if (bar_addr != PCI_BAR_UNMAPPED &&
1109         !(bar_addr & ~qemu_real_host_page_mask)) {
1110         size = qemu_real_host_page_size;
1111     }
1112 
1113     memory_region_transaction_begin();
1114 
1115     if (vdev->bars[bar].size < size) {
1116         memory_region_set_size(base_mr, size);
1117     }
1118     memory_region_set_size(region_mr, size);
1119     memory_region_set_size(mmap_mr, size);
1120     if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
1121         memory_region_del_subregion(r->address_space, base_mr);
1122         memory_region_add_subregion_overlap(r->address_space,
1123                                             bar_addr, base_mr, 0);
1124     }
1125 
1126     memory_region_transaction_commit();
1127 }
1128 
1129 /*
1130  * PCI config space
1131  */
1132 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1133 {
1134     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1135     uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1136 
1137     memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1138     emu_bits = le32_to_cpu(emu_bits);
1139 
1140     if (emu_bits) {
1141         emu_val = pci_default_read_config(pdev, addr, len);
1142     }
1143 
1144     if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1145         ssize_t ret;
1146 
1147         ret = pread(vdev->vbasedev.fd, &phys_val, len,
1148                     vdev->config_offset + addr);
1149         if (ret != len) {
1150             error_report("%s(%s, 0x%x, 0x%x) failed: %m",
1151                          __func__, vdev->vbasedev.name, addr, len);
1152             return -errno;
1153         }
1154         phys_val = le32_to_cpu(phys_val);
1155     }
1156 
1157     val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
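    /* Merge emulated bits from QEMU's config space with the rest read from the device */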
1158 
1159     trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1160 
1161     return val;
1162 }
1163 
1164 void vfio_pci_write_config(PCIDevice *pdev,
1165                            uint32_t addr, uint32_t val, int len)
1166 {
1167     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1168     uint32_t val_le = cpu_to_le32(val);
1169 
1170     trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1171 
1172     /* Write everything to VFIO, let it filter out what we can't write */
1173     if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
1174                 != len) {
1175         error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
1176                      __func__, vdev->vbasedev.name, addr, val, len);
1177     }
1178 
1179     /* MSI/MSI-X Enabling/Disabling */
1180     if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1181         ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1182         int is_enabled, was_enabled = msi_enabled(pdev);
1183 
1184         pci_default_write_config(pdev, addr, val, len);
1185 
1186         is_enabled = msi_enabled(pdev);
1187 
1188         if (!was_enabled) {
1189             if (is_enabled) {
1190                 vfio_msi_enable(vdev);
1191             }
1192         } else {
1193             if (!is_enabled) {
1194                 vfio_msi_disable(vdev);
1195             } else {
1196                 vfio_update_msi(vdev);
1197             }
1198         }
1199     } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1200         ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1201         int is_enabled, was_enabled = msix_enabled(pdev);
1202 
1203         pci_default_write_config(pdev, addr, val, len);
1204 
1205         is_enabled = msix_enabled(pdev);
1206 
1207         if (!was_enabled && is_enabled) {
1208             vfio_msix_enable(vdev);
1209         } else if (was_enabled && !is_enabled) {
1210             vfio_msix_disable(vdev);
1211         }
1212     } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
1213         range_covers_byte(addr, len, PCI_COMMAND)) {
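        /*
         * A BAR register or the command register changed: record the old BAR
         * addresses so any moved sub-page BAR can have its expanded mapping
         * updated below.
         */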
1214         pcibus_t old_addr[PCI_NUM_REGIONS - 1];
1215         int bar;
1216 
1217         for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1218             old_addr[bar] = pdev->io_regions[bar].addr;
1219         }
1220 
1221         pci_default_write_config(pdev, addr, val, len);
1222 
1223         for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1224             if (old_addr[bar] != pdev->io_regions[bar].addr &&
1225                 vdev->bars[bar].region.size > 0 &&
1226                 vdev->bars[bar].region.size < qemu_real_host_page_size) {
1227                 vfio_sub_page_bar_update_mapping(pdev, bar);
1228             }
1229         }
1230     } else {
1231         /* Write everything to QEMU to keep emulated bits correct */
1232         pci_default_write_config(pdev, addr, val, len);
1233     }
1234 }
1235 
1236 /*
1237  * Interrupt setup
1238  */
1239 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1240 {
1241     /*
1242      * More complicated than it looks.  Disabling MSI/X transitions the
1243      * device to INTx mode (if supported).  Therefore we need to first
1244      * disable MSI/X and then clean up by disabling INTx.
1245      */
1246     if (vdev->interrupt == VFIO_INT_MSIX) {
1247         vfio_msix_disable(vdev);
1248     } else if (vdev->interrupt == VFIO_INT_MSI) {
1249         vfio_msi_disable(vdev);
1250     }
1251 
1252     if (vdev->interrupt == VFIO_INT_INTx) {
1253         vfio_intx_disable(vdev);
1254     }
1255 }
1256 
1257 static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1258 {
1259     uint16_t ctrl;
1260     bool msi_64bit, msi_maskbit;
1261     int ret, entries;
1262     Error *err = NULL;
1263 
1264     if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
1265               vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1266         error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
1267         return -errno;
1268     }
1269     ctrl = le16_to_cpu(ctrl);
1270 
1271     msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1272     msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
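    /* The Multiple Message Capable field encodes log2 of the supported vector count */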
1273     entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1274 
1275     trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1276 
1277     ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
1278     if (ret < 0) {
1279         if (ret == -ENOTSUP) {
1280             return 0;
1281         }
1282         error_prepend(&err, "msi_init failed: ");
1283         error_propagate(errp, err);
1284         return ret;
1285     }
1286     vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1287 
1288     return 0;
1289 }
1290 
1291 static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
1292 {
1293     off_t start, end;
1294     VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
1295 
1296     /*
1297      * We expect to find a single mmap covering the whole BAR, anything else
1298      * means it's either unsupported or already setup.
1299      */
1300     if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
1301         region->size != region->mmaps[0].size) {
1302         return;
1303     }
1304 
1305     /* MSI-X table start and end aligned to host page size */
1306     start = vdev->msix->table_offset & qemu_real_host_page_mask;
1307     end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1308                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1309 
1310     /*
1311      * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
1312      * NB - Host page size is necessarily a power of two and so is the PCI
1313      * BAR (not counting EA yet), therefore if we have host page aligned
1314      * @start and @end, then any remainder of the BAR before or after those
1315      * must be at least host page sized and therefore mmap'able.
1316      */
1317     if (!start) {
1318         if (end >= region->size) {
1319             region->nr_mmaps = 0;
1320             g_free(region->mmaps);
1321             region->mmaps = NULL;
1322             trace_vfio_msix_fixup(vdev->vbasedev.name,
1323                                   vdev->msix->table_bar, 0, 0);
1324         } else {
1325             region->mmaps[0].offset = end;
1326             region->mmaps[0].size = region->size - end;
1327             trace_vfio_msix_fixup(vdev->vbasedev.name,
1328                               vdev->msix->table_bar, region->mmaps[0].offset,
1329                               region->mmaps[0].offset + region->mmaps[0].size);
1330         }
1331 
1332     /* Maybe it's aligned at the end of the BAR */
1333     } else if (end >= region->size) {
1334         region->mmaps[0].size = start;
1335         trace_vfio_msix_fixup(vdev->vbasedev.name,
1336                               vdev->msix->table_bar, region->mmaps[0].offset,
1337                               region->mmaps[0].offset + region->mmaps[0].size);
1338 
1339     /* Otherwise it must split the BAR */
1340     } else {
1341         region->nr_mmaps = 2;
1342         region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
1343 
1344         memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
1345 
1346         region->mmaps[0].size = start;
1347         trace_vfio_msix_fixup(vdev->vbasedev.name,
1348                               vdev->msix->table_bar, region->mmaps[0].offset,
1349                               region->mmaps[0].offset + region->mmaps[0].size);
1350 
1351         region->mmaps[1].offset = end;
1352         region->mmaps[1].size = region->size - end;
1353         trace_vfio_msix_fixup(vdev->vbasedev.name,
1354                               vdev->msix->table_bar, region->mmaps[1].offset,
1355                               region->mmaps[1].offset + region->mmaps[1].size);
1356     }
1357 }
1358 
1359 static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
1360 {
1361     int target_bar = -1;
1362     size_t msix_sz;
1363 
1364     if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
1365         return;
1366     }
1367 
1368     /* The actual minimum size of MSI-X structures */
1369     msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
1370               (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
1371     /* Round up to host pages, we don't want to share a page */
1372     msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
1373     /* PCI BARs must be a power of 2 */
1374     msix_sz = pow2ceil(msix_sz);
1375 
1376     if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
1377         /*
1378          * TODO: Lookup table for known devices.
1379          *
1380          * Logically we might use an algorithm here to select the BAR adding
1381          * the least additional MMIO space, but we cannot programmatically
1382          * predict the driver dependency on BAR ordering or sizing, therefore
1383          * 'auto' becomes a lookup for combinations reported to work.
1384          */
1385         if (target_bar < 0) {
1386             error_setg(errp, "No automatic MSI-X relocation available for "
1387                        "device %04x:%04x", vdev->vendor_id, vdev->device_id);
1388             return;
1389         }
1390     } else {
1391         target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
1392     }
1393 
1394     /* I/O port BARs cannot host MSI-X structures */
1395     if (vdev->bars[target_bar].ioport) {
1396         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1397                    "I/O port BAR", target_bar);
1398         return;
1399     }
1400 
1401     /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
1402     if (!vdev->bars[target_bar].size &&
1403          target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
1404         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1405                    "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
1406         return;
1407     }
1408 
1409     /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
1410     if (vdev->bars[target_bar].size > (1 * 1024 * 1024 * 1024) &&
1411         !vdev->bars[target_bar].mem64) {
1412         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1413                    "no space to extend 32-bit BAR", target_bar);
1414         return;
1415     }
1416 
1417     /*
1418      * If adding a new BAR, test if we can make it 64bit.  We make it
1419      * prefetchable since QEMU MSI-X emulation has no read side effects
1420      * and doing so makes mapping more flexible.
1421      */
1422     if (!vdev->bars[target_bar].size) {
1423         if (target_bar < (PCI_ROM_SLOT - 1) &&
1424             !vdev->bars[target_bar + 1].size) {
1425             vdev->bars[target_bar].mem64 = true;
1426             vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
1427         }
1428         vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1429         vdev->bars[target_bar].size = msix_sz;
1430         vdev->msix->table_offset = 0;
1431     } else {
1432         vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
1433                                           msix_sz * 2);
1434         /*
1435          * Due to the size calculation above, MSI-X always starts halfway into the BAR,
1436          * which will always be a separate host page.
1437          */
1438         vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
1439     }
1440 
1441     vdev->msix->table_bar = target_bar;
1442     vdev->msix->pba_bar = target_bar;
1443     /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
1444     vdev->msix->pba_offset = vdev->msix->table_offset +
1445                                   (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);
1446 
1447     trace_vfio_msix_relo(vdev->vbasedev.name,
1448                          vdev->msix->table_bar, vdev->msix->table_offset);
1449 }
1450 
1451 /*
1452  * We don't have any control over how pci_add_capability() inserts
1453  * capabilities into the chain.  In order to setup MSI-X we need a
1454  * MemoryRegion for the BAR.  In order to setup the BAR and not
1455  * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1456  * need to first look for where the MSI-X table lives.  So we
1457  * unfortunately split MSI-X setup across two functions.
1458  */
1459 static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
1460 {
1461     uint8_t pos;
1462     uint16_t ctrl;
1463     uint32_t table, pba;
1464     int fd = vdev->vbasedev.fd;
1465     VFIOMSIXInfo *msix;
1466 
1467     pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1468     if (!pos) {
1469         return;
1470     }
1471 
1472     if (pread(fd, &ctrl, sizeof(ctrl),
1473               vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
1474         error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
1475         return;
1476     }
1477 
1478     if (pread(fd, &table, sizeof(table),
1479               vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
1480         error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
1481         return;
1482     }
1483 
1484     if (pread(fd, &pba, sizeof(pba),
1485               vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
1486         error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
1487         return;
1488     }
1489 
1490     ctrl = le16_to_cpu(ctrl);
1491     table = le32_to_cpu(table);
1492     pba = le32_to_cpu(pba);
1493 
1494     msix = g_malloc0(sizeof(*msix));
1495     msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1496     msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1497     msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1498     msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1499     msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1500 
1501     /*
1502      * Check whether pba_offset extends outside of the specified BAR.  If it
1503      * does, we need to apply a hardware-specific quirk if the device is
1504      * known, otherwise we have a broken configuration.
1505      */
1506     if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1507         /*
1508          * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
1509          * adapters. The T5 hardware returns an incorrect value of 0x8000 for
1510          * the VF PBA offset while the BAR itself is only 8k. The correct value
1511          * is 0x1000, so we hard code that here.
1512          */
1513         if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
1514             (vdev->device_id & 0xff00) == 0x5800) {
1515             msix->pba_offset = 0x1000;
1516         } else {
1517             error_setg(errp, "hardware reports invalid configuration, "
1518                        "MSIX PBA outside of specified BAR");
1519             g_free(msix);
1520             return;
1521         }
1522     }
1523 
1524     trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1525                                 msix->table_offset, msix->entries);
1526     vdev->msix = msix;
1527 
1528     vfio_pci_fixup_msix_region(vdev);
1529 
1530     vfio_pci_relocate_msix(vdev, errp);
1531 }
1532 
1533 static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1534 {
1535     int ret;
1536     Error *err = NULL;
1537 
1538     vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
1539                                     sizeof(unsigned long));
1540     ret = msix_init(&vdev->pdev, vdev->msix->entries,
1541                     vdev->bars[vdev->msix->table_bar].mr,
1542                     vdev->msix->table_bar, vdev->msix->table_offset,
1543                     vdev->bars[vdev->msix->pba_bar].mr,
1544                     vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
1545                     &err);
1546     if (ret < 0) {
1547         if (ret == -ENOTSUP) {
1548             error_report_err(err);
1549             return 0;
1550         }
1551 
1552         error_propagate(errp, err);
1553         return ret;
1554     }
1555 
1556     /*
1557      * The PCI spec suggests that devices provide additional alignment for
1558      * MSI-X structures and avoid overlapping non-MSI-X related registers.
1559      * For an assigned device, this hopefully means that emulation of MSI-X
1560      * structures does not affect the performance of the device.  If devices
1561      * fail to provide that alignment, a significant performance penalty may
1562      * result, for instance Mellanox MT27500 VFs:
1563      * http://www.spinics.net/lists/kvm/msg125881.html
1564      *
1565      * The PBA is simply not that important for such a serious regression and
1566      * most drivers do not appear to look at it.  The solution for this is to
1567      * disable the PBA MemoryRegion unless it's being used.  We disable it
1568      * here and only enable it if a masked vector fires through QEMU.  As the
1569      * vector-use notifier is called, which occurs on unmask, we test whether
1570      * PBA emulation is needed and again disable if not.
1571      */
1572     memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
1573 
1574     return 0;
1575 }
1576 
1577 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1578 {
1579     msi_uninit(&vdev->pdev);
1580 
1581     if (vdev->msix) {
1582         msix_uninit(&vdev->pdev,
1583                     vdev->bars[vdev->msix->table_bar].mr,
1584                     vdev->bars[vdev->msix->pba_bar].mr);
1585         g_free(vdev->msix->pending);
1586     }
1587 }
1588 
1589 /*
1590  * Resource setup
1591  */
1592 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1593 {
1594     int i;
1595 
1596     for (i = 0; i < PCI_ROM_SLOT; i++) {
1597         vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
1598     }
1599 }
1600 
1601 static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
1602 {
1603     VFIOBAR *bar = &vdev->bars[nr];
1604 
1605     uint32_t pci_bar;
1606     int ret;
1607 
1608     /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1609     if (!bar->region.size) {
1610         return;
1611     }
1612 
1613     /* Determine what type of BAR this is for registration */
1614     ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
1615                 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1616     if (ret != sizeof(pci_bar)) {
1617         error_report("vfio: Failed to read BAR %d (%m)", nr);
1618         return;
1619     }
1620 
1621     pci_bar = le32_to_cpu(pci_bar);
1622     bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1623     bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1624     bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1625                                          ~PCI_BASE_ADDRESS_MEM_MASK);
1626     bar->size = bar->region.size;
1627 }
1628 
1629 static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1630 {
1631     int i;
1632 
1633     for (i = 0; i < PCI_ROM_SLOT; i++) {
1634         vfio_bar_prepare(vdev, i);
1635     }
1636 }
1637 
1638 static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1639 {
1640     VFIOBAR *bar = &vdev->bars[nr];
1641     char *name;
1642 
1643     if (!bar->size) {
1644         return;
1645     }
1646 
1647     bar->mr = g_new0(MemoryRegion, 1);
1648     name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1649     memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1650     g_free(name);
1651 
1652     if (bar->region.size) {
1653         memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1654 
1655         if (vfio_region_mmap(&bar->region)) {
1656             error_report("Failed to mmap %s BAR %d. Performance may be slow",
1657                          vdev->vbasedev.name, nr);
1658         }
1659     }
1660 
1661     pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
1662 }
1663 
1664 static void vfio_bars_register(VFIOPCIDevice *vdev)
1665 {
1666     int i;
1667 
1668     for (i = 0; i < PCI_ROM_SLOT; i++) {
1669         vfio_bar_register(vdev, i);
1670     }
1671 }
1672 
1673 static void vfio_bars_exit(VFIOPCIDevice *vdev)
1674 {
1675     int i;
1676 
1677     for (i = 0; i < PCI_ROM_SLOT; i++) {
1678         VFIOBAR *bar = &vdev->bars[i];
1679 
1680         vfio_bar_quirk_exit(vdev, i);
1681         vfio_region_exit(&bar->region);
1682         if (bar->region.size) {
1683             memory_region_del_subregion(bar->mr, bar->region.mem);
1684         }
1685     }
1686 
1687     if (vdev->vga) {
1688         pci_unregister_vga(&vdev->pdev);
1689         vfio_vga_quirk_exit(vdev);
1690     }
1691 }
1692 
1693 static void vfio_bars_finalize(VFIOPCIDevice *vdev)
1694 {
1695     int i;
1696 
1697     for (i = 0; i < PCI_ROM_SLOT; i++) {
1698         VFIOBAR *bar = &vdev->bars[i];
1699 
1700         vfio_bar_quirk_finalize(vdev, i);
1701         vfio_region_finalize(&bar->region);
1702         if (bar->size) {
1703             object_unparent(OBJECT(bar->mr));
1704             g_free(bar->mr);
1705         }
1706     }
1707 
1708     if (vdev->vga) {
1709         vfio_vga_quirk_finalize(vdev);
1710         for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1711             object_unparent(OBJECT(&vdev->vga->region[i].mem));
1712         }
1713         g_free(vdev->vga);
1714     }
1715 }
1716 
1717 /*
1718  * General setup
1719  */
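/*
 * Return the distance from the capability at @pos to the next-higher
 * capability offset in the standard list (or to the end of standard config
 * space), which bounds the size that capability can occupy.
 */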
1720 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1721 {
1722     uint8_t tmp;
1723     uint16_t next = PCI_CONFIG_SPACE_SIZE;
1724 
1725     for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1726          tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
1727         if (tmp > pos && tmp < next) {
1728             next = tmp;
1729         }
1730     }
1731 
1732     return next - pos;
1733 }
1734 
1735 
1736 static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1737 {
1738     uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1739 
1740     for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1741         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1742         if (tmp > pos && tmp < next) {
1743             next = tmp;
1744         }
1745     }
1746 
1747     return next - pos;
1748 }
1749 
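/*
 * Helpers for emulating config space fields: the value is written into
 * QEMU's copy of config space, the write mask is cleared for those bits so
 * direct guest writes are dropped, and the bits are flagged in
 * emulated_config_bits so that reads are served from the emulated copy
 * rather than from the physical device.
 */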
1750 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1751 {
1752     pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1753 }
1754 
1755 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1756                                    uint16_t val, uint16_t mask)
1757 {
1758     vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1759     vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1760     vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1761 }
1762 
1763 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1764 {
1765     pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1766 }
1767 
1768 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1769                                    uint32_t val, uint32_t mask)
1770 {
1771     vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1772     vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1773     vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1774 }
1775 
1776 static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
1777                                Error **errp)
1778 {
1779     uint16_t flags;
1780     uint8_t type;
1781 
1782     flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1783     type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1784 
1785     if (type != PCI_EXP_TYPE_ENDPOINT &&
1786         type != PCI_EXP_TYPE_LEG_END &&
1787         type != PCI_EXP_TYPE_RC_END) {
1788 
1789         error_setg(errp, "assignment of PCIe type 0x%x "
1790                    "devices is not currently supported", type);
1791         return -EINVAL;
1792     }
1793 
1794     if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
1795         PCIBus *bus = pci_get_bus(&vdev->pdev);
1796         PCIDevice *bridge;
1797 
1798         /*
1799          * Traditionally PCI device assignment exposes the PCIe capability
1800          * as-is on non-express buses.  The reason being that some drivers
1801          * simply assume that it's there, for example tg3.  However when
1802          * we're running on a native PCIe machine type, like Q35, we need
1803          * to hide the PCIe capability.  The reason for this is twofold;
1804          * first Windows guests get a Code 10 error when the PCIe capability
1805          * is exposed in this configuration.  Therefore express devices won't
1806          * work at all unless they're attached to express buses in the VM.
1807          * Second, a native PCIe machine introduces the possibility of fine
1808          * granularity IOMMUs supporting both translation and isolation.
1809          * Guest code to discover the IOMMU visibility of a device, such as
1810          * IOMMU grouping code on Linux, is very aware of device types and
1811          * valid transitions between bus types.  An express device on a non-
1812          * express bus is not a valid combination on bare metal systems.
1813          *
1814          * Drivers that require a PCIe capability to make the device
1815          * functional are simply going to need to have their devices placed
1816          * on a PCIe bus in the VM.
1817          */
1818         while (!pci_bus_is_root(bus)) {
1819             bridge = pci_bridge_get_device(bus);
1820             bus = pci_get_bus(bridge);
1821         }
1822 
1823         if (pci_bus_is_express(bus)) {
1824             return 0;
1825         }
1826 
1827     } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
1828         /*
1829          * On a Root Complex bus Endpoints become Root Complex Integrated
1830          * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1831          */
1832         if (type == PCI_EXP_TYPE_ENDPOINT) {
1833             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1834                                    PCI_EXP_TYPE_RC_END << 4,
1835                                    PCI_EXP_FLAGS_TYPE);
1836 
1837             /* Link Capabilities, Status, and Control go away */
1838             if (size > PCI_EXP_LNKCTL) {
1839                 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1840                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1841                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1842 
1843 #ifndef PCI_EXP_LNKCAP2
1844 #define PCI_EXP_LNKCAP2 44
1845 #endif
1846 #ifndef PCI_EXP_LNKSTA2
1847 #define PCI_EXP_LNKSTA2 50
1848 #endif
1849                 /* Link 2 Capabilities, Status, and Control go away */
1850                 if (size > PCI_EXP_LNKCAP2) {
1851                     vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1852                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1853                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1854                 }
1855             }
1856 
1857         } else if (type == PCI_EXP_TYPE_LEG_END) {
1858             /*
1859              * Legacy endpoints don't belong on the root complex.  Windows
1860              * seems to be happier with devices if we skip the capability.
1861              */
1862             return 0;
1863         }
1864 
1865     } else {
1866         /*
1867          * Convert Root Complex Integrated Endpoints to regular endpoints.
1868          * These devices don't support LNK/LNK2 capabilities, so make them up.
1869          */
1870         if (type == PCI_EXP_TYPE_RC_END) {
1871             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1872                                    PCI_EXP_TYPE_ENDPOINT << 4,
1873                                    PCI_EXP_FLAGS_TYPE);
1874             vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1875                                    PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1876             vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1877         }
1878 
1879         /* Mark the Link Status bits as emulated to allow virtual negotiation */
1880         vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1881                                pci_get_word(vdev->pdev.config + pos +
1882                                             PCI_EXP_LNKSTA),
1883                                PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1884     }
1885 
1886     /*
1887      * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
1888      * (Niantic errata #35) causing Windows to error with a Code 10 for the
1889      * device on Q35.  Fixup any such devices to report version 1.  If we
1890      * were to remove the capability entirely the guest would lose extended
1891      * config space.
1892      */
1893     if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
1894         vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1895                                1, PCI_EXP_FLAGS_VERS);
1896     }
1897 
1898     pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
1899                              errp);
1900     if (pos < 0) {
1901         return pos;
1902     }
1903 
1904     vdev->pdev.exp.exp_cap = pos;
1905 
1906     return pos;
1907 }
1908 
1909 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
1910 {
1911     uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1912 
1913     if (cap & PCI_EXP_DEVCAP_FLR) {
1914         trace_vfio_check_pcie_flr(vdev->vbasedev.name);
1915         vdev->has_flr = true;
1916     }
1917 }
1918 
1919 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
1920 {
1921     uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1922 
1923     if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
1924         trace_vfio_check_pm_reset(vdev->vbasedev.name);
1925         vdev->has_pm_reset = true;
1926     }
1927 }
1928 
1929 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
1930 {
1931     uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1932 
1933     if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
1934         trace_vfio_check_af_flr(vdev->vbasedev.name);
1935         vdev->has_flr = true;
1936     }
1937 }
1938 
1939 static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
1940 {
1941     PCIDevice *pdev = &vdev->pdev;
1942     uint8_t cap_id, next, size;
1943     int ret;
1944 
1945     cap_id = pdev->config[pos];
1946     next = pdev->config[pos + PCI_CAP_LIST_NEXT];
1947 
1948     /*
1949      * If it becomes important to configure capabilities to their actual
1950      * size, use this as the default when it's something we don't recognize.
1951      * Since QEMU doesn't actually handle many of the config accesses,
1952      * exact size doesn't seem worthwhile.
1953      */
1954     size = vfio_std_cap_max_size(pdev, pos);
1955 
1956     /*
1957      * pci_add_capability always inserts the new capability at the head
1958      * of the chain.  Therefore to end up with a chain that matches the
1959      * physical device, we insert from the end by making this recursive.
1960      * This is also why we pre-calculate size above as cached config space
1961      * will be changed as we unwind the stack.
1962      */
1963     if (next) {
1964         ret = vfio_add_std_cap(vdev, next, errp);
1965         if (ret) {
1966             return ret;
1967         }
1968     } else {
1969         /* Begin the rebuild, use QEMU emulated list bits */
1970         pdev->config[PCI_CAPABILITY_LIST] = 0;
1971         vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1972         vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1973 
1974         ret = vfio_add_virt_caps(vdev, errp);
1975         if (ret) {
1976             return ret;
1977         }
1978     }
1979 
1980     /* Scale down size, esp in case virt caps were added above */
1981     size = MIN(size, vfio_std_cap_max_size(pdev, pos));
1982 
1983     /* Use emulated next pointer to allow dropping caps */
1984     pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
1985 
1986     switch (cap_id) {
1987     case PCI_CAP_ID_MSI:
1988         ret = vfio_msi_setup(vdev, pos, errp);
1989         break;
1990     case PCI_CAP_ID_EXP:
1991         vfio_check_pcie_flr(vdev, pos);
1992         ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
1993         break;
1994     case PCI_CAP_ID_MSIX:
1995         ret = vfio_msix_setup(vdev, pos, errp);
1996         break;
1997     case PCI_CAP_ID_PM:
1998         vfio_check_pm_reset(vdev, pos);
1999         vdev->pm_cap = pos;
2000         ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2001         break;
2002     case PCI_CAP_ID_AF:
2003         vfio_check_af_flr(vdev, pos);
2004         ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2005         break;
2006     default:
2007         ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2008         break;
2009     }
2010 
2011     if (ret < 0) {
2012         error_prepend(errp,
2013                       "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
2014                       cap_id, size, pos);
2015         return ret;
2016     }
2017 
2018     return 0;
2019 }
2020 
2021 static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
2022 {
2023     PCIDevice *pdev = &vdev->pdev;
2024     uint32_t header;
2025     uint16_t cap_id, next, size;
2026     uint8_t cap_ver;
2027     uint8_t *config;
2028 
2029     /* Only add extended caps if we have them and the guest can see them */
2030     if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
2031         !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
2032         return;
2033     }
2034 
2035     /*
2036      * pcie_add_capability always inserts the new capability at the tail
2037      * of the chain.  Therefore to end up with a chain that matches the
2038      * physical device, we cache the config space to avoid overwriting
2039      * the original config space when we parse the extended capabilities.
2040      */
2041     config = g_memdup(pdev->config, vdev->config_size);
2042 
2043     /*
2044      * Extended capabilities are chained with each pointing to the next, so we
2045      * can drop anything other than the head of the chain simply by modifying
2046      * the previous next pointer.  Seed the head of the chain here such that
2047      * we can simply skip any capabilities we want to drop below, regardless
2048      * of their position in the chain.  If this stub capability still exists
2049      * after we add the capabilities we want to expose, update the capability
2050      * ID to zero.  Note that we cannot seed with the capability header being
2051      * zero as this conflicts with the definition of an absent capability chain
2052      * and prevents capabilities beyond the head of the list from being added.
2053      * By replacing the dummy capability ID with zero after walking the device
2054      * chain, we also transparently mark extended capabilities as absent if
2055      * no capabilities were added.  Note that the PCIe spec defines an absence
2056      * of extended capabilities to be determined by a value of zero for the
2057      * capability ID, version, AND next pointer.  A non-zero next pointer
2058      * should be sufficient to indicate additional capabilities are present,
2059      * which will occur if we call pcie_add_capability() below.  The entire
2060      * first dword is emulated to support this.
2061      *
2062      * NB. The kernel side does similar masking, so be prepared that our
2063      * view of the device may also contain a capability ID zero in the head
2064      * of the chain.  Skip it for the same reason that we cannot seed the
2065      * chain with a zero capability.
2066      */
2067     pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2068                  PCI_EXT_CAP(0xFFFF, 0, 0));
2069     pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2070     pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2071 
2072     for (next = PCI_CONFIG_SPACE_SIZE; next;
2073          next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2074         header = pci_get_long(config + next);
2075         cap_id = PCI_EXT_CAP_ID(header);
2076         cap_ver = PCI_EXT_CAP_VER(header);
2077 
2078         /*
2079          * If it becomes important to configure extended capabilities to their
2080          * actual size, use this as the default when it's something we don't
2081          * recognize. Since QEMU doesn't actually handle many of the config
2082          * accesses, exact size doesn't seem worthwhile.
2083          */
2084         size = vfio_ext_cap_max_size(config, next);
2085 
2086         /* Use emulated next pointer to allow dropping extended caps */
2087         pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2088                                    PCI_EXT_CAP_NEXT_MASK);
2089 
2090         switch (cap_id) {
2091         case 0: /* kernel masked capability */
2092         case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
2093         case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
2094             trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2095             break;
2096         default:
2097             pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2098         }
2099 
2100     }
2101 
2102     /* Cleanup chain head ID if necessary */
2103     if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2104         pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
2105     }
2106 
2107     g_free(config);
2108     return;
2109 }
2110 
2111 static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
2112 {
2113     PCIDevice *pdev = &vdev->pdev;
2114     int ret;
2115 
2116     if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2117         !pdev->config[PCI_CAPABILITY_LIST]) {
2118         return 0; /* Nothing to add */
2119     }
2120 
2121     ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
2122     if (ret) {
2123         return ret;
2124     }
2125 
2126     vfio_add_ext_cap(vdev);
2127     return 0;
2128 }
2129 
2130 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
2131 {
2132     PCIDevice *pdev = &vdev->pdev;
2133     uint16_t cmd;
2134 
2135     vfio_disable_interrupts(vdev);
2136 
2137     /* Make sure the device is in D0 */
2138     if (vdev->pm_cap) {
2139         uint16_t pmcsr;
2140         uint8_t state;
2141 
2142         pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2143         state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2144         if (state) {
2145             pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2146             vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2147             /* vfio handles the necessary delay here */
2148             pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2149             state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2150             if (state) {
2151                 error_report("vfio: Unable to power on device, stuck in D%d",
2152                              state);
2153             }
2154         }
2155     }
2156 
2157     /*
2158      * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2159      * Also put INTx Disable in a known state.
2160      */
2161     cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2162     cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2163              PCI_COMMAND_INTX_DISABLE);
2164     vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2165 }
2166 
2167 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
2168 {
2169     Error *err = NULL;
2170     int nr;
2171 
2172     vfio_intx_enable(vdev, &err);
2173     if (err) {
2174         error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
2175     }
2176 
2177     for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2178         off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
2179         uint32_t val = 0;
2180         uint32_t len = sizeof(val);
2181 
2182         if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
2183             error_report("%s(%s) reset bar %d failed: %m", __func__,
2184                          vdev->vbasedev.name, nr);
2185         }
2186     }
2187 }
2188 
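/*
 * Match a host PCI address against a vbasedev.name, which holds the
 * canonical "dddd:bb:dd.f" form (12 characters plus the terminating NUL,
 * hence the 13-byte buffer).
 */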
2189 static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
2190 {
2191     char tmp[13];
2192 
2193     sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2194             addr->bus, addr->slot, addr->function);
2195 
2196     return (strcmp(tmp, name) == 0);
2197 }
2198 
2199 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
2200 {
2201     VFIOGroup *group;
2202     struct vfio_pci_hot_reset_info *info;
2203     struct vfio_pci_dependent_device *devices;
2204     struct vfio_pci_hot_reset *reset;
2205     int32_t *fds;
2206     int ret, i, count;
2207     bool multi = false;
2208 
2209     trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
2210 
2211     if (!single) {
2212         vfio_pci_pre_reset(vdev);
2213     }
2214     vdev->vbasedev.needs_reset = false;
2215 
2216     info = g_malloc0(sizeof(*info));
2217     info->argsz = sizeof(*info);
2218 
2219     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2220     if (ret && errno != ENOSPC) {
2221         ret = -errno;
2222         if (!vdev->has_pm_reset) {
2223             error_report("vfio: Cannot reset device %s, "
2224                          "no available reset mechanism.", vdev->vbasedev.name);
2225         }
2226         goto out_single;
2227     }
2228 
2229     count = info->count;
2230     info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2231     info->argsz = sizeof(*info) + (count * sizeof(*devices));
2232     devices = &info->devices[0];
2233 
2234     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2235     if (ret) {
2236         ret = -errno;
2237         error_report("vfio: hot reset info failed: %m");
2238         goto out_single;
2239     }
2240 
2241     trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
2242 
2243     /* Verify that we have all the groups required */
2244     for (i = 0; i < info->count; i++) {
2245         PCIHostDeviceAddress host;
2246         VFIOPCIDevice *tmp;
2247         VFIODevice *vbasedev_iter;
2248 
2249         host.domain = devices[i].segment;
2250         host.bus = devices[i].bus;
2251         host.slot = PCI_SLOT(devices[i].devfn);
2252         host.function = PCI_FUNC(devices[i].devfn);
2253 
2254         trace_vfio_pci_hot_reset_dep_devices(host.domain,
2255                 host.bus, host.slot, host.function, devices[i].group_id);
2256 
2257         if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2258             continue;
2259         }
2260 
2261         QLIST_FOREACH(group, &vfio_group_list, next) {
2262             if (group->groupid == devices[i].group_id) {
2263                 break;
2264             }
2265         }
2266 
2267         if (!group) {
2268             if (!vdev->has_pm_reset) {
2269                 error_report("vfio: Cannot reset device %s, "
2270                              "depends on group %d which is not owned.",
2271                              vdev->vbasedev.name, devices[i].group_id);
2272             }
2273             ret = -EPERM;
2274             goto out;
2275         }
2276 
2277         /* Prep dependent devices for reset and clear our marker. */
2278         QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2279             if (!vbasedev_iter->dev->realized ||
2280                 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2281                 continue;
2282             }
2283             tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2284             if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2285                 if (single) {
2286                     ret = -EINVAL;
2287                     goto out_single;
2288                 }
2289                 vfio_pci_pre_reset(tmp);
2290                 tmp->vbasedev.needs_reset = false;
2291                 multi = true;
2292                 break;
2293             }
2294         }
2295     }
2296 
2297     if (!single && !multi) {
2298         ret = -EINVAL;
2299         goto out_single;
2300     }
2301 
2302     /* Determine how many group fds need to be passed */
2303     count = 0;
2304     QLIST_FOREACH(group, &vfio_group_list, next) {
2305         for (i = 0; i < info->count; i++) {
2306             if (group->groupid == devices[i].group_id) {
2307                 count++;
2308                 break;
2309             }
2310         }
2311     }
2312 
2313     reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2314     reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2315     fds = &reset->group_fds[0];
2316 
2317     /* Fill in group fds */
2318     QLIST_FOREACH(group, &vfio_group_list, next) {
2319         for (i = 0; i < info->count; i++) {
2320             if (group->groupid == devices[i].group_id) {
2321                 fds[reset->count++] = group->fd;
2322                 break;
2323             }
2324         }
2325     }
2326 
2327     /* Bus reset! */
2328     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
2329     g_free(reset);
2330 
2331     trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
2332                                     ret ? "%m" : "Success");
2333 
2334 out:
2335     /* Re-enable INTx on affected devices */
2336     for (i = 0; i < info->count; i++) {
2337         PCIHostDeviceAddress host;
2338         VFIOPCIDevice *tmp;
2339         VFIODevice *vbasedev_iter;
2340 
2341         host.domain = devices[i].segment;
2342         host.bus = devices[i].bus;
2343         host.slot = PCI_SLOT(devices[i].devfn);
2344         host.function = PCI_FUNC(devices[i].devfn);
2345 
2346         if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2347             continue;
2348         }
2349 
2350         QLIST_FOREACH(group, &vfio_group_list, next) {
2351             if (group->groupid == devices[i].group_id) {
2352                 break;
2353             }
2354         }
2355 
2356         if (!group) {
2357             break;
2358         }
2359 
2360         QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2361             if (!vbasedev_iter->dev->realized ||
2362                 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2363                 continue;
2364             }
2365             tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2366             if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2367                 vfio_pci_post_reset(tmp);
2368                 break;
2369             }
2370         }
2371     }
2372 out_single:
2373     if (!single) {
2374         vfio_pci_post_reset(vdev);
2375     }
2376     g_free(info);
2377 
2378     return ret;
2379 }
2380 
2381 /*
2382  * We want to differentiate hot reset of multiple in-use devices vs hot reset
2383  * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
2384  * of doing hot resets when there is only a single device per bus.  The in-use
2385  * here refers to how many VFIODevices are affected.  A hot reset that affects
2386  * multiple devices, but only a single in-use device, means that we can call
2387  * it from our bus ->reset() callback since the extent is effectively a single
2388  * device.  This allows us to make use of it in the hotplug path.  When there
2389  * are multiple in-use devices, we can only trigger the hot reset during a
2390  * system reset and thus from our reset handler.  We separate _one vs _multi
2391  * here so that we don't overlap and do a double reset on the system reset
2392  * path where both our reset handler and ->reset() callback are used.  Calling
2393  * _one() will only do a hot reset for the one in-use devices case, calling
2394  * _multi() will do nothing if a _one() would have been sufficient.
2395  */
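/*
 * _one() is called from the per-device reset path (vfio_pci_reset below);
 * _multi() is hooked into VFIODeviceOps so that the system reset handler
 * mentioned above can invoke it.
 */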
2396 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2397 {
2398     return vfio_pci_hot_reset(vdev, true);
2399 }
2400 
2401 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2402 {
2403     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2404     return vfio_pci_hot_reset(vdev, false);
2405 }
2406 
2407 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2408 {
2409     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2410     if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2411         vbasedev->needs_reset = true;
2412     }
2413 }
2414 
2415 static VFIODeviceOps vfio_pci_ops = {
2416     .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2417     .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2418     .vfio_eoi = vfio_intx_eoi,
2419 };
2420 
2421 int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
2422 {
2423     VFIODevice *vbasedev = &vdev->vbasedev;
2424     struct vfio_region_info *reg_info;
2425     int ret;
2426 
2427     ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2428     if (ret) {
2429         error_setg_errno(errp, -ret,
2430                          "failed getting region info for VGA region index %d",
2431                          VFIO_PCI_VGA_REGION_INDEX);
2432         return ret;
2433     }
2434 
2435     if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2436         !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2437         reg_info->size < 0xbffff + 1) {
2438         error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2439                    (unsigned long)reg_info->flags,
2440                    (unsigned long)reg_info->size);
2441         g_free(reg_info);
2442         return -EINVAL;
2443     }
2444 
2445     vdev->vga = g_new0(VFIOVGA, 1);
2446 
2447     vdev->vga->fd_offset = reg_info->offset;
2448     vdev->vga->fd = vdev->vbasedev.fd;
2449 
2450     g_free(reg_info);
2451 
2452     vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2453     vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2454     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2455 
2456     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2457                           OBJECT(vdev), &vfio_vga_ops,
2458                           &vdev->vga->region[QEMU_PCI_VGA_MEM],
2459                           "vfio-vga-mmio@0xa0000",
2460                           QEMU_PCI_VGA_MEM_SIZE);
2461 
2462     vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2463     vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2464     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2465 
2466     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2467                           OBJECT(vdev), &vfio_vga_ops,
2468                           &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2469                           "vfio-vga-io@0x3b0",
2470                           QEMU_PCI_VGA_IO_LO_SIZE);
2471 
2472     vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2473     vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2474     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2475 
2476     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2477                           OBJECT(vdev), &vfio_vga_ops,
2478                           &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2479                           "vfio-vga-io@0x3c0",
2480                           QEMU_PCI_VGA_IO_HI_SIZE);
2481 
2482     pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2483                      &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2484                      &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2485 
2486     return 0;
2487 }
2488 
2489 static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
2490 {
2491     VFIODevice *vbasedev = &vdev->vbasedev;
2492     struct vfio_region_info *reg_info;
2493     struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
2494     int i, ret = -1;
2495 
2496     /* Sanity check device */
2497     if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2498         error_setg(errp, "this isn't a PCI device");
2499         return;
2500     }
2501 
2502     if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2503         error_setg(errp, "unexpected number of io regions %u",
2504                    vbasedev->num_regions);
2505         return;
2506     }
2507 
2508     if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2509         error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
2510         return;
2511     }
2512 
2513     for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2514         char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2515 
2516         ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2517                                 &vdev->bars[i].region, i, name);
2518         g_free(name);
2519 
2520         if (ret) {
2521             error_setg_errno(errp, -ret, "failed to get region %d info", i);
2522             return;
2523         }
2524 
2525         QLIST_INIT(&vdev->bars[i].quirks);
2526     }
2527 
2528     ret = vfio_get_region_info(vbasedev,
2529                                VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2530     if (ret) {
2531         error_setg_errno(errp, -ret, "failed to get config info");
2532         return;
2533     }
2534 
2535     trace_vfio_populate_device_config(vdev->vbasedev.name,
2536                                       (unsigned long)reg_info->size,
2537                                       (unsigned long)reg_info->offset,
2538                                       (unsigned long)reg_info->flags);
2539 
2540     vdev->config_size = reg_info->size;
2541     if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2542         vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2543     }
2544     vdev->config_offset = reg_info->offset;
2545 
2546     g_free(reg_info);
2547 
2548     if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2549         ret = vfio_populate_vga(vdev, errp);
2550         if (ret) {
2551             error_append_hint(errp, "device does not support "
2552                               "requested feature x-vga\n");
2553             return;
2554         }
2555     }
2556 
2557     irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2558 
2559     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2560     if (ret) {
2561         /* This can fail for an old kernel or legacy PCI dev */
2562         trace_vfio_populate_device_get_irq_info_failure();
2563     } else if (irq_info.count == 1) {
2564         vdev->pci_aer = true;
2565     } else {
2566         error_report(WARN_PREFIX
2567                      "Could not enable error recovery for the device",
2568                      vbasedev->name);
2569     }
2570 }
2571 
2572 static void vfio_put_device(VFIOPCIDevice *vdev)
2573 {
2574     g_free(vdev->vbasedev.name);
2575     g_free(vdev->msix);
2576 
2577     vfio_put_base_device(&vdev->vbasedev);
2578 }
2579 
2580 static void vfio_err_notifier_handler(void *opaque)
2581 {
2582     VFIOPCIDevice *vdev = opaque;
2583 
2584     if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2585         return;
2586     }
2587 
2588     /*
2589      * TBD. Retrieve the error details and decide what action
2590      * needs to be taken. One of the actions could be to pass
2591      * the error to the guest and have the guest driver recover
2592      * from the error. This requires that PCIe capabilities be
2593      * exposed to the guest. For now, we just terminate the
2594      * guest to contain the error.
2595      */
2596 
2597     error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
2598 
2599     vm_stop(RUN_STATE_INTERNAL_ERROR);
2600 }
2601 
2602 /*
2603  * Registers error notifier for devices supporting error recovery.
2604  * If we encounter a failure in this function, we report an error
2605  * and continue after disabling error recovery support for the
2606  * device.
2607  */
2608 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2609 {
2610     int ret;
2611     int argsz;
2612     struct vfio_irq_set *irq_set;
2613     int32_t *pfd;
2614 
2615     if (!vdev->pci_aer) {
2616         return;
2617     }
2618 
2619     if (event_notifier_init(&vdev->err_notifier, 0)) {
2620         error_report("vfio: Unable to init event notifier for error detection");
2621         vdev->pci_aer = false;
2622         return;
2623     }
2624 
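    /*
     * Wire the eventfd into the device's error IRQ index via
     * VFIO_DEVICE_SET_IRQS: the variable-length vfio_irq_set carries a
     * single eventfd descriptor in its data payload, and writing an fd of
     * -1 with the same layout later de-assigns it (see the unregister path
     * below).
     */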
2625     argsz = sizeof(*irq_set) + sizeof(*pfd);
2626 
2627     irq_set = g_malloc0(argsz);
2628     irq_set->argsz = argsz;
2629     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2630                      VFIO_IRQ_SET_ACTION_TRIGGER;
2631     irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2632     irq_set->start = 0;
2633     irq_set->count = 1;
2634     pfd = (int32_t *)&irq_set->data;
2635 
2636     *pfd = event_notifier_get_fd(&vdev->err_notifier);
2637     qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2638 
2639     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2640     if (ret) {
2641         error_report("vfio: Failed to set up error notification");
2642         qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2643         event_notifier_cleanup(&vdev->err_notifier);
2644         vdev->pci_aer = false;
2645     }
2646     g_free(irq_set);
2647 }
2648 
2649 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2650 {
2651     int argsz;
2652     struct vfio_irq_set *irq_set;
2653     int32_t *pfd;
2654     int ret;
2655 
2656     if (!vdev->pci_aer) {
2657         return;
2658     }
2659 
2660     argsz = sizeof(*irq_set) + sizeof(*pfd);
2661 
2662     irq_set = g_malloc0(argsz);
2663     irq_set->argsz = argsz;
2664     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2665                      VFIO_IRQ_SET_ACTION_TRIGGER;
2666     irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2667     irq_set->start = 0;
2668     irq_set->count = 1;
2669     pfd = (int32_t *)&irq_set->data;
2670     *pfd = -1;
2671 
2672     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2673     if (ret) {
2674         error_report("vfio: Failed to de-assign error fd: %m");
2675     }
2676     g_free(irq_set);
2677     qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2678                         NULL, NULL, vdev);
2679     event_notifier_cleanup(&vdev->err_notifier);
2680 }
2681 
2682 static void vfio_req_notifier_handler(void *opaque)
2683 {
2684     VFIOPCIDevice *vdev = opaque;
2685     Error *err = NULL;
2686 
2687     if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2688         return;
2689     }
2690 
2691     qdev_unplug(&vdev->pdev.qdev, &err);
2692     if (err) {
2693         error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
2694     }
2695 }
2696 
2697 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2698 {
2699     struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2700                                       .index = VFIO_PCI_REQ_IRQ_INDEX };
2701     int argsz;
2702     struct vfio_irq_set *irq_set;
2703     int32_t *pfd;
2704 
2705     if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2706         return;
2707     }
2708 
2709     if (ioctl(vdev->vbasedev.fd,
2710               VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2711         return;
2712     }
2713 
2714     if (event_notifier_init(&vdev->req_notifier, 0)) {
2715         error_report("vfio: Unable to init event notifier for device request");
2716         return;
2717     }
2718 
2719     argsz = sizeof(*irq_set) + sizeof(*pfd);
2720 
2721     irq_set = g_malloc0(argsz);
2722     irq_set->argsz = argsz;
2723     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2724                      VFIO_IRQ_SET_ACTION_TRIGGER;
2725     irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2726     irq_set->start = 0;
2727     irq_set->count = 1;
2728     pfd = (int32_t *)&irq_set->data;
2729 
2730     *pfd = event_notifier_get_fd(&vdev->req_notifier);
2731     qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2732 
2733     if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2734         error_report("vfio: Failed to set up device request notification");
2735         qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2736         event_notifier_cleanup(&vdev->req_notifier);
2737     } else {
2738         vdev->req_enabled = true;
2739     }
2740 
2741     g_free(irq_set);
2742 }
2743 
2744 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2745 {
2746     int argsz;
2747     struct vfio_irq_set *irq_set;
2748     int32_t *pfd;
2749 
2750     if (!vdev->req_enabled) {
2751         return;
2752     }
2753 
2754     argsz = sizeof(*irq_set) + sizeof(*pfd);
2755 
2756     irq_set = g_malloc0(argsz);
2757     irq_set->argsz = argsz;
2758     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2759                      VFIO_IRQ_SET_ACTION_TRIGGER;
2760     irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2761     irq_set->start = 0;
2762     irq_set->count = 1;
2763     pfd = (int32_t *)&irq_set->data;
2764     *pfd = -1;
2765 
2766     if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2767         error_report("vfio: Failed to de-assign device request fd: %m");
2768     }
2769     g_free(irq_set);
2770     qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2771                         NULL, NULL, vdev);
2772     event_notifier_cleanup(&vdev->req_notifier);
2773 
2774     vdev->req_enabled = false;
2775 }
2776 
2777 static void vfio_realize(PCIDevice *pdev, Error **errp)
2778 {
2779     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2780     VFIODevice *vbasedev_iter;
2781     VFIOGroup *group;
2782     char *tmp, group_path[PATH_MAX], *group_name;
2783     Error *err = NULL;
2784     ssize_t len;
2785     struct stat st;
2786     int groupid;
2787     int i, ret;
2788 
2789     if (!vdev->vbasedev.sysfsdev) {
2790         if (!(~vdev->host.domain || ~vdev->host.bus ||
2791               ~vdev->host.slot || ~vdev->host.function)) {
2792             error_setg(errp, "No provided host device");
2793             error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
2794                               "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
2795             return;
2796         }
2797         vdev->vbasedev.sysfsdev =
2798             g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2799                             vdev->host.domain, vdev->host.bus,
2800                             vdev->host.slot, vdev->host.function);
2801     }
2802 
2803     if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
2804         error_setg_errno(errp, errno, "no such host device");
2805         error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
2806         return;
2807     }
2808 
2809     vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
2810     vdev->vbasedev.ops = &vfio_pci_ops;
2811     vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
2812     vdev->vbasedev.dev = &vdev->pdev.qdev;
2813 
2814     tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2815     len = readlink(tmp, group_path, sizeof(group_path));
2816     g_free(tmp);
2817 
2818     if (len <= 0 || len >= sizeof(group_path)) {
2819         error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
2820                          "no iommu_group found");
2821         goto error;
2822     }
2823 
2824     group_path[len] = 0;
2825 
2826     group_name = basename(group_path);
2827     if (sscanf(group_name, "%d", &groupid) != 1) {
2828         error_setg_errno(errp, errno, "failed to read %s", group_path);
2829         goto error;
2830     }
2831 
2832     trace_vfio_realize(vdev->vbasedev.name, groupid);
2833 
2834     group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
2835     if (!group) {
2836         goto error;
2837     }
2838 
2839     QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2840         if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2841             error_setg(errp, "device is already attached");
2842             vfio_put_group(group);
2843             goto error;
2844         }
2845     }
2846 
2847     ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
2848     if (ret) {
2849         vfio_put_group(group);
2850         goto error;
2851     }
2852 
2853     vfio_populate_device(vdev, &err);
2854     if (err) {
2855         error_propagate(errp, err);
2856         goto error;
2857     }
2858 
2859     /* Get a copy of config space */
2860     ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2861                 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2862                 vdev->config_offset);
2863     if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2864         ret = ret < 0 ? -errno : -EFAULT;
2865         error_setg_errno(errp, -ret, "failed to read device config space");
2866         goto error;
2867     }
2868 
2869     /* vfio emulates a lot for us, but some bits need extra love */
2870     vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2871 
2872     /* QEMU can choose to expose the ROM or not */
2873     memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2874     /* QEMU can also add or extend BARs */
2875     memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
2876 
2877     /*
2878      * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
2879      * device ID is managed by the vendor and need only be a 16-bit value.
2880      * Allow any 16-bit value for subsystem so they can be hidden or changed.
2881      */
2882     if (vdev->vendor_id != PCI_ANY_ID) {
2883         if (vdev->vendor_id >= 0xffff) {
2884             error_setg(errp, "invalid PCI vendor ID provided");
2885             goto error;
2886         }
2887         vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2888         trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2889     } else {
2890         vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2891     }
2892 
2893     if (vdev->device_id != PCI_ANY_ID) {
2894         if (vdev->device_id > 0xffff) {
2895             error_setg(errp, "invalid PCI device ID provided");
2896             goto error;
2897         }
2898         vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2899         trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2900     } else {
2901         vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2902     }
2903 
2904     if (vdev->sub_vendor_id != PCI_ANY_ID) {
2905         if (vdev->sub_vendor_id > 0xffff) {
2906             error_setg(errp, "invalid PCI subsystem vendor ID provided");
2907             goto error;
2908         }
2909         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2910                                vdev->sub_vendor_id, ~0);
2911         trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2912                                               vdev->sub_vendor_id);
2913     }
2914 
2915     if (vdev->sub_device_id != PCI_ANY_ID) {
2916         if (vdev->sub_device_id > 0xffff) {
2917             error_setg(errp, "invalid PCI subsystem device ID provided");
2918             goto error;
2919         }
2920         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2921         trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2922                                               vdev->sub_device_id);
2923     }
2924 
2925     /* QEMU can change multi-function devices to single function, or reverse */
2926     vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2927                                               PCI_HEADER_TYPE_MULTI_FUNCTION;
2928 
2929     /* Restore or clear multifunction, this is always controlled by QEMU */
2930     if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2931         vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2932     } else {
2933         vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2934     }
2935 
2936     /*
2937      * Clear host resource mapping info.  If we choose not to register a
2938      * BAR, such as might be the case with the option ROM, we can get
2939      * confusing, unwritable, residual addresses from the host here.
2940      */
2941     memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2942     memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2943 
2944     vfio_pci_size_rom(vdev);
2945 
2946     vfio_bars_prepare(vdev);
2947 
2948     vfio_msix_early_setup(vdev, &err);
2949     if (err) {
2950         error_propagate(errp, err);
2951         goto error;
2952     }
2953 
2954     vfio_bars_register(vdev);
2955 
2956     ret = vfio_add_capabilities(vdev, errp);
2957     if (ret) {
2958         goto out_teardown;
2959     }
2960 
2961     if (vdev->vga) {
2962         vfio_vga_quirk_setup(vdev);
2963     }
2964 
2965     for (i = 0; i < PCI_ROM_SLOT; i++) {
2966         vfio_bar_quirk_setup(vdev, i);
2967     }
2968 
2969     if (!vdev->igd_opregion &&
2970         vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2971         struct vfio_region_info *opregion;
2972 
2973         if (vdev->pdev.qdev.hotplugged) {
2974             error_setg(errp,
2975                        "cannot support IGD OpRegion feature on hotplugged "
2976                        "device");
2977             goto out_teardown;
2978         }
2979 
2980         ret = vfio_get_dev_region_info(&vdev->vbasedev,
2981                         VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2982                         VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2983         if (ret) {
2984             error_setg_errno(errp, -ret,
2985                              "does not support requested IGD OpRegion feature");
2986             goto out_teardown;
2987         }
2988 
2989         ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
2990         g_free(opregion);
2991         if (ret) {
2992             goto out_teardown;
2993         }
2994     }
2995 
2996     /* QEMU emulates all of MSI & MSIX */
2997     if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2998         memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2999                MSIX_CAP_LENGTH);
3000     }
3001 
3002     if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3003         memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3004                vdev->msi_cap_size);
3005     }
3006 
3007     if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
3008         vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3009                                                   vfio_intx_mmap_enable, vdev);
3010         pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
3011         ret = vfio_intx_enable(vdev, errp);
3012         if (ret) {
3013             goto out_teardown;
3014         }
3015     }
3016 
3017     vfio_register_err_notifier(vdev);
3018     vfio_register_req_notifier(vdev);
3019     vfio_setup_resetfn_quirk(vdev);
3020 
3021     return;
3022 
3023 out_teardown:
3024     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3025     vfio_teardown_msi(vdev);
3026     vfio_bars_exit(vdev);
3027 error:
3028     error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
3029 }
3030 
3031 static void vfio_instance_finalize(Object *obj)
3032 {
3033     PCIDevice *pci_dev = PCI_DEVICE(obj);
3034     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
3035     VFIOGroup *group = vdev->vbasedev.group;
3036 
3037     vfio_bars_finalize(vdev);
3038     g_free(vdev->emulated_config_bits);
3039     g_free(vdev->rom);
3040     /*
3041      * XXX Leaking igd_opregion is not an oversight, we can't remove the
3042      * fw_cfg entry, so leaking this allocation seems like the safest
3043      * option.
3044      *
3045      * g_free(vdev->igd_opregion);
3046      */
3047     vfio_put_device(vdev);
3048     vfio_put_group(group);
3049 }
3050 
3051 static void vfio_exitfn(PCIDevice *pdev)
3052 {
3053     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
3054 
3055     vfio_unregister_req_notifier(vdev);
3056     vfio_unregister_err_notifier(vdev);
3057     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3058     vfio_disable_interrupts(vdev);
3059     if (vdev->intx.mmap_timer) {
3060         timer_free(vdev->intx.mmap_timer);
3061     }
3062     vfio_teardown_msi(vdev);
3063     vfio_bars_exit(vdev);
3064 }
3065 
3066 static void vfio_pci_reset(DeviceState *dev)
3067 {
3068     PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
3069     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
3070 
3071     trace_vfio_pci_reset(vdev->vbasedev.name);
3072 
3073     vfio_pci_pre_reset(vdev);
3074 
3075     if (vdev->resetfn && !vdev->resetfn(vdev)) {
3076         goto post_reset;
3077     }
3078 
3079     if (vdev->vbasedev.reset_works &&
3080         (vdev->has_flr || !vdev->has_pm_reset) &&
3081         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3082         trace_vfio_pci_reset_flr(vdev->vbasedev.name);
3083         goto post_reset;
3084     }
3085 
3086     /* See if we can do our own bus reset */
3087     if (!vfio_pci_hot_reset_one(vdev)) {
3088         goto post_reset;
3089     }
3090 
3091     /* If nothing else works and the device supports PM reset, use it */
3092     if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
3093         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3094         trace_vfio_pci_reset_pm(vdev->vbasedev.name);
3095         goto post_reset;
3096     }
3097 
3098 post_reset:
3099     vfio_pci_post_reset(vdev);
3100 }
3101 
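/*
 * instance_init runs before properties are applied: the host address fields
 * start out as ~0U so an unspecified "host" property can be detected later,
 * and nv_gpudirect_clique defaults to 0xFF, which the GPUDirect clique
 * quirk treats as "not set".
 */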
3102 static void vfio_instance_init(Object *obj)
3103 {
3104     PCIDevice *pci_dev = PCI_DEVICE(obj);
3105     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
3106 
3107     device_add_bootindex_property(obj, &vdev->bootindex,
3108                                   "bootindex", NULL,
3109                                   &pci_dev->qdev, NULL);
3110     vdev->host.domain = ~0U;
3111     vdev->host.bus = ~0U;
3112     vdev->host.slot = ~0U;
3113     vdev->host.function = ~0U;
3114 
3115     vdev->nv_gpudirect_clique = 0xFF;
3116 }
3117 
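/*
 * Properties prefixed with "x-" are experimental or debugging knobs and are
 * not covered by QEMU's usual compatibility guarantees; "host" and
 * "sysfsdev" are the two supported ways of naming the assigned device.
 */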
3118 static Property vfio_pci_dev_properties[] = {
3119     DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
3120     DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
3121     DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
3122                        intx.mmap_timeout, 1100),
3123     DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
3124                     VFIO_FEATURE_ENABLE_VGA_BIT, false),
3125     DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
3126                     VFIO_FEATURE_ENABLE_REQ_BIT, true),
3127     DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
3128                     VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
3129     DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
3130     DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
3131     DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
3132     DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
3133     DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
3134                      no_geforce_quirks, false),
3135     DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
3136     DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
3137     DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
3138                        sub_vendor_id, PCI_ANY_ID),
3139     DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
3140                        sub_device_id, PCI_ANY_ID),
3141     DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
3142     DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
3143                                    nv_gpudirect_clique,
3144                                    qdev_prop_nv_gpudirect_clique, uint8_t),
3145     DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
3146                                 OFF_AUTOPCIBAR_OFF),
3147     /*
3148      * TODO - support passed fds... is this necessary?
3149      * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
3150      * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
3151      */
3152     DEFINE_PROP_END_OF_LIST(),
3153 };
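/*
 * Illustrative usage (not part of this file): a vfio-pci device is typically
 * instantiated with either the host address or a sysfs path, e.g.
 *
 *   -device vfio-pci,host=0000:01:00.0,x-no-mmap=on
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:01:00.0
 */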
3154 
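/*
 * Assigned devices cannot be migrated by this code: their state lives on the
 * physical device, so the VMState marks the device unmigratable.
 */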
3155 static const VMStateDescription vfio_pci_vmstate = {
3156     .name = "vfio-pci",
3157     .unmigratable = 1,
3158 };
3159 
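/*
 * class_init wires the pieces above into the device model: the reset handler
 * and property list on the DeviceClass, and realize/exit plus the config
 * space accessors on the PCIDeviceClass.
 */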
3160 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
3161 {
3162     DeviceClass *dc = DEVICE_CLASS(klass);
3163     PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3164 
3165     dc->reset = vfio_pci_reset;
3166     dc->props = vfio_pci_dev_properties;
3167     dc->vmsd = &vfio_pci_vmstate;
3168     dc->desc = "VFIO-based PCI device assignment";
3169     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3170     pdc->realize = vfio_realize;
3171     pdc->exit = vfio_exitfn;
3172     pdc->config_read = vfio_pci_read_config;
3173     pdc->config_write = vfio_pci_write_config;
3174     pdc->is_express = 1; /* We might be */
3175 }
3176 
3177 static const TypeInfo vfio_pci_dev_info = {
3178     .name = "vfio-pci",
3179     .parent = TYPE_PCI_DEVICE,
3180     .instance_size = sizeof(VFIOPCIDevice),
3181     .class_init = vfio_pci_dev_class_init,
3182     .instance_init = vfio_instance_init,
3183     .instance_finalize = vfio_instance_finalize,
3184     .interfaces = (InterfaceInfo[]) {
3185         { INTERFACE_PCIE_DEVICE },
3186         { INTERFACE_CONVENTIONAL_PCI_DEVICE },
3187         { }
3188     },
3189 };
3190 
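/*
 * type_init() arranges for the type to be registered with QEMU's QOM type
 * system during startup, making "vfio-pci" available to -device.
 */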
3191 static void register_vfio_pci_dev_type(void)
3192 {
3193     type_register_static(&vfio_pci_dev_info);
3194 }
3195 
3196 type_init(register_vfio_pci_dev_type)
3197