xref: /openbmc/qemu/hw/vfio/pci.c (revision 5b262bb6)
1 /*
2  * vfio based device assignment support
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include <linux/vfio.h>
23 #include <sys/ioctl.h>
24 
25 #include "hw/pci/msi.h"
26 #include "hw/pci/msix.h"
27 #include "hw/pci/pci_bridge.h"
28 #include "qemu/error-report.h"
29 #include "qemu/range.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/sysemu.h"
32 #include "pci.h"
33 #include "trace.h"
34 #include "qapi/error.h"
35 
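/*
 * Length of the MSI-X capability as it appears in config space: cap ID,
 * next pointer, message control word, table offset/BIR dword, and PBA
 * offset/BIR dword (1 + 1 + 2 + 4 + 4 bytes).
 */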
36 #define MSIX_CAP_LENGTH 12
37 
38 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
39 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
40 
41 /*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
43  * also be a huge overhead.  We try to get the best of both worlds by
44  * waiting until an interrupt to disable mmaps (subsequent transitions
45  * to the same state are effectively no overhead).  If the interrupt has
46  * been serviced and the time gap is long enough, we re-enable mmaps for
47  * performance.  This works well for things like graphics cards, which
48  * may not use their interrupt at all and are penalized to an unusable
49  * level by read/write BAR traps.  Other devices, like NICs, have more
50  * regular interrupts and see much better latency by staying in non-mmap
51  * mode.  We therefore set the default mmap_timeout such that a ping
52  * is just enough to keep the mmap disabled.  Users can experiment with
53  * other options with the x-intx-mmap-timeout-ms parameter (a value of
54  * zero disables the timer).
55  */
56 static void vfio_intx_mmap_enable(void *opaque)
57 {
58     VFIOPCIDevice *vdev = opaque;
59 
60     if (vdev->intx.pending) {
61         timer_mod(vdev->intx.mmap_timer,
62                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
63         return;
64     }
65 
66     vfio_mmap_set_enabled(vdev, true);
67 }
68 
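/*
 * eventfd handler for host INTx: assert the guest IRQ and disable BAR
 * mmaps so that guest accesses to the device trap into QEMU, where they
 * serve as the cue that the interrupt is being serviced (see
 * vfio_intx_eoi(), invoked from the slow-path region accesses).  The
 * timer re-enables mmaps once the interrupt has been handled.
 */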
69 static void vfio_intx_interrupt(void *opaque)
70 {
71     VFIOPCIDevice *vdev = opaque;
72 
73     if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
74         return;
75     }
76 
77     trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
78 
79     vdev->intx.pending = true;
80     pci_irq_assert(&vdev->pdev);
81     vfio_mmap_set_enabled(vdev, false);
82     if (vdev->intx.mmap_timeout) {
83         timer_mod(vdev->intx.mmap_timer,
84                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
85     }
86 }
87 
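/*
 * Called once the pending interrupt is considered serviced: de-assert the
 * virtual IRQ and unmask the physical one, re-arming this level-triggered
 * interrupt.
 */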
88 static void vfio_intx_eoi(VFIODevice *vbasedev)
89 {
90     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
91 
92     if (!vdev->intx.pending) {
93         return;
94     }
95 
96     trace_vfio_intx_eoi(vbasedev->name);
97 
98     vdev->intx.pending = false;
99     pci_irq_deassert(&vdev->pdev);
100     vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
101 }
102 
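/*
 * Bypass QEMU for INTx delivery: KVM injects the interrupt directly from
 * the trigger eventfd, and its resamplefd, signaled on guest EOI, is wired
 * to VFIO's UNMASK action so the line re-arms without a userspace exit.
 */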
103 static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
104 {
105 #ifdef CONFIG_KVM
106     struct kvm_irqfd irqfd = {
107         .fd = event_notifier_get_fd(&vdev->intx.interrupt),
108         .gsi = vdev->intx.route.irq,
109         .flags = KVM_IRQFD_FLAG_RESAMPLE,
110     };
111     struct vfio_irq_set *irq_set;
112     int ret, argsz;
113     int32_t *pfd;
114 
115     if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
116         vdev->intx.route.mode != PCI_INTX_ENABLED ||
117         !kvm_resamplefds_enabled()) {
118         return;
119     }
120 
121     /* Get to a known interrupt state */
122     qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
123     vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
124     vdev->intx.pending = false;
125     pci_irq_deassert(&vdev->pdev);
126 
127     /* Get an eventfd for resample/unmask */
128     if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed for EOI eventfd");
130         goto fail;
131     }
132 
133     /* KVM triggers it, VFIO listens for it */
134     irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
135 
136     if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
137         error_report("vfio: Error: Failed to setup resample irqfd: %m");
138         goto fail_irqfd;
139     }
140 
141     argsz = sizeof(*irq_set) + sizeof(*pfd);
142 
143     irq_set = g_malloc0(argsz);
144     irq_set->argsz = argsz;
145     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
146     irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
147     irq_set->start = 0;
148     irq_set->count = 1;
149     pfd = (int32_t *)&irq_set->data;
150 
151     *pfd = irqfd.resamplefd;
152 
153     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
154     g_free(irq_set);
155     if (ret) {
156         error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
157         goto fail_vfio;
158     }
159 
160     /* Let'em rip */
161     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
162 
163     vdev->intx.kvm_accel = true;
164 
165     trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
166 
167     return;
168 
169 fail_vfio:
170     irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
171     kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
172 fail_irqfd:
173     event_notifier_cleanup(&vdev->intx.unmask);
174 fail:
175     qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
176     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
177 #endif
178 }
179 
180 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
181 {
182 #ifdef CONFIG_KVM
183     struct kvm_irqfd irqfd = {
184         .fd = event_notifier_get_fd(&vdev->intx.interrupt),
185         .gsi = vdev->intx.route.irq,
186         .flags = KVM_IRQFD_FLAG_DEASSIGN,
187     };
188 
189     if (!vdev->intx.kvm_accel) {
190         return;
191     }
192 
193     /*
194      * Get to a known state, hardware masked, QEMU ready to accept new
195      * interrupts, QEMU IRQ de-asserted.
196      */
197     vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
198     vdev->intx.pending = false;
199     pci_irq_deassert(&vdev->pdev);
200 
201     /* Tell KVM to stop listening for an INTx irqfd */
202     if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
203         error_report("vfio: Error: Failed to disable INTx irqfd: %m");
204     }
205 
    /* We only need to close the eventfd for VFIO to clean up the kernel side */
207     event_notifier_cleanup(&vdev->intx.unmask);
208 
209     /* QEMU starts listening for interrupt events. */
210     qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
211 
212     vdev->intx.kvm_accel = false;
213 
214     /* If we've missed an event, let it re-fire through QEMU */
215     vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
216 
217     trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
218 #endif
219 }
220 
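/*
 * Called when the chipset re-routes our INTx pin to a different IRQ: tear
 * down the KVM bypass, record the new route, and rebuild the bypass if the
 * route remains enabled.
 */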
221 static void vfio_intx_update(PCIDevice *pdev)
222 {
223     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
224     PCIINTxRoute route;
225 
226     if (vdev->interrupt != VFIO_INT_INTx) {
227         return;
228     }
229 
230     route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
231 
232     if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
233         return; /* Nothing changed */
234     }
235 
236     trace_vfio_intx_update(vdev->vbasedev.name,
237                            vdev->intx.route.irq, route.irq);
238 
239     vfio_intx_disable_kvm(vdev);
240 
241     vdev->intx.route = route;
242 
243     if (route.mode != PCI_INTX_ENABLED) {
244         return;
245     }
246 
247     vfio_intx_enable_kvm(vdev);
248 
    /* Re-enable the interrupt in case we missed an EOI */
250     vfio_intx_eoi(&vdev->vbasedev);
251 }
252 
253 static int vfio_intx_enable(VFIOPCIDevice *vdev)
254 {
255     uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
256     int ret, argsz;
257     struct vfio_irq_set *irq_set;
258     int32_t *pfd;
259 
260     if (!pin) {
261         return 0;
262     }
263 
264     vfio_disable_interrupts(vdev);
265 
266     vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
267     pci_config_set_interrupt_pin(vdev->pdev.config, pin);
268 
269 #ifdef CONFIG_KVM
270     /*
271      * Only conditional to avoid generating error messages on platforms
272      * where we won't actually use the result anyway.
273      */
274     if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
275         vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
276                                                         vdev->intx.pin);
277     }
278 #endif
279 
280     ret = event_notifier_init(&vdev->intx.interrupt, 0);
281     if (ret) {
282         error_report("vfio: Error: event_notifier_init failed");
283         return ret;
284     }
285 
286     argsz = sizeof(*irq_set) + sizeof(*pfd);
287 
288     irq_set = g_malloc0(argsz);
289     irq_set->argsz = argsz;
290     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
291     irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
292     irq_set->start = 0;
293     irq_set->count = 1;
294     pfd = (int32_t *)&irq_set->data;
295 
296     *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
297     qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
298 
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        ret = -errno; /* save errno; the cleanup calls below may clobber it */
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* pfd points into irq_set, so remove the handler before freeing it */
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        g_free(irq_set);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return ret;
    }
    g_free(irq_set);
307 
308     vfio_intx_enable_kvm(vdev);
309 
310     vdev->interrupt = VFIO_INT_INTx;
311 
312     trace_vfio_intx_enable(vdev->vbasedev.name);
313 
314     return 0;
315 }
316 
317 static void vfio_intx_disable(VFIOPCIDevice *vdev)
318 {
319     int fd;
320 
321     timer_del(vdev->intx.mmap_timer);
322     vfio_intx_disable_kvm(vdev);
323     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
324     vdev->intx.pending = false;
325     pci_irq_deassert(&vdev->pdev);
326     vfio_mmap_set_enabled(vdev, true);
327 
328     fd = event_notifier_get_fd(&vdev->intx.interrupt);
329     qemu_set_fd_handler(fd, NULL, NULL, vdev);
330     event_notifier_cleanup(&vdev->intx.interrupt);
331 
332     vdev->interrupt = VFIO_INT_NONE;
333 
334     trace_vfio_intx_disable(vdev->vbasedev.name);
335 }
336 
337 /*
338  * MSI/X
339  */
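/*
 * Userspace injection path for one MSI/MSI-X vector eventfd, used when the
 * vector isn't (or can't be) routed through a KVM irqfd.
 */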
340 static void vfio_msi_interrupt(void *opaque)
341 {
342     VFIOMSIVector *vector = opaque;
343     VFIOPCIDevice *vdev = vector->vdev;
344     MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
345     void (*notify)(PCIDevice *dev, unsigned vector);
346     MSIMessage msg;
347     int nr = vector - vdev->msi_vectors;
348 
349     if (!event_notifier_test_and_clear(&vector->interrupt)) {
350         return;
351     }
352 
353     if (vdev->interrupt == VFIO_INT_MSIX) {
354         get_msg = msix_get_message;
355         notify = msix_notify;
356 
        /* A masked vector firing needs to use the PBA; enable it */
358         if (msix_is_masked(&vdev->pdev, nr)) {
359             set_bit(nr, vdev->msix->pending);
360             memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
361             trace_vfio_msix_pba_enable(vdev->vbasedev.name);
362         }
363     } else if (vdev->interrupt == VFIO_INT_MSI) {
364         get_msg = msi_get_message;
365         notify = msi_notify;
366     } else {
367         abort();
368     }
369 
370     msg = get_msg(&vdev->pdev, nr);
371     trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
372     notify(&vdev->pdev, nr);
373 }
374 
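/*
 * Program every vector's eventfd into the host with a single SET_IRQS
 * call; vectors without a usable eventfd pass -1 so nothing is attached
 * to them.
 */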
375 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
376 {
377     struct vfio_irq_set *irq_set;
378     int ret = 0, i, argsz;
379     int32_t *fds;
380 
381     argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
382 
383     irq_set = g_malloc0(argsz);
384     irq_set->argsz = argsz;
385     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
386     irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
387     irq_set->start = 0;
388     irq_set->count = vdev->nr_vectors;
389     fds = (int32_t *)&irq_set->data;
390 
391     for (i = 0; i < vdev->nr_vectors; i++) {
392         int fd = -1;
393 
        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, so we always use the KVM signaling path once it is set up.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
400         if (vdev->msi_vectors[i].use) {
401             if (vdev->msi_vectors[i].virq < 0 ||
402                 (msix && msix_is_masked(&vdev->pdev, i))) {
403                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
404             } else {
405                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
406             }
407         }
408 
409         fds[i] = fd;
410     }
411 
412     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
413 
414     g_free(irq_set);
415 
416     return ret;
417 }
418 
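/*
 * Try to set up the KVM bypass for one vector: allocate an MSI route and
 * attach the kvm_interrupt eventfd to it as an irqfd.  Any failure leaves
 * vector->virq at -1, falling back to userspace injection.
 */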
419 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
420                                   int vector_n, bool msix)
421 {
422     int virq;
423 
424     if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
425         return;
426     }
427 
428     if (event_notifier_init(&vector->kvm_interrupt, 0)) {
429         return;
430     }
431 
432     virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
433     if (virq < 0) {
434         event_notifier_cleanup(&vector->kvm_interrupt);
435         return;
436     }
437 
438     if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
439                                        NULL, virq) < 0) {
440         kvm_irqchip_release_virq(kvm_state, virq);
441         event_notifier_cleanup(&vector->kvm_interrupt);
442         return;
443     }
444 
445     vector->virq = virq;
446 }
447 
448 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
449 {
450     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
451                                           vector->virq);
452     kvm_irqchip_release_virq(kvm_state, vector->virq);
453     vector->virq = -1;
454     event_notifier_cleanup(&vector->kvm_interrupt);
455 }
456 
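/* The guest rewrote this vector's MSI address/data; update KVM's route. */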
457 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
458                                      PCIDevice *pdev)
459 {
460     kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
461     kvm_irqchip_commit_routes(kvm_state);
462 }
463 
464 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
465                                    MSIMessage *msg, IOHandler *handler)
466 {
467     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
468     VFIOMSIVector *vector;
469     int ret;
470 
471     trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
472 
473     vector = &vdev->msi_vectors[nr];
474 
475     if (!vector->use) {
476         vector->vdev = vdev;
477         vector->virq = -1;
478         if (event_notifier_init(&vector->interrupt, 0)) {
479             error_report("vfio: Error: event_notifier_init failed");
480         }
481         vector->use = true;
482         msix_vector_use(pdev, nr);
483     }
484 
485     qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
486                         handler, NULL, vector);
487 
488     /*
489      * Attempt to enable route through KVM irqchip,
490      * default to userspace handling if unavailable.
491      */
492     if (vector->virq >= 0) {
493         if (!msg) {
494             vfio_remove_kvm_msi_virq(vector);
495         } else {
496             vfio_update_kvm_msi_virq(vector, *msg, pdev);
497         }
498     } else {
499         if (msg) {
500             vfio_add_kvm_msi_virq(vdev, vector, nr, true);
501         }
502     }
503 
    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
509     if (vdev->nr_vectors < nr + 1) {
510         vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
511         vdev->nr_vectors = nr + 1;
512         ret = vfio_enable_vectors(vdev, true);
513         if (ret) {
514             error_report("vfio: failed to enable vectors, %d", ret);
515         }
516     } else {
517         int argsz;
518         struct vfio_irq_set *irq_set;
519         int32_t *pfd;
520 
521         argsz = sizeof(*irq_set) + sizeof(*pfd);
522 
523         irq_set = g_malloc0(argsz);
524         irq_set->argsz = argsz;
525         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
526                          VFIO_IRQ_SET_ACTION_TRIGGER;
527         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
528         irq_set->start = nr;
529         irq_set->count = 1;
530         pfd = (int32_t *)&irq_set->data;
531 
532         if (vector->virq >= 0) {
533             *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
534         } else {
535             *pfd = event_notifier_get_fd(&vector->interrupt);
536         }
537 
538         ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
539         g_free(irq_set);
540         if (ret) {
541             error_report("vfio: failed to modify vector, %d", ret);
542         }
543     }
544 
545     /* Disable PBA emulation when nothing more is pending. */
546     clear_bit(nr, vdev->msix->pending);
547     if (find_first_bit(vdev->msix->pending,
548                        vdev->nr_vectors) == vdev->nr_vectors) {
549         memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
550         trace_vfio_msix_pba_disable(vdev->vbasedev.name);
551     }
552 
553     return 0;
554 }
555 
556 static int vfio_msix_vector_use(PCIDevice *pdev,
557                                 unsigned int nr, MSIMessage msg)
558 {
559     return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
560 }
561 
562 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
563 {
564     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
565     VFIOMSIVector *vector = &vdev->msi_vectors[nr];
566 
567     trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
568 
569     /*
570      * There are still old guests that mask and unmask vectors on every
571      * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place and simply switch VFIO to use the non-bypass
573      * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
574      * core will mask the interrupt and set pending bits, allowing it to
575      * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
576      */
577     if (vector->virq >= 0) {
578         int argsz;
579         struct vfio_irq_set *irq_set;
580         int32_t *pfd;
581 
582         argsz = sizeof(*irq_set) + sizeof(*pfd);
583 
584         irq_set = g_malloc0(argsz);
585         irq_set->argsz = argsz;
586         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
587                          VFIO_IRQ_SET_ACTION_TRIGGER;
588         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
589         irq_set->start = nr;
590         irq_set->count = 1;
591         pfd = (int32_t *)&irq_set->data;
592 
593         *pfd = event_notifier_get_fd(&vector->interrupt);
594 
595         ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
596 
597         g_free(irq_set);
598     }
599 }
600 
601 static void vfio_msix_enable(VFIOPCIDevice *vdev)
602 {
603     vfio_disable_interrupts(vdev);
604 
605     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
606 
607     vdev->interrupt = VFIO_INT_MSIX;
608 
609     /*
610      * Some communication channels between VF & PF or PF & fw rely on the
611      * physical state of the device and expect that enabling MSI-X from the
612      * guest enables the same on the host.  When our guest is Linux, the
     * guest driver's call to pci_enable_msix() sets the enable bit in the
614      * MSI-X capability, but leaves the vector table masked.  We therefore
615      * can't rely on a vector_use callback (from request_irq() in the guest)
616      * to switch the physical device into MSI-X mode because that may come a
617      * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
619      * the physical device with no vectors enabled, but MSI-X enabled, just
620      * like the guest view.
621      */
622     vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
623     vfio_msix_vector_release(&vdev->pdev, 0);
624 
625     if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
626                                   vfio_msix_vector_release, NULL)) {
627         error_report("vfio: msix_set_vector_notifiers failed");
628     }
629 
630     trace_vfio_msix_enable(vdev->vbasedev.name);
631 }
632 
633 static void vfio_msi_enable(VFIOPCIDevice *vdev)
634 {
635     int ret, i;
636 
637     vfio_disable_interrupts(vdev);
638 
639     vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
640 retry:
641     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
642 
643     for (i = 0; i < vdev->nr_vectors; i++) {
644         VFIOMSIVector *vector = &vdev->msi_vectors[i];
645 
646         vector->vdev = vdev;
647         vector->virq = -1;
648         vector->use = true;
649 
650         if (event_notifier_init(&vector->interrupt, 0)) {
651             error_report("vfio: Error: event_notifier_init failed");
652         }
653 
654         qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
655                             vfio_msi_interrupt, NULL, vector);
656 
657         /*
658          * Attempt to enable route through KVM irqchip,
659          * default to userspace handling if unavailable.
660          */
661         vfio_add_kvm_msi_virq(vdev, vector, i, false);
662     }
663 
664     /* Set interrupt type prior to possible interrupts */
665     vdev->interrupt = VFIO_INT_MSI;
666 
667     ret = vfio_enable_vectors(vdev, false);
668     if (ret) {
669         if (ret < 0) {
670             error_report("vfio: Error: Failed to setup MSI fds: %m");
671         } else if (ret != vdev->nr_vectors) {
672             error_report("vfio: Error: Failed to enable %d "
673                          "MSI vectors, retry with %d", vdev->nr_vectors, ret);
674         }
675 
676         for (i = 0; i < vdev->nr_vectors; i++) {
677             VFIOMSIVector *vector = &vdev->msi_vectors[i];
678             if (vector->virq >= 0) {
679                 vfio_remove_kvm_msi_virq(vector);
680             }
681             qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
682                                 NULL, NULL, NULL);
683             event_notifier_cleanup(&vector->interrupt);
684         }
685 
686         g_free(vdev->msi_vectors);
687 
688         if (ret > 0 && ret != vdev->nr_vectors) {
689             vdev->nr_vectors = ret;
690             goto retry;
691         }
692         vdev->nr_vectors = 0;
693 
        /*
         * Failing to set up MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out how to fall back to INTx for this device.
         */
699         error_report("vfio: Error: Failed to enable MSI");
700         vdev->interrupt = VFIO_INT_NONE;
701 
702         return;
703     }
704 
705     trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
706 }
707 
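/*
 * Teardown common to MSI and MSI-X: drop KVM routes and eventfds for every
 * vector, free the vector array, and fall back to INTx.
 */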
708 static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
709 {
710     int i;
711 
712     for (i = 0; i < vdev->nr_vectors; i++) {
713         VFIOMSIVector *vector = &vdev->msi_vectors[i];
714         if (vdev->msi_vectors[i].use) {
715             if (vector->virq >= 0) {
716                 vfio_remove_kvm_msi_virq(vector);
717             }
718             qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
719                                 NULL, NULL, NULL);
720             event_notifier_cleanup(&vector->interrupt);
721         }
722     }
723 
724     g_free(vdev->msi_vectors);
725     vdev->msi_vectors = NULL;
726     vdev->nr_vectors = 0;
727     vdev->interrupt = VFIO_INT_NONE;
728 
729     vfio_intx_enable(vdev);
730 }
731 
732 static void vfio_msix_disable(VFIOPCIDevice *vdev)
733 {
734     int i;
735 
736     msix_unset_vector_notifiers(&vdev->pdev);
737 
    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, so check through the rest and release them ourselves if
     * necessary.
     */
742     for (i = 0; i < vdev->nr_vectors; i++) {
743         if (vdev->msi_vectors[i].use) {
744             vfio_msix_vector_release(&vdev->pdev, i);
745             msix_vector_unuse(&vdev->pdev, i);
746         }
747     }
748 
749     if (vdev->nr_vectors) {
750         vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
751     }
752 
753     vfio_msi_disable_common(vdev);
754 
755     memset(vdev->msix->pending, 0,
756            BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
757 
758     trace_vfio_msix_disable(vdev->vbasedev.name);
759 }
760 
761 static void vfio_msi_disable(VFIOPCIDevice *vdev)
762 {
763     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
764     vfio_msi_disable_common(vdev);
765 
766     trace_vfio_msi_disable(vdev->vbasedev.name);
767 }
768 
769 static void vfio_update_msi(VFIOPCIDevice *vdev)
770 {
771     int i;
772 
773     for (i = 0; i < vdev->nr_vectors; i++) {
774         VFIOMSIVector *vector = &vdev->msi_vectors[i];
775         MSIMessage msg;
776 
777         if (!vector->use || vector->virq < 0) {
778             continue;
779         }
780 
781         msg = msi_get_message(&vdev->pdev, i);
782         vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
783     }
784 }
785 
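/*
 * Read the device's option ROM through the VFIO ROM region into a local
 * buffer, then patch up the device ID and checksum if the physical ROM
 * doesn't match the IDs we expose to the guest.
 */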
786 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
787 {
788     struct vfio_region_info *reg_info;
789     uint64_t size;
790     off_t off = 0;
791     ssize_t bytes;
792 
793     if (vfio_get_region_info(&vdev->vbasedev,
794                              VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
795         error_report("vfio: Error getting ROM info: %m");
796         return;
797     }
798 
799     trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
800                             (unsigned long)reg_info->offset,
801                             (unsigned long)reg_info->flags);
802 
803     vdev->rom_size = size = reg_info->size;
804     vdev->rom_offset = reg_info->offset;
805 
806     g_free(reg_info);
807 
808     if (!vdev->rom_size) {
809         vdev->rom_read_failed = true;
810         error_report("vfio-pci: Cannot read device rom at "
811                     "%s", vdev->vbasedev.name);
812         error_printf("Device option ROM contents are probably invalid "
813                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
814                     "or load from file with romfile=\n");
815         return;
816     }
817 
818     vdev->rom = g_malloc(size);
819     memset(vdev->rom, 0xff, size);
820 
821     while (size) {
822         bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
823                       size, vdev->rom_offset + off);
824         if (bytes == 0) {
825             break;
826         } else if (bytes > 0) {
827             off += bytes;
828             size -= bytes;
829         } else {
830             if (errno == EINTR || errno == EAGAIN) {
831                 continue;
832             }
833             error_report("vfio: Error reading device ROM: %m");
834             break;
835         }
836     }
837 
    /*
     * Test the ROM signature against our device: if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums, so we can't simply adjust the checksum.
     */
844     if (pci_get_word(vdev->rom) == 0xaa55 &&
845         pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
846         !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
847         uint16_t vid, did;
848 
849         vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
850         did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
851 
852         if (vid == vdev->vendor_id && did != vdev->device_id) {
853             int i;
854             uint8_t csum, *data = vdev->rom;
855 
856             pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
857                          vdev->device_id);
858             data[6] = 0;
859 
860             for (csum = 0, i = 0; i < vdev->rom_size; i++) {
861                 csum += data[i];
862             }
863 
864             data[6] = -csum;
865         }
866     }
867 }
868 
869 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
870 {
871     VFIOPCIDevice *vdev = opaque;
872     union {
873         uint8_t byte;
874         uint16_t word;
875         uint32_t dword;
876         uint64_t qword;
    } val = { .qword = 0 }; /* zero so truncated reads don't leak stack data */
878     uint64_t data = 0;
879 
880     /* Load the ROM lazily when the guest tries to read it */
881     if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
882         vfio_pci_load_rom(vdev);
883     }
884 
885     memcpy(&val, vdev->rom + addr,
886            (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
887 
888     switch (size) {
889     case 1:
890         data = val.byte;
891         break;
892     case 2:
893         data = le16_to_cpu(val.word);
894         break;
895     case 4:
896         data = le32_to_cpu(val.dword);
897         break;
898     default:
899         hw_error("vfio: unsupported read size, %d bytes\n", size);
900         break;
901     }
902 
903     trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
904 
905     return data;
906 }
907 
908 static void vfio_rom_write(void *opaque, hwaddr addr,
909                            uint64_t data, unsigned size)
910 {
911 }
912 
913 static const MemoryRegionOps vfio_rom_ops = {
914     .read = vfio_rom_read,
915     .write = vfio_rom_write,
916     .endianness = DEVICE_LITTLE_ENDIAN,
917 };
918 
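/*
 * Size the ROM BAR using the standard PCI sizing protocol: write all 1s to
 * the physical ROM address register, read back the size mask, and restore
 * the original value.  E.g. a read-back of 0xfffe0000 yields
 * ~(0xfffe0000) + 1 = 0x20000, a 128KB ROM.
 */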
919 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
920 {
921     uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
922     off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
923     DeviceState *dev = DEVICE(vdev);
924     char *name;
925     int fd = vdev->vbasedev.fd;
926 
927     if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since PCI handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning: Device at %s is known to cause system "
                         "instability issues during option ROM execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
933         return;
934     }
935 
936     /*
937      * Use the same size ROM BAR as the physical device.  The contents
938      * will get filled in later when the guest tries to read it.
939      */
940     if (pread(fd, &orig, 4, offset) != 4 ||
941         pwrite(fd, &size, 4, offset) != 4 ||
942         pread(fd, &size, 4, offset) != 4 ||
943         pwrite(fd, &orig, 4, offset) != 4) {
944         error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
945         return;
946     }
947 
948     size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
949 
950     if (!size) {
951         return;
952     }
953 
954     if (vfio_blacklist_opt_rom(vdev)) {
955         if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
956             error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
957                          vdev->vbasedev.name);
958         } else {
959             error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
960                          vdev->vbasedev.name);
961             return;
962         }
963     }
964 
965     trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
966 
967     name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
968 
969     memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
970                           &vfio_rom_ops, vdev, name, size);
971     g_free(name);
972 
973     pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
974                      PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
975 
976     vdev->pdev.has_rom = true;
977     vdev->rom_read_failed = false;
978 }
979 
980 void vfio_vga_write(void *opaque, hwaddr addr,
981                            uint64_t data, unsigned size)
982 {
983     VFIOVGARegion *region = opaque;
984     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
985     union {
986         uint8_t byte;
987         uint16_t word;
988         uint32_t dword;
989         uint64_t qword;
990     } buf;
991     off_t offset = vga->fd_offset + region->offset + addr;
992 
993     switch (size) {
994     case 1:
995         buf.byte = data;
996         break;
997     case 2:
998         buf.word = cpu_to_le16(data);
999         break;
1000     case 4:
1001         buf.dword = cpu_to_le32(data);
1002         break;
1003     default:
1004         hw_error("vfio: unsupported write size, %d bytes", size);
1005         break;
1006     }
1007 
1008     if (pwrite(vga->fd, &buf, size, offset) != size) {
1009         error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1010                      __func__, region->offset + addr, data, size);
1011     }
1012 
1013     trace_vfio_vga_write(region->offset + addr, data, size);
1014 }
1015 
1016 uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1017 {
1018     VFIOVGARegion *region = opaque;
1019     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1020     union {
1021         uint8_t byte;
1022         uint16_t word;
1023         uint32_t dword;
1024         uint64_t qword;
1025     } buf;
1026     uint64_t data = 0;
1027     off_t offset = vga->fd_offset + region->offset + addr;
1028 
1029     if (pread(vga->fd, &buf, size, offset) != size) {
1030         error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1031                      __func__, region->offset + addr, size);
1032         return (uint64_t)-1;
1033     }
1034 
1035     switch (size) {
1036     case 1:
1037         data = buf.byte;
1038         break;
1039     case 2:
1040         data = le16_to_cpu(buf.word);
1041         break;
1042     case 4:
1043         data = le32_to_cpu(buf.dword);
1044         break;
1045     default:
1046         hw_error("vfio: unsupported read size, %d bytes", size);
1047         break;
1048     }
1049 
1050     trace_vfio_vga_read(region->offset + addr, size, data);
1051 
1052     return data;
1053 }
1054 
1055 static const MemoryRegionOps vfio_vga_ops = {
1056     .read = vfio_vga_read,
1057     .write = vfio_vga_write,
1058     .endianness = DEVICE_LITTLE_ENDIAN,
1059 };
1060 
1061 /*
1062  * PCI config space
1063  */
1064 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1065 {
1066     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1067     uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1068 
1069     memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1070     emu_bits = le32_to_cpu(emu_bits);
1071 
1072     if (emu_bits) {
1073         emu_val = pci_default_read_config(pdev, addr, len);
1074     }
1075 
1076     if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1077         ssize_t ret;
1078 
1079         ret = pread(vdev->vbasedev.fd, &phys_val, len,
1080                     vdev->config_offset + addr);
1081         if (ret != len) {
1082             error_report("%s(%s, 0x%x, 0x%x) failed: %m",
1083                          __func__, vdev->vbasedev.name, addr, len);
1084             return -errno;
1085         }
1086         phys_val = le32_to_cpu(phys_val);
1087     }
1088 
1089     val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
1090 
1091     trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1092 
1093     return val;
1094 }
1095 
1096 void vfio_pci_write_config(PCIDevice *pdev,
1097                            uint32_t addr, uint32_t val, int len)
1098 {
1099     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1100     uint32_t val_le = cpu_to_le32(val);
1101 
1102     trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1103 
1104     /* Write everything to VFIO, let it filter out what we can't write */
1105     if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
1106                 != len) {
1107         error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
1108                      __func__, vdev->vbasedev.name, addr, val, len);
1109     }
1110 
1111     /* MSI/MSI-X Enabling/Disabling */
1112     if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1113         ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1114         int is_enabled, was_enabled = msi_enabled(pdev);
1115 
1116         pci_default_write_config(pdev, addr, val, len);
1117 
1118         is_enabled = msi_enabled(pdev);
1119 
1120         if (!was_enabled) {
1121             if (is_enabled) {
1122                 vfio_msi_enable(vdev);
1123             }
1124         } else {
1125             if (!is_enabled) {
1126                 vfio_msi_disable(vdev);
1127             } else {
1128                 vfio_update_msi(vdev);
1129             }
1130         }
1131     } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1132         ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1133         int is_enabled, was_enabled = msix_enabled(pdev);
1134 
1135         pci_default_write_config(pdev, addr, val, len);
1136 
1137         is_enabled = msix_enabled(pdev);
1138 
1139         if (!was_enabled && is_enabled) {
1140             vfio_msix_enable(vdev);
1141         } else if (was_enabled && !is_enabled) {
1142             vfio_msix_disable(vdev);
1143         }
1144     } else {
1145         /* Write everything to QEMU to keep emulated bits correct */
1146         pci_default_write_config(pdev, addr, val, len);
1147     }
1148 }
1149 
1150 /*
1151  * Interrupt setup
1152  */
1153 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1154 {
1155     /*
1156      * More complicated than it looks.  Disabling MSI/X transitions the
1157      * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then clean up by disabling INTx.
1159      */
1160     if (vdev->interrupt == VFIO_INT_MSIX) {
1161         vfio_msix_disable(vdev);
1162     } else if (vdev->interrupt == VFIO_INT_MSI) {
1163         vfio_msi_disable(vdev);
1164     }
1165 
1166     if (vdev->interrupt == VFIO_INT_INTx) {
1167         vfio_intx_disable(vdev);
1168     }
1169 }
1170 
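/*
 * Parse the physical device's MSI capability (vector count, 64-bit address
 * and per-vector masking support) and mirror it with an emulated capability
 * via msi_init().
 */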
1171 static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
1172 {
1173     uint16_t ctrl;
1174     bool msi_64bit, msi_maskbit;
1175     int ret, entries;
1176     Error *err = NULL;
1177 
1178     if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
1179               vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1180         return -errno;
1181     }
1182     ctrl = le16_to_cpu(ctrl);
1183 
1184     msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1185     msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
1186     entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1187 
1188     trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1189 
1190     ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
1191     if (ret < 0) {
1192         if (ret == -ENOTSUP) {
1193             return 0;
1194         }
1195         error_prepend(&err, "vfio: msi_init failed: ");
1196         error_report_err(err);
1197         return ret;
1198     }
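    /*
     * Capability sizes per the PCI spec: 0xa bytes base, 0xe with a 64-bit
     * address, 0x14 with per-vector masking, 0x18 with both.
     */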
1199     vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1200 
1201     return 0;
1202 }
1203 
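/*
 * VFIO refuses to mmap the page(s) containing the MSI-X table, so trim or
 * split this BAR's single mmap such that the table area is left to
 * trap-and-emulate while as much of the rest of the BAR as possible stays
 * mmap'd.
 */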
1204 static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
1205 {
1206     off_t start, end;
1207     VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
1208 
1209     /*
     * We expect to find a single mmap covering the whole BAR; anything else
     * means it's either unsupported or already set up.
1212      */
1213     if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
1214         region->size != region->mmaps[0].size) {
1215         return;
1216     }
1217 
1218     /* MSI-X table start and end aligned to host page size */
1219     start = vdev->msix->table_offset & qemu_real_host_page_mask;
1220     end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1221                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1222 
1223     /*
1224      * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
1225      * NB - Host page size is necessarily a power of two and so is the PCI
1226      * BAR (not counting EA yet), therefore if we have host page aligned
1227      * @start and @end, then any remainder of the BAR before or after those
1228      * must be at least host page sized and therefore mmap'able.
1229      */
1230     if (!start) {
1231         if (end >= region->size) {
1232             region->nr_mmaps = 0;
1233             g_free(region->mmaps);
1234             region->mmaps = NULL;
1235             trace_vfio_msix_fixup(vdev->vbasedev.name,
1236                                   vdev->msix->table_bar, 0, 0);
1237         } else {
1238             region->mmaps[0].offset = end;
1239             region->mmaps[0].size = region->size - end;
1240             trace_vfio_msix_fixup(vdev->vbasedev.name,
1241                               vdev->msix->table_bar, region->mmaps[0].offset,
1242                               region->mmaps[0].offset + region->mmaps[0].size);
1243         }
1244 
1245     /* Maybe it's aligned at the end of the BAR */
1246     } else if (end >= region->size) {
1247         region->mmaps[0].size = start;
1248         trace_vfio_msix_fixup(vdev->vbasedev.name,
1249                               vdev->msix->table_bar, region->mmaps[0].offset,
1250                               region->mmaps[0].offset + region->mmaps[0].size);
1251 
1252     /* Otherwise it must split the BAR */
1253     } else {
1254         region->nr_mmaps = 2;
1255         region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
1256 
1257         memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
1258 
1259         region->mmaps[0].size = start;
1260         trace_vfio_msix_fixup(vdev->vbasedev.name,
1261                               vdev->msix->table_bar, region->mmaps[0].offset,
1262                               region->mmaps[0].offset + region->mmaps[0].size);
1263 
1264         region->mmaps[1].offset = end;
1265         region->mmaps[1].size = region->size - end;
1266         trace_vfio_msix_fixup(vdev->vbasedev.name,
1267                               vdev->msix->table_bar, region->mmaps[1].offset,
1268                               region->mmaps[1].offset + region->mmaps[1].size);
1269     }
1270 }
1271 
1272 /*
1273  * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to set up MSI-X we need a
 * MemoryRegion for the BAR.  In order to set up the BAR and not
1276  * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1277  * need to first look for where the MSI-X table lives.  So we
1278  * unfortunately split MSI-X setup across two functions.
1279  */
1280 static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
1281 {
1282     uint8_t pos;
1283     uint16_t ctrl;
1284     uint32_t table, pba;
1285     int fd = vdev->vbasedev.fd;
1286     VFIOMSIXInfo *msix;
1287 
1288     pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1289     if (!pos) {
1290         return 0;
1291     }
1292 
1293     if (pread(fd, &ctrl, sizeof(ctrl),
1294               vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
1295         return -errno;
1296     }
1297 
1298     if (pread(fd, &table, sizeof(table),
1299               vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
1300         return -errno;
1301     }
1302 
1303     if (pread(fd, &pba, sizeof(pba),
1304               vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
1305         return -errno;
1306     }
1307 
1308     ctrl = le16_to_cpu(ctrl);
1309     table = le32_to_cpu(table);
1310     pba = le32_to_cpu(pba);
1311 
1312     msix = g_malloc0(sizeof(*msix));
1313     msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1314     msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1315     msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1316     msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1317     msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1318 
    /*
     * Test whether the PBA offset extends outside of the specified BAR.  If
     * so, we need to apply a hardware-specific quirk for known devices;
     * otherwise the configuration is broken.
     */
1324     if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1325         /*
1326          * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
1327          * adapters. The T5 hardware returns an incorrect value of 0x8000 for
1328          * the VF PBA offset while the BAR itself is only 8k. The correct value
1329          * is 0x1000, so we hard code that here.
1330          */
1331         if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
1332             (vdev->device_id & 0xff00) == 0x5800) {
1333             msix->pba_offset = 0x1000;
1334         } else {
1335             error_report("vfio: Hardware reports invalid configuration, "
1336                          "MSIX PBA outside of specified BAR");
1337             g_free(msix);
1338             return -EINVAL;
1339         }
1340     }
1341 
1342     trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1343                                 msix->table_offset, msix->entries);
1344     vdev->msix = msix;
1345 
1346     vfio_pci_fixup_msix_region(vdev);
1347 
1348     return 0;
1349 }
1350 
1351 static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
1352 {
1353     int ret;
1354 
1355     vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
1356                                     sizeof(unsigned long));
1357     ret = msix_init(&vdev->pdev, vdev->msix->entries,
1358                     vdev->bars[vdev->msix->table_bar].region.mem,
1359                     vdev->msix->table_bar, vdev->msix->table_offset,
1360                     vdev->bars[vdev->msix->pba_bar].region.mem,
1361                     vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
1362     if (ret < 0) {
1363         if (ret == -ENOTSUP) {
1364             return 0;
1365         }
1366         error_report("vfio: msix_init failed");
1367         return ret;
1368     }
1369 
1370     /*
1371      * The PCI spec suggests that devices provide additional alignment for
1372      * MSI-X structures and avoid overlapping non-MSI-X related registers.
1373      * For an assigned device, this hopefully means that emulation of MSI-X
1374      * structures does not affect the performance of the device.  If devices
1375      * fail to provide that alignment, a significant performance penalty may
1376      * result, for instance Mellanox MT27500 VFs:
1377      * http://www.spinics.net/lists/kvm/msg125881.html
1378      *
1379      * The PBA is simply not that important for such a serious regression and
1380      * most drivers do not appear to look at it.  The solution for this is to
1381      * disable the PBA MemoryRegion unless it's being used.  We disable it
1382      * here and only enable it if a masked vector fires through QEMU.  As the
1383      * vector-use notifier is called, which occurs on unmask, we test whether
1384      * PBA emulation is needed and again disable if not.
1385      */
1386     memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
1387 
1388     return 0;
1389 }
1390 
1391 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1392 {
1393     msi_uninit(&vdev->pdev);
1394 
1395     if (vdev->msix) {
1396         msix_uninit(&vdev->pdev,
1397                     vdev->bars[vdev->msix->table_bar].region.mem,
1398                     vdev->bars[vdev->msix->pba_bar].region.mem);
1399         g_free(vdev->msix->pending);
1400     }
1401 }
1402 
1403 /*
1404  * Resource setup
1405  */
1406 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1407 {
1408     int i;
1409 
1410     for (i = 0; i < PCI_ROM_SLOT; i++) {
1411         vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
1412     }
1413 }
1414 
1415 static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
1416 {
1417     VFIOBAR *bar = &vdev->bars[nr];
1418 
1419     uint32_t pci_bar;
1420     uint8_t type;
1421     int ret;
1422 
    /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1424     if (!bar->region.size) {
1425         return;
1426     }
1427 
1428     /* Determine what type of BAR this is for registration */
1429     ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
1430                 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1431     if (ret != sizeof(pci_bar)) {
1432         error_report("vfio: Failed to read BAR %d (%m)", nr);
1433         return;
1434     }
1435 
1436     pci_bar = le32_to_cpu(pci_bar);
1437     bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1438     bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1439     type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1440                                     ~PCI_BASE_ADDRESS_MEM_MASK);
1441 
1442     if (vfio_region_mmap(&bar->region)) {
1443         error_report("Failed to mmap %s BAR %d. Performance may be slow",
1444                      vdev->vbasedev.name, nr);
1445     }
1446 
1447     pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
1448 }
1449 
1450 static void vfio_bars_setup(VFIOPCIDevice *vdev)
1451 {
1452     int i;
1453 
1454     for (i = 0; i < PCI_ROM_SLOT; i++) {
1455         vfio_bar_setup(vdev, i);
1456     }
1457 }
1458 
1459 static void vfio_bars_exit(VFIOPCIDevice *vdev)
1460 {
1461     int i;
1462 
1463     for (i = 0; i < PCI_ROM_SLOT; i++) {
1464         vfio_bar_quirk_exit(vdev, i);
1465         vfio_region_exit(&vdev->bars[i].region);
1466     }
1467 
1468     if (vdev->vga) {
1469         pci_unregister_vga(&vdev->pdev);
1470         vfio_vga_quirk_exit(vdev);
1471     }
1472 }
1473 
1474 static void vfio_bars_finalize(VFIOPCIDevice *vdev)
1475 {
1476     int i;
1477 
1478     for (i = 0; i < PCI_ROM_SLOT; i++) {
1479         vfio_bar_quirk_finalize(vdev, i);
1480         vfio_region_finalize(&vdev->bars[i].region);
1481     }
1482 
1483     if (vdev->vga) {
1484         vfio_vga_quirk_finalize(vdev);
1485         for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1486             object_unparent(OBJECT(&vdev->vga->region[i].mem));
1487         }
1488         g_free(vdev->vga);
1489     }
1490 }
1491 
1492 /*
1493  * General setup
1494  */
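/*
 * The capability list encodes no sizes; take the maximum size of a standard
 * capability to be the gap between it and the next capability above it (or
 * the end of standard config space).
 */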
1495 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1496 {
1497     uint8_t tmp;
1498     uint16_t next = PCI_CONFIG_SPACE_SIZE;
1499 
1500     for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1501          tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
1502         if (tmp > pos && tmp < next) {
1503             next = tmp;
1504         }
1505     }
1506 
1507     return next - pos;
1508 }
1509 
1510 
1511 static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1512 {
1513     uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1514 
1515     for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1516         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1517         if (tmp > pos && tmp < next) {
1518             next = tmp;
1519         }
1520     }
1521 
1522     return next - pos;
1523 }
1524 
1525 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1526 {
1527     pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1528 }
1529 
1530 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1531                                    uint16_t val, uint16_t mask)
1532 {
1533     vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1534     vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1535     vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1536 }
1537 
1538 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1539 {
1540     pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1541 }
1542 
1543 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1544                                    uint32_t val, uint32_t mask)
1545 {
1546     vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1547     vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1548     vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1549 }
1550 
1551 static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
1552 {
1553     uint16_t flags;
1554     uint8_t type;
1555 
1556     flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1557     type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1558 
1559     if (type != PCI_EXP_TYPE_ENDPOINT &&
1560         type != PCI_EXP_TYPE_LEG_END &&
1561         type != PCI_EXP_TYPE_RC_END) {
1562 
1563         error_report("vfio: Assignment of PCIe type 0x%x "
1564                      "devices is not currently supported", type);
1565         return -EINVAL;
1566     }
1567 
1568     if (!pci_bus_is_express(vdev->pdev.bus)) {
1569         PCIBus *bus = vdev->pdev.bus;
1570         PCIDevice *bridge;
1571 
1572         /*
1573          * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason is that some drivers
         * simply assume that it's there, for example tg3.  However, when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold:
         * first, Windows guests get a Code 10 error when the PCIe capability
1579          * is exposed in this configuration.  Therefore express devices won't
1580          * work at all unless they're attached to express buses in the VM.
1581          * Second, a native PCIe machine introduces the possibility of fine
1582          * granularity IOMMUs supporting both translation and isolation.
1583          * Guest code to discover the IOMMU visibility of a device, such as
1584          * IOMMU grouping code on Linux, is very aware of device types and
1585          * valid transitions between bus types.  An express device on a non-
1586          * express bus is not a valid combination on bare metal systems.
1587          *
1588          * Drivers that require a PCIe capability to make the device
1589          * functional are simply going to need to have their devices placed
1590          * on a PCIe bus in the VM.
1591          */
1592         while (!pci_bus_is_root(bus)) {
1593             bridge = pci_bridge_get_device(bus);
1594             bus = bridge->bus;
1595         }
1596 
1597         if (pci_bus_is_express(bus)) {
1598             return 0;
1599         }
1600 
1601     } else if (pci_bus_is_root(vdev->pdev.bus)) {
1602         /*
1603          * On a Root Complex bus Endpoints become Root Complex Integrated
1604          * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1605          */
1606         if (type == PCI_EXP_TYPE_ENDPOINT) {
1607             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1608                                    PCI_EXP_TYPE_RC_END << 4,
1609                                    PCI_EXP_FLAGS_TYPE);
1610 
1611             /* Link Capabilities, Status, and Control go away */
1612             if (size > PCI_EXP_LNKCTL) {
1613                 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1614                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1615                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1616 
1617 #ifndef PCI_EXP_LNKCAP2
1618 #define PCI_EXP_LNKCAP2 44
1619 #endif
1620 #ifndef PCI_EXP_LNKSTA2
1621 #define PCI_EXP_LNKSTA2 50
1622 #endif
1623                 /* Link 2 Capabilities, Status, and Control go away */
1624                 if (size > PCI_EXP_LNKCAP2) {
1625                     vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1626                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1627                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1628                 }
1629             }
1630 
1631         } else if (type == PCI_EXP_TYPE_LEG_END) {
1632             /*
1633              * Legacy endpoints don't belong on the root complex.  Windows
1634              * seems to be happier with devices if we skip the capability.
1635              */
1636             return 0;
1637         }
1638 
1639     } else {
1640         /*
1641          * Convert Root Complex Integrated Endpoints to regular endpoints.
1642          * These devices don't support LNK/LNK2 capabilities, so make them up.
1643          */
1644         if (type == PCI_EXP_TYPE_RC_END) {
1645             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1646                                    PCI_EXP_TYPE_ENDPOINT << 4,
1647                                    PCI_EXP_FLAGS_TYPE);
1648             vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1649                                    PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1650             vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1651         }
1652 
1653         /* Mark the Link Status bits as emulated to allow virtual negotiation */
1654         vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1655                                pci_get_word(vdev->pdev.config + pos +
1656                                             PCI_EXP_LNKSTA),
1657                                PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1658     }
1659 
1660     pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
1661     if (pos >= 0) {
1662         vdev->pdev.exp.exp_cap = pos;
1663     }
1664 
1665     return pos;
1666 }
1667 
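/* Note whether the device advertises Function Level Reset in DEVCAP */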
1668 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
1669 {
1670     uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1671 
1672     if (cap & PCI_EXP_DEVCAP_FLR) {
1673         trace_vfio_check_pcie_flr(vdev->vbasedev.name);
1674         vdev->has_flr = true;
1675     }
1676 }
1677 
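/*
 * NoSoftRst- in PMCSR means the D3hot->D0 transition resets the device,
 * giving us a usable PM reset mechanism.
 */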
1678 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
1679 {
1680     uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1681 
1682     if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
1683         trace_vfio_check_pm_reset(vdev->vbasedev.name);
1684         vdev->has_pm_reset = true;
1685     }
1686 }
1687 
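/* AF FLR is only usable when both the TP and FLR capability bits are set */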
1688 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
1689 {
1690     uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1691 
1692     if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
1693         trace_vfio_check_af_flr(vdev->vbasedev.name);
1694         vdev->has_flr = true;
1695     }
1696 }
1697 
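/*
 * Rebuild the physical device's standard capability chain in QEMU's
 * emulated config space, special-casing capabilities that need extra
 * emulation (MSI, MSI-X, express, PM, AF).
 */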
1698 static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
1699 {
1700     PCIDevice *pdev = &vdev->pdev;
1701     uint8_t cap_id, next, size;
1702     int ret;
1703 
1704     cap_id = pdev->config[pos];
1705     next = pdev->config[pos + PCI_CAP_LIST_NEXT];
1706 
1707     /*
1708      * If it becomes important to configure capabilities to their actual
1709      * size, use this as the default when it's something we don't recognize.
1710      * Since QEMU doesn't actually handle many of the config accesses,
1711      * exact size doesn't seem worthwhile.
1712      */
1713     size = vfio_std_cap_max_size(pdev, pos);
1714 
1715     /*
1716      * pci_add_capability always inserts the new capability at the head
1717      * of the chain.  Therefore to end up with a chain that matches the
1718      * physical device, we insert from the end by making this recursive.
1719      * This is also why we pre-calculate size above as cached config space
1720      * will be changed as we unwind the stack.
1721      */
1722     if (next) {
1723         ret = vfio_add_std_cap(vdev, next);
1724         if (ret) {
1725             return ret;
1726         }
1727     } else {
1728         /* Begin the rebuild, use QEMU emulated list bits */
1729         pdev->config[PCI_CAPABILITY_LIST] = 0;
1730         vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1731         vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1732     }
1733 
1734     /* Use emulated next pointer to allow dropping caps */
1735     pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
1736 
1737     switch (cap_id) {
1738     case PCI_CAP_ID_MSI:
1739         ret = vfio_msi_setup(vdev, pos);
1740         break;
1741     case PCI_CAP_ID_EXP:
1742         vfio_check_pcie_flr(vdev, pos);
1743         ret = vfio_setup_pcie_cap(vdev, pos, size);
1744         break;
1745     case PCI_CAP_ID_MSIX:
1746         ret = vfio_msix_setup(vdev, pos);
1747         break;
1748     case PCI_CAP_ID_PM:
1749         vfio_check_pm_reset(vdev, pos);
1750         vdev->pm_cap = pos;
1751         ret = pci_add_capability(pdev, cap_id, pos, size);
1752         break;
1753     case PCI_CAP_ID_AF:
1754         vfio_check_af_flr(vdev, pos);
1755         ret = pci_add_capability(pdev, cap_id, pos, size);
1756         break;
1757     default:
1758         ret = pci_add_capability(pdev, cap_id, pos, size);
1759         break;
1760     }
1761 
1762     if (ret < 0) {
1763         error_report("vfio: %s Error adding PCI capability "
1764                      "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
1765                      cap_id, size, pos, ret);
1766         return ret;
1767     }
1768 
1769     return 0;
1770 }
1771 
1772 static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
1773 {
1774     PCIDevice *pdev = &vdev->pdev;
1775     uint32_t header;
1776     uint16_t cap_id, next, size;
1777     uint8_t cap_ver;
1778     uint8_t *config;
1779 
1780     /* Only add extended caps if we have them and the guest can see them */
1781     if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
1782         !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
1783         return 0;
1784     }
1785 
1786     /*
1787      * pcie_add_capability always inserts the new capability at the tail
1788      * of the chain.  Therefore to end up with a chain that matches the
1789      * physical device, we cache the config space to avoid overwriting
1790      * the original config space when we parse the extended capabilities.
1791      */
1792     config = g_memdup(pdev->config, vdev->config_size);
1793 
1794     /*
1795      * Extended capabilities are chained with each pointing to the next, so we
1796      * can drop anything other than the head of the chain simply by modifying
1797      * the previous next pointer.  For the head of the chain, we can modify the
1798      * capability ID to something that cannot match a valid capability.  ID
1799      * 0 is reserved for this since absence of capabilities is indicated by
1800      * 0 for the ID, version, AND next pointer.  However, pcie_add_capability()
1801      * uses ID 0 as reserved for list management and will incorrectly match and
1802      * assert if we attempt to pre-load the head of the chain with this ID.
1803      * Use ID 0xFFFF temporarily since it also seems to be reserved in
1804      * part for identifying absence of capabilities in a root complex register
1805      * block.  If the ID still exists after adding capabilities, switch back to
1806      * zero.  We'll mark this entire first dword as emulated for this purpose.
1807      */
1808     pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
1809                  PCI_EXT_CAP(0xFFFF, 0, 0));
1810     pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
1811     pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
1812 
1813     for (next = PCI_CONFIG_SPACE_SIZE; next;
1814          next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
1815         header = pci_get_long(config + next);
1816         cap_id = PCI_EXT_CAP_ID(header);
1817         cap_ver = PCI_EXT_CAP_VER(header);
1818 
1819         /*
1820          * If it becomes important to configure extended capabilities to their
1821          * actual size, use this as the default when it's something we don't
1822          * recognize. Since QEMU doesn't actually handle many of the config
1823          * accesses, exact size doesn't seem worthwhile.
1824          */
1825         size = vfio_ext_cap_max_size(config, next);
1826 
1827         /* Use emulated next pointer to allow dropping extended caps */
1828         pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
1829                                    PCI_EXT_CAP_NEXT_MASK);
1830 
1831         switch (cap_id) {
1832         case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
1833         case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
1834             trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
1835             break;
1836         default:
1837             pcie_add_capability(pdev, cap_id, cap_ver, next, size);
1838         }
1839 
1840     }
1841 
1842     /* Cleanup chain head ID if necessary */
1843     if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
1844         pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
1845     }
1846 
1847     g_free(config);
1848     return 0;
1849 }
1850 
1851 static int vfio_add_capabilities(VFIOPCIDevice *vdev)
1852 {
1853     PCIDevice *pdev = &vdev->pdev;
1854     int ret;
1855 
1856     if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
1857         !pdev->config[PCI_CAPABILITY_LIST]) {
1858         return 0; /* Nothing to add */
1859     }
1860 
1861     ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
1862     if (ret) {
1863         return ret;
1864     }
1865 
1866     return vfio_add_ext_cap(vdev);
1867 }
1868 
1869 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
1870 {
1871     PCIDevice *pdev = &vdev->pdev;
1872     uint16_t cmd;
1873 
1874     vfio_disable_interrupts(vdev);
1875 
1876     /* Make sure the device is in D0 */
1877     if (vdev->pm_cap) {
1878         uint16_t pmcsr;
1879         uint8_t state;
1880 
1881         pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1882         state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1883         if (state) {
1884             pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1885             vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
1886             /* vfio handles the necessary delay here */
1887             pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1888             state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1889             if (state) {
1890                 error_report("vfio: Unable to power on device, stuck in D%d",
1891                              state);
1892             }
1893         }
1894     }
1895 
1896     /*
1897      * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
1898      * Also put INTx Disable in known state.
1899      */
1900     cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
1901     cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1902              PCI_COMMAND_INTX_DISABLE);
1903     vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
1904 }
1905 
1906 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
1907 {
1908     vfio_intx_enable(vdev);
1909 }
1910 
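/* Compare a device name against a segment:bus:device.function address */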
1911 static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
1912 {
1913     char tmp[13];
1914 
1915     sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
1916             addr->bus, addr->slot, addr->function);
1917 
1918     return (strcmp(tmp, name) == 0);
1919 }
1920 
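/*
 * Perform a bus (hot) reset: query the kernel for the set of affected
 * devices, verify we own every group involved, quiesce each affected
 * device, then issue VFIO_DEVICE_PCI_HOT_RESET with the group fds.  With
 * @single set, fail rather than reset if any other in-use device would be
 * affected.
 */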
1921 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
1922 {
1923     VFIOGroup *group;
1924     struct vfio_pci_hot_reset_info *info;
1925     struct vfio_pci_dependent_device *devices;
1926     struct vfio_pci_hot_reset *reset;
1927     int32_t *fds;
1928     int ret, i, count;
1929     bool multi = false;
1930 
1931     trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
1932 
1933     vfio_pci_pre_reset(vdev);
1934     vdev->vbasedev.needs_reset = false;
1935 
1936     info = g_malloc0(sizeof(*info));
1937     info->argsz = sizeof(*info);
1938 
1939     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1940     if (ret && errno != ENOSPC) {
1941         ret = -errno;
1942         if (!vdev->has_pm_reset) {
1943             error_report("vfio: Cannot reset device %s, "
1944                          "no available reset mechanism.", vdev->vbasedev.name);
1945         }
1946         goto out_single;
1947     }
1948 
1949     count = info->count;
1950     info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
1951     info->argsz = sizeof(*info) + (count * sizeof(*devices));
1952     devices = &info->devices[0];
1953 
1954     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1955     if (ret) {
1956         ret = -errno;
1957         error_report("vfio: hot reset info failed: %m");
1958         goto out_single;
1959     }
1960 
1961     trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
1962 
1963     /* Verify that we have all the groups required */
1964     for (i = 0; i < info->count; i++) {
1965         PCIHostDeviceAddress host;
1966         VFIOPCIDevice *tmp;
1967         VFIODevice *vbasedev_iter;
1968 
1969         host.domain = devices[i].segment;
1970         host.bus = devices[i].bus;
1971         host.slot = PCI_SLOT(devices[i].devfn);
1972         host.function = PCI_FUNC(devices[i].devfn);
1973 
1974         trace_vfio_pci_hot_reset_dep_devices(host.domain,
1975                 host.bus, host.slot, host.function, devices[i].group_id);
1976 
1977         if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
1978             continue;
1979         }
1980 
1981         QLIST_FOREACH(group, &vfio_group_list, next) {
1982             if (group->groupid == devices[i].group_id) {
1983                 break;
1984             }
1985         }
1986 
1987         if (!group) {
1988             if (!vdev->has_pm_reset) {
1989                 error_report("vfio: Cannot reset device %s, "
1990                              "depends on group %d which is not owned.",
1991                              vdev->vbasedev.name, devices[i].group_id);
1992             }
1993             ret = -EPERM;
1994             goto out;
1995         }
1996 
1997         /* Prep dependent devices for reset and clear our marker. */
1998         QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
1999             if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2000                 continue;
2001             }
2002             tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2003             if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2004                 if (single) {
2005                     ret = -EINVAL;
2006                     goto out_single;
2007                 }
2008                 vfio_pci_pre_reset(tmp);
2009                 tmp->vbasedev.needs_reset = false;
2010                 multi = true;
2011                 break;
2012             }
2013         }
2014     }
2015 
2016     if (!single && !multi) {
2017         ret = -EINVAL;
2018         goto out_single;
2019     }
2020 
2021     /* Determine how many group fds need to be passed */
2022     count = 0;
2023     QLIST_FOREACH(group, &vfio_group_list, next) {
2024         for (i = 0; i < info->count; i++) {
2025             if (group->groupid == devices[i].group_id) {
2026                 count++;
2027                 break;
2028             }
2029         }
2030     }
2031 
2032     reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2033     reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2034     fds = &reset->group_fds[0];
2035 
2036     /* Fill in group fds */
2037     QLIST_FOREACH(group, &vfio_group_list, next) {
2038         for (i = 0; i < info->count; i++) {
2039             if (group->groupid == devices[i].group_id) {
2040                 fds[reset->count++] = group->fd;
2041                 break;
2042             }
2043         }
2044     }
2045 
2046     /* Bus reset! */
2047     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
2048     g_free(reset);
2049 
2050     trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
2051                                     ret ? "%m" : "Success");
2052 
2053 out:
2054     /* Re-enable INTx on affected devices */
2055     for (i = 0; i < info->count; i++) {
2056         PCIHostDeviceAddress host;
2057         VFIOPCIDevice *tmp;
2058         VFIODevice *vbasedev_iter;
2059 
2060         host.domain = devices[i].segment;
2061         host.bus = devices[i].bus;
2062         host.slot = PCI_SLOT(devices[i].devfn);
2063         host.function = PCI_FUNC(devices[i].devfn);
2064 
2065         if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2066             continue;
2067         }
2068 
2069         QLIST_FOREACH(group, &vfio_group_list, next) {
2070             if (group->groupid == devices[i].group_id) {
2071                 break;
2072             }
2073         }
2074 
2075         if (!group) {
2076             break;
2077         }
2078 
2079         QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2080             if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2081                 continue;
2082             }
2083             tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2084             if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2085                 vfio_pci_post_reset(tmp);
2086                 break;
2087             }
2088         }
2089     }
2090 out_single:
2091     vfio_pci_post_reset(vdev);
2092     g_free(info);
2093 
2094     return ret;
2095 }
2096 
2097 /*
2098  * We want to differentiate hot reset of multiple in-use devices vs hot reset
2099  * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
2100  * of doing hot resets when there is only a single device per bus.  The in-use
2101  * here refers to how many VFIODevices are affected.  A hot reset that affects
2102  * multiple devices, but only a single in-use device, means that we can call
2103  * it from our bus ->reset() callback since the extent is effectively a single
2104  * device.  This allows us to make use of it in the hotplug path.  When there
2105  * are multiple in-use devices, we can only trigger the hot reset during a
2106  * system reset and thus from our reset handler.  We separate _one vs _multi
2107  * here so that we don't overlap and do a double reset on the system reset
2108  * path where both our reset handler and ->reset() callback are used.  Calling
2109  * _one() will only do a hot reset for the one in-use devices case, calling
2110  * _one() will only do a hot reset for the single in-use device case, calling
2111  */
2112 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2113 {
2114     return vfio_pci_hot_reset(vdev, true);
2115 }
2116 
2117 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2118 {
2119     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2120     return vfio_pci_hot_reset(vdev, false);
2121 }
2122 
2123 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2124 {
2125     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2126     if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2127         vbasedev->needs_reset = true;
2128     }
2129 }
2130 
2131 static VFIODeviceOps vfio_pci_ops = {
2132     .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2133     .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2134     .vfio_eoi = vfio_intx_eoi,
2135 };
2136 
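/*
 * Set up the VGA region, splitting it into the three legacy apertures
 * QEMU expects: memory at 0xa0000 and the two I/O port ranges at 0x3b0
 * and 0x3c0.
 */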
2137 int vfio_populate_vga(VFIOPCIDevice *vdev)
2138 {
2139     VFIODevice *vbasedev = &vdev->vbasedev;
2140     struct vfio_region_info *reg_info;
2141     int ret;
2142 
2143     ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2144     if (ret) {
2145         return ret;
2146     }
2147 
2148     if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2149         !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2150         reg_info->size < 0xbffff + 1) {
2151         error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
2152                      (unsigned long)reg_info->flags,
2153                      (unsigned long)reg_info->size);
2154         g_free(reg_info);
2155         return -EINVAL;
2156     }
2157 
2158     vdev->vga = g_new0(VFIOVGA, 1);
2159 
2160     vdev->vga->fd_offset = reg_info->offset;
2161     vdev->vga->fd = vdev->vbasedev.fd;
2162 
2163     g_free(reg_info);
2164 
2165     vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2166     vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2167     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2168 
2169     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2170                           OBJECT(vdev), &vfio_vga_ops,
2171                           &vdev->vga->region[QEMU_PCI_VGA_MEM],
2172                           "vfio-vga-mmio@0xa0000",
2173                           QEMU_PCI_VGA_MEM_SIZE);
2174 
2175     vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2176     vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2177     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2178 
2179     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2180                           OBJECT(vdev), &vfio_vga_ops,
2181                           &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2182                           "vfio-vga-io@0x3b0",
2183                           QEMU_PCI_VGA_IO_LO_SIZE);
2184 
2185     vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2186     vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2187     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2188 
2189     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2190                           OBJECT(vdev), &vfio_vga_ops,
2191                           &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2192                           "vfio-vga-io@0x3c0",
2193                           QEMU_PCI_VGA_IO_HI_SIZE);
2194 
2195     pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2196                      &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2197                      &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2198 
2199     return 0;
2200 }
2201 
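/*
 * Sanity check the vfio device and discover its resources: BAR regions,
 * the config space region, the optional VGA region, and error reporting
 * (AER) interrupt support.
 */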
2202 static int vfio_populate_device(VFIOPCIDevice *vdev)
2203 {
2204     VFIODevice *vbasedev = &vdev->vbasedev;
2205     struct vfio_region_info *reg_info;
2206     struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
2207     int i, ret = -1;
2208 
2209     /* Sanity check device */
2210     if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2211         error_report("vfio: this isn't a PCI device");
2212         goto error;
2213     }
2214 
2215     if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2216         error_report("vfio: unexpected number of io regions %u",
2217                      vbasedev->num_regions);
2218         goto error;
2219     }
2220 
2221     if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2222         error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
2223         goto error;
2224     }
2225 
2226     for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2227         char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2228 
2229         ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2230                                 &vdev->bars[i].region, i, name);
2231         g_free(name);
2232 
2233         if (ret) {
2234             error_report("vfio: Error getting region %d info: %m", i);
2235             goto error;
2236         }
2237 
2238         QLIST_INIT(&vdev->bars[i].quirks);
2239     }
2240 
2241     ret = vfio_get_region_info(vbasedev,
2242                                VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2243     if (ret) {
2244         error_report("vfio: Error getting config info: %m");
2245         goto error;
2246     }
2247 
2248     trace_vfio_populate_device_config(vdev->vbasedev.name,
2249                                       (unsigned long)reg_info->size,
2250                                       (unsigned long)reg_info->offset,
2251                                       (unsigned long)reg_info->flags);
2252 
2253     vdev->config_size = reg_info->size;
2254     if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2255         vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2256     }
2257     vdev->config_offset = reg_info->offset;
2258 
2259     g_free(reg_info);
2260 
2261     if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2262         ret = vfio_populate_vga(vdev);
2263         if (ret) {
2264             error_report(
2265                 "vfio: Device does not support requested feature x-vga");
2266             goto error;
2267         }
2268     }
2269 
2270     irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2271 
2272     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2273     if (ret) {
2274         /* This can fail for an old kernel or legacy PCI dev */
2275         trace_vfio_populate_device_get_irq_info_failure();
2276         ret = 0;
2277     } else if (irq_info.count == 1) {
2278         vdev->pci_aer = true;
2279     } else {
2280         error_report("vfio: %s "
2281                      "Could not enable error recovery for the device",
2282                      vbasedev->name);
2283     }
2284 
2285 error:
2286     return ret;
2287 }
2288 
2289 static void vfio_put_device(VFIOPCIDevice *vdev)
2290 {
2291     g_free(vdev->vbasedev.name);
2292     g_free(vdev->msix);
2293 
2294     vfio_put_base_device(&vdev->vbasedev);
2295 }
2296 
2297 static void vfio_err_notifier_handler(void *opaque)
2298 {
2299     VFIOPCIDevice *vdev = opaque;
2300 
2301     if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2302         return;
2303     }
2304 
2305     /*
2306      * TBD. Retrieve the error details and decide what action
2307      * needs to be taken. One of the actions could be to pass
2308      * the error to the guest and have the guest driver recover
2309      * from the error. This requires that PCIe capabilities be
2310      * exposed to the guest. For now, we just terminate the
2311      * guest to contain the error.
2312      */
2313 
2314     error_report("%s(%s) Unrecoverable error detected. Please collect any "
                      "data possible and then kill the guest",
                      __func__, vdev->vbasedev.name);
2315 
2316     vm_stop(RUN_STATE_INTERNAL_ERROR);
2317 }
2318 
2319 /*
2320  * Registers error notifier for devices supporting error recovery.
2321  * If we encounter a failure in this function, we report an error
2322  * and continue after disabling error recovery support for the
2323  * device.
2324  */
2325 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2326 {
2327     int ret;
2328     int argsz;
2329     struct vfio_irq_set *irq_set;
2330     int32_t *pfd;
2331 
2332     if (!vdev->pci_aer) {
2333         return;
2334     }
2335 
2336     if (event_notifier_init(&vdev->err_notifier, 0)) {
2337         error_report("vfio: Unable to init event notifier for error detection");
2338         vdev->pci_aer = false;
2339         return;
2340     }
2341 
2342     argsz = sizeof(*irq_set) + sizeof(*pfd);
2343 
2344     irq_set = g_malloc0(argsz);
2345     irq_set->argsz = argsz;
2346     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2347                      VFIO_IRQ_SET_ACTION_TRIGGER;
2348     irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2349     irq_set->start = 0;
2350     irq_set->count = 1;
2351     pfd = (int32_t *)&irq_set->data;
2352 
2353     *pfd = event_notifier_get_fd(&vdev->err_notifier);
2354     qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2355 
2356     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2357     if (ret) {
2358         error_report("vfio: Failed to set up error notification");
2359         qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2360         event_notifier_cleanup(&vdev->err_notifier);
2361         vdev->pci_aer = false;
2362     }
2363     g_free(irq_set);
2364 }
2365 
2366 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2367 {
2368     int argsz;
2369     struct vfio_irq_set *irq_set;
2370     int32_t *pfd;
2371     int ret;
2372 
2373     if (!vdev->pci_aer) {
2374         return;
2375     }
2376 
2377     argsz = sizeof(*irq_set) + sizeof(*pfd);
2378 
2379     irq_set = g_malloc0(argsz);
2380     irq_set->argsz = argsz;
2381     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2382                      VFIO_IRQ_SET_ACTION_TRIGGER;
2383     irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2384     irq_set->start = 0;
2385     irq_set->count = 1;
2386     pfd = (int32_t *)&irq_set->data;
2387     *pfd = -1;
2388 
2389     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2390     if (ret) {
2391         error_report("vfio: Failed to de-assign error fd: %m");
2392     }
2393     g_free(irq_set);
2394     qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2395                         NULL, NULL, vdev);
2396     event_notifier_cleanup(&vdev->err_notifier);
2397 }
2398 
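/*
 * The kernel signals the request notifier when the host wants the device
 * back, e.g. for an administrative unbind; respond by unplugging the
 * device from the guest.
 */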
2399 static void vfio_req_notifier_handler(void *opaque)
2400 {
2401     VFIOPCIDevice *vdev = opaque;
2402 
2403     if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2404         return;
2405     }
2406 
2407     qdev_unplug(&vdev->pdev.qdev, NULL);
2408 }
2409 
2410 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2411 {
2412     struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2413                                       .index = VFIO_PCI_REQ_IRQ_INDEX };
2414     int argsz;
2415     struct vfio_irq_set *irq_set;
2416     int32_t *pfd;
2417 
2418     if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2419         return;
2420     }
2421 
2422     if (ioctl(vdev->vbasedev.fd,
2423               VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2424         return;
2425     }
2426 
2427     if (event_notifier_init(&vdev->req_notifier, 0)) {
2428         error_report("vfio: Unable to init event notifier for device request");
2429         return;
2430     }
2431 
2432     argsz = sizeof(*irq_set) + sizeof(*pfd);
2433 
2434     irq_set = g_malloc0(argsz);
2435     irq_set->argsz = argsz;
2436     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2437                      VFIO_IRQ_SET_ACTION_TRIGGER;
2438     irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2439     irq_set->start = 0;
2440     irq_set->count = 1;
2441     pfd = (int32_t *)&irq_set->data;
2442 
2443     *pfd = event_notifier_get_fd(&vdev->req_notifier);
2444     qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2445 
2446     if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2447         error_report("vfio: Failed to set up device request notification");
2448         qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2449         event_notifier_cleanup(&vdev->req_notifier);
2450     } else {
2451         vdev->req_enabled = true;
2452     }
2453 
2454     g_free(irq_set);
2455 }
2456 
2457 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2458 {
2459     int argsz;
2460     struct vfio_irq_set *irq_set;
2461     int32_t *pfd;
2462 
2463     if (!vdev->req_enabled) {
2464         return;
2465     }
2466 
2467     argsz = sizeof(*irq_set) + sizeof(*pfd);
2468 
2469     irq_set = g_malloc0(argsz);
2470     irq_set->argsz = argsz;
2471     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2472                      VFIO_IRQ_SET_ACTION_TRIGGER;
2473     irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2474     irq_set->start = 0;
2475     irq_set->count = 1;
2476     pfd = (int32_t *)&irq_set->data;
2477     *pfd = -1;
2478 
2479     if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2480         error_report("vfio: Failed to de-assign device request fd: %m");
2481     }
2482     g_free(irq_set);
2483     qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2484                         NULL, NULL, vdev);
2485     event_notifier_cleanup(&vdev->req_notifier);
2486 
2487     vdev->req_enabled = false;
2488 }
2489 
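/*
 * Device realization: resolve the host sysfs device to an IOMMU group,
 * acquire the group and device, read and fix up config space, map BARs,
 * rebuild capabilities, and wire up interrupts and notifiers.
 */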
2490 static int vfio_initfn(PCIDevice *pdev)
2491 {
2492     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2493     VFIODevice *vbasedev_iter;
2494     VFIOGroup *group;
2495     char *tmp, group_path[PATH_MAX], *group_name;
2496     ssize_t len;
2497     struct stat st;
2498     int groupid;
2499     int i, ret;
2500 
2501     if (!vdev->vbasedev.sysfsdev) {
2502         vdev->vbasedev.sysfsdev =
2503             g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2504                             vdev->host.domain, vdev->host.bus,
2505                             vdev->host.slot, vdev->host.function);
2506     }
2507 
2508     if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
2509         error_report("vfio: error: no such host device: %s",
2510                      vdev->vbasedev.sysfsdev);
2511         return -errno;
2512     }
2513 
2514     vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
2515     vdev->vbasedev.ops = &vfio_pci_ops;
2516     vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
2517 
2518     tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2519     len = readlink(tmp, group_path, sizeof(group_path));
2520     g_free(tmp);
2521 
2522     if (len <= 0 || len >= sizeof(group_path)) {
2523         error_report("vfio: error: no iommu_group for device");
2524         return len < 0 ? -errno : -ENAMETOOLONG;
2525     }
2526 
2527     group_path[len] = 0;
2528 
2529     group_name = basename(group_path);
2530     if (sscanf(group_name, "%d", &groupid) != 1) {
2531         error_report("vfio: error reading %s: %m", group_path);
2532         return -errno;
2533     }
2534 
2535     trace_vfio_initfn(vdev->vbasedev.name, groupid);
2536 
2537     group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
2538     if (!group) {
2539         error_report("vfio: failed to get group %d", groupid);
2540         return -ENOENT;
2541     }
2542 
2543     QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2544         if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2545             error_report("vfio: error: device %s is already attached",
2546                          vdev->vbasedev.name);
2547             vfio_put_group(group);
2548             return -EBUSY;
2549         }
2550     }
2551 
2552     ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
2553     if (ret) {
2554         error_report("vfio: failed to get device %s", vdev->vbasedev.name);
2555         vfio_put_group(group);
2556         return ret;
2557     }
2558 
2559     ret = vfio_populate_device(vdev);
2560     if (ret) {
2561         return ret;
2562     }
2563 
2564     /* Get a copy of config space */
2565     ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2566                 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2567                 vdev->config_offset);
2568     if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2569         ret = ret < 0 ? -errno : -EFAULT;
2570         error_report("vfio: Failed to read device config space");
2571         return ret;
2572     }
2573 
2574     /* vfio emulates a lot for us, but some bits need extra love */
2575     vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2576 
2577     /* QEMU can choose to expose the ROM or not */
2578     memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2579 
2580     /*
2581      * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
2582      * device ID is managed by the vendor and need only be a 16-bit value.
2583      * Allow any 16-bit value for subsystem so they can be hidden or changed.
2584      */
2585     if (vdev->vendor_id != PCI_ANY_ID) {
2586         if (vdev->vendor_id >= 0xffff) {
2587             error_report("vfio: Invalid PCI vendor ID provided");
2588             return -EINVAL;
2589         }
2590         vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2591         trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2592     } else {
2593         vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2594     }
2595 
2596     if (vdev->device_id != PCI_ANY_ID) {
2597         if (vdev->device_id > 0xffff) {
2598             error_report("vfio: Invalid PCI device ID provided");
2599             return -EINVAL;
2600         }
2601         vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2602         trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2603     } else {
2604         vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2605     }
2606 
2607     if (vdev->sub_vendor_id != PCI_ANY_ID) {
2608         if (vdev->sub_vendor_id > 0xffff) {
2609             error_report("vfio: Invalid PCI subsystem vendor ID provided");
2610             return -EINVAL;
2611         }
2612         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2613                                vdev->sub_vendor_id, ~0);
2614         trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2615                                               vdev->sub_vendor_id);
2616     }
2617 
2618     if (vdev->sub_device_id != PCI_ANY_ID) {
2619         if (vdev->sub_device_id > 0xffff) {
2620             error_report("vfio: Invalid PCI subsystem device ID provided");
2621             return -EINVAL;
2622         }
2623         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2624         trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2625                                               vdev->sub_device_id);
2626     }
2627 
2628     /* QEMU can change multi-function devices to single function, or reverse */
2629     vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2630                                               PCI_HEADER_TYPE_MULTI_FUNCTION;
2631 
2632     /* Restore or clear multifunction, this is always controlled by QEMU */
2633     if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2634         vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2635     } else {
2636         vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2637     }
2638 
2639     /*
2640      * Clear host resource mapping info.  If we choose not to register a
2641      * BAR, such as might be the case with the option ROM, we can get
2642      * confusing, unwritable, residual addresses from the host here.
2643      */
2644     memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2645     memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2646 
2647     vfio_pci_size_rom(vdev);
2648 
2649     ret = vfio_msix_early_setup(vdev);
2650     if (ret) {
2651         return ret;
2652     }
2653 
2654     vfio_bars_setup(vdev);
2655 
2656     ret = vfio_add_capabilities(vdev);
2657     if (ret) {
2658         goto out_teardown;
2659     }
2660 
2661     if (vdev->vga) {
2662         vfio_vga_quirk_setup(vdev);
2663     }
2664 
2665     for (i = 0; i < PCI_ROM_SLOT; i++) {
2666         vfio_bar_quirk_setup(vdev, i);
2667     }
2668 
2669     if (!vdev->igd_opregion &&
2670         vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2671         struct vfio_region_info *opregion;
2672 
2673         if (vdev->pdev.qdev.hotplugged) {
2674             error_report("Cannot support IGD OpRegion feature on hotplugged "
2675                          "device %s", vdev->vbasedev.name);
2676             ret = -EINVAL;
2677             goto out_teardown;
2678         }
2679 
2680         ret = vfio_get_dev_region_info(&vdev->vbasedev,
2681                         VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2682                         VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2683         if (ret) {
2684             error_report("Device %s does not support requested IGD OpRegion "
2685                          "feature", vdev->vbasedev.name);
2686             goto out_teardown;
2687         }
2688 
2689         ret = vfio_pci_igd_opregion_init(vdev, opregion);
2690         g_free(opregion);
2691         if (ret) {
2692             error_report("Device %s IGD OpRegion initialization failed",
2693                          vdev->vbasedev.name);
2694             goto out_teardown;
2695         }
2696     }
2697 
2698     /* QEMU emulates all of MSI & MSIX */
2699     if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2700         memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2701                MSIX_CAP_LENGTH);
2702     }
2703 
2704     if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2705         memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2706                vdev->msi_cap_size);
2707     }
2708 
2709     if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
2710         vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
2711                                                   vfio_intx_mmap_enable, vdev);
2712         pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
2713         ret = vfio_intx_enable(vdev);
2714         if (ret) {
2715             goto out_teardown;
2716         }
2717     }
2718 
2719     vfio_register_err_notifier(vdev);
2720     vfio_register_req_notifier(vdev);
2721     vfio_setup_resetfn_quirk(vdev);
2722 
2723     return 0;
2724 
2725 out_teardown:
2726     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2727     vfio_teardown_msi(vdev);
2728     vfio_bars_exit(vdev);
2729     return ret;
2730 }
2731 
2732 static void vfio_instance_finalize(Object *obj)
2733 {
2734     PCIDevice *pci_dev = PCI_DEVICE(obj);
2735     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
2736     VFIOGroup *group = vdev->vbasedev.group;
2737 
2738     vfio_bars_finalize(vdev);
2739     g_free(vdev->emulated_config_bits);
2740     g_free(vdev->rom);
2741     /*
2742      * XXX Leaking igd_opregion is not an oversight, we can't remove the
2743      * fw_cfg entry therefore leaking this allocation seems like the safest
2744      * option.
2745      *
2746      * g_free(vdev->igd_opregion);
2747      */
2748     vfio_put_device(vdev);
2749     vfio_put_group(group);
2750 }
2751 
2752 static void vfio_exitfn(PCIDevice *pdev)
2753 {
2754     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2755 
2756     vfio_unregister_req_notifier(vdev);
2757     vfio_unregister_err_notifier(vdev);
2758     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2759     vfio_disable_interrupts(vdev);
2760     if (vdev->intx.mmap_timer) {
2761         timer_free(vdev->intx.mmap_timer);
2762     }
2763     vfio_teardown_msi(vdev);
2764     vfio_bars_exit(vdev);
2765 }
2766 
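/*
 * Try reset methods from most to least specific: a device quirk resetfn,
 * the kernel's default reset (used when FLR is available or PM reset is
 * not), our own hot/bus reset, and finally PM reset as a last resort.
 */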
2767 static void vfio_pci_reset(DeviceState *dev)
2768 {
2769     PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
2770     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2771 
2772     trace_vfio_pci_reset(vdev->vbasedev.name);
2773 
2774     vfio_pci_pre_reset(vdev);
2775 
2776     if (vdev->resetfn && !vdev->resetfn(vdev)) {
2777         goto post_reset;
2778     }
2779 
2780     if (vdev->vbasedev.reset_works &&
2781         (vdev->has_flr || !vdev->has_pm_reset) &&
2782         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2783         trace_vfio_pci_reset_flr(vdev->vbasedev.name);
2784         goto post_reset;
2785     }
2786 
2787     /* See if we can do our own bus reset */
2788     if (!vfio_pci_hot_reset_one(vdev)) {
2789         goto post_reset;
2790     }
2791 
2792     /* If nothing else works and the device supports PM reset, use it */
2793     if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
2794         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2795         trace_vfio_pci_reset_pm(vdev->vbasedev.name);
2796         goto post_reset;
2797     }
2798 
2799 post_reset:
2800     vfio_pci_post_reset(vdev);
2801 }
2802 
2803 static void vfio_instance_init(Object *obj)
2804 {
2805     PCIDevice *pci_dev = PCI_DEVICE(obj);
2806     VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
2807 
2808     device_add_bootindex_property(obj, &vdev->bootindex,
2809                                   "bootindex", NULL,
2810                                   &pci_dev->qdev, NULL);
2811 }
2812 
2813 static Property vfio_pci_dev_properties[] = {
2814     DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
2815     DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
2816     DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
2817                        intx.mmap_timeout, 1100),
2818     DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
2819                     VFIO_FEATURE_ENABLE_VGA_BIT, false),
2820     DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
2821                     VFIO_FEATURE_ENABLE_REQ_BIT, true),
2822     DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
2823                     VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
2824     DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
2825     DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
2826     DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
2827     DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
2828     DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
2829     DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
2830     DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
2831                        sub_vendor_id, PCI_ANY_ID),
2832     DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
2833                        sub_device_id, PCI_ANY_ID),
2834     DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
2835     /*
2836      * TODO - support passed fds... is this necessary?
2837      * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
2838      * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
2839      */
2840     DEFINE_PROP_END_OF_LIST(),
2841 };
2842 
2843 static const VMStateDescription vfio_pci_vmstate = {
2844     .name = "vfio-pci",
2845     .unmigratable = 1,
2846 };
2847 
2848 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
2849 {
2850     DeviceClass *dc = DEVICE_CLASS(klass);
2851     PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
2852 
2853     dc->reset = vfio_pci_reset;
2854     dc->props = vfio_pci_dev_properties;
2855     dc->vmsd = &vfio_pci_vmstate;
2856     dc->desc = "VFIO-based PCI device assignment";
2857     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2858     pdc->init = vfio_initfn;
2859     pdc->exit = vfio_exitfn;
2860     pdc->config_read = vfio_pci_read_config;
2861     pdc->config_write = vfio_pci_write_config;
2862     pdc->is_express = 1; /* We might be */
2863 }
2864 
2865 static const TypeInfo vfio_pci_dev_info = {
2866     .name = "vfio-pci",
2867     .parent = TYPE_PCI_DEVICE,
2868     .instance_size = sizeof(VFIOPCIDevice),
2869     .class_init = vfio_pci_dev_class_init,
2870     .instance_init = vfio_instance_init,
2871     .instance_finalize = vfio_instance_finalize,
2872 };
2873 
2874 static void register_vfio_pci_dev_type(void)
2875 {
2876     type_register_static(&vfio_pci_dev_info);
2877 }
2878 
2879 type_init(register_vfio_pci_dev_type)
2880