Lines Matching +full:disable +full:sparse

10  * the COPYING file in the top-level directory.
12 * Based on qemu-kvm device-assignment:
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
30 #include "hw/qdev-properties.h"
31 #include "hw/qdev-properties-system.h"
32 #include "hw/vfio/vfio-cpr.h"
36 #include "qemu/error-report.h"
37 #include "qemu/main-loop.h"
47 #include "migration/qemu-file.h"
49 #include "vfio-migration-internal.h"
50 #include "vfio-helpers.h"
73 error_setg_errno(errp, -ret, "vfio_notifier_init %s failed", name); in vfio_notifier_init()
92 * waiting until an interrupt to disable mmaps (subsequent transitions
94 * been serviced and the time gap is long enough, we re-enable mmaps for
98 * regular interrupts and see much better latency by staying in non-mmap
101 * other options with the x-intx-mmap-timeout-ms parameter (a value of
108 if (vdev->intx.pending) { in vfio_intx_mmap_enable()
109 timer_mod(vdev->intx.mmap_timer, in vfio_intx_mmap_enable()
110 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); in vfio_intx_mmap_enable()
121 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { in vfio_intx_interrupt()
125 trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin); in vfio_intx_interrupt()
127 vdev->intx.pending = true; in vfio_intx_interrupt()
128 pci_irq_assert(&vdev->pdev); in vfio_intx_interrupt()
130 if (vdev->intx.mmap_timeout) { in vfio_intx_interrupt()
131 timer_mod(vdev->intx.mmap_timer, in vfio_intx_interrupt()
132 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); in vfio_intx_interrupt()
140 if (!vdev->intx.pending) { in vfio_pci_intx_eoi()
144 trace_vfio_pci_intx_eoi(vbasedev->name); in vfio_pci_intx_eoi()
146 vdev->intx.pending = false; in vfio_pci_intx_eoi()
147 pci_irq_deassert(&vdev->pdev); in vfio_pci_intx_eoi()
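Behind vfio_pci_intx_eoi(), a minimal sketch against the raw VFIO uAPI of the unmask step (device_fd is an assumed open VFIO device descriptor, not a name from this file): INTx is level-triggered, so the host line is masked when VFIO signals the trigger eventfd and must be unmasked after the guest EOI before it can fire again.

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Unmask INTx after the guest EOI so the (still level) line can re-fire. */
static int vfio_intx_unmask(int device_fd)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}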
154 int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_enable_kvm()
156 if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || in vfio_intx_enable_kvm()
157 vdev->intx.route.mode != PCI_INTX_ENABLED || in vfio_intx_enable_kvm()
164 vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
165 vdev->intx.pending = false; in vfio_intx_enable_kvm()
166 pci_irq_deassert(&vdev->pdev); in vfio_intx_enable_kvm()
169 if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) { in vfio_intx_enable_kvm()
174 &vdev->intx.interrupt, in vfio_intx_enable_kvm()
175 &vdev->intx.unmask, in vfio_intx_enable_kvm()
176 vdev->intx.route.irq)) { in vfio_intx_enable_kvm()
181 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, in vfio_intx_enable_kvm()
183 event_notifier_get_fd(&vdev->intx.unmask), in vfio_intx_enable_kvm()
189 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
191 vdev->intx.kvm_accel = true; in vfio_intx_enable_kvm()
193 trace_vfio_intx_enable_kvm(vdev->vbasedev.name); in vfio_intx_enable_kvm()
198 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, in vfio_intx_enable_kvm()
199 vdev->intx.route.irq); in vfio_intx_enable_kvm()
201 vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); in vfio_intx_enable_kvm()
204 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
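A sketch of the raw KVM uAPI that kvm_irqchip_add_irqfd_notifier_gsi() wraps for this bypass: the trigger eventfd injects the GSI, and KVM signals the resample eventfd on guest EOI, which VFIO consumes as its unmask source. vm_fd, trigger_fd, unmask_fd and gsi are illustrative names, not QEMU symbols.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pair a VFIO trigger eventfd with a resample (unmask) eventfd in KVM. */
static int kvm_add_resample_irqfd(int vm_fd, int trigger_fd,
                                  int unmask_fd, unsigned int gsi)
{
    struct kvm_irqfd irqfd = {
        .fd = trigger_fd,                 /* signaled by VFIO on INTx */
        .gsi = gsi,                       /* guest interrupt line */
        .flags = KVM_IRQFD_FLAG_RESAMPLE, /* level-triggered handling */
        .resamplefd = unmask_fd,          /* signaled by KVM on guest EOI */
    };

    return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}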
214 if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || in vfio_cpr_intx_enable_kvm()
215 vdev->intx.route.mode != PCI_INTX_ENABLED || in vfio_cpr_intx_enable_kvm()
220 if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) { in vfio_cpr_intx_enable_kvm()
225 &vdev->intx.interrupt, in vfio_cpr_intx_enable_kvm()
226 &vdev->intx.unmask, in vfio_cpr_intx_enable_kvm()
227 vdev->intx.route.irq)) { in vfio_cpr_intx_enable_kvm()
229 vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); in vfio_cpr_intx_enable_kvm()
233 vdev->intx.kvm_accel = true; in vfio_cpr_intx_enable_kvm()
234 trace_vfio_intx_enable_kvm(vdev->vbasedev.name); in vfio_cpr_intx_enable_kvm()
244 if (!vdev->intx.kvm_accel) { in vfio_intx_disable_kvm()
250 * interrupts, QEMU IRQ de-asserted. in vfio_intx_disable_kvm()
252 vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable_kvm()
253 vdev->intx.pending = false; in vfio_intx_disable_kvm()
254 pci_irq_deassert(&vdev->pdev); in vfio_intx_disable_kvm()
257 if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, in vfio_intx_disable_kvm()
258 vdev->intx.route.irq)) { in vfio_intx_disable_kvm()
259 error_report("vfio: Error: Failed to disable INTx irqfd: %m"); in vfio_intx_disable_kvm()
263 vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); in vfio_intx_disable_kvm()
266 qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt), in vfio_intx_disable_kvm()
269 vdev->intx.kvm_accel = false; in vfio_intx_disable_kvm()
271 /* If we've missed an event, let it re-fire through QEMU */ in vfio_intx_disable_kvm()
272 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable_kvm()
274 trace_vfio_intx_disable_kvm(vdev->vbasedev.name); in vfio_intx_disable_kvm()
282 trace_vfio_intx_update(vdev->vbasedev.name, in vfio_intx_update()
283 vdev->intx.route.irq, route->irq); in vfio_intx_update()
287 vdev->intx.route = *route; in vfio_intx_update()
289 if (route->mode != PCI_INTX_ENABLED) { in vfio_intx_update()
294 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_intx_update()
297 /* Re-enable the interrupt in case we missed an EOI */ in vfio_intx_update()
298 vfio_pci_intx_eoi(&vdev->vbasedev); in vfio_intx_update()
306 if (vdev->interrupt != VFIO_INT_INTx) { in vfio_intx_routing_notifier()
310 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin); in vfio_intx_routing_notifier()
312 if (pci_intx_route_changed(&vdev->intx.route, &route)) { in vfio_intx_routing_notifier()
322 vfio_intx_update(vdev, &vdev->intx.route); in vfio_irqchip_change()
327 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); in vfio_intx_enable()
344 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ in vfio_intx_enable()
345 pci_config_set_interrupt_pin(vdev->pdev.config, pin); in vfio_intx_enable()
353 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, in vfio_intx_enable()
354 vdev->intx.pin); in vfio_intx_enable()
358 if (!vfio_notifier_init(vdev, &vdev->intx.interrupt, "intx-interrupt", 0, in vfio_intx_enable()
362 fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_enable()
368 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_intx_enable()
373 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, in vfio_intx_enable()
376 vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0); in vfio_intx_enable()
381 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_intx_enable()
385 vdev->interrupt = VFIO_INT_INTx; in vfio_intx_enable()
387 trace_vfio_intx_enable(vdev->vbasedev.name); in vfio_intx_enable()
395 timer_del(vdev->intx.mmap_timer); in vfio_intx_disable()
397 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable()
398 vdev->intx.pending = false; in vfio_intx_disable()
399 pci_irq_deassert(&vdev->pdev); in vfio_intx_disable()
402 fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_disable()
404 vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0); in vfio_intx_disable()
406 vdev->interrupt = VFIO_INT_NONE; in vfio_intx_disable()
408 trace_vfio_intx_disable(vdev->vbasedev.name); in vfio_intx_disable()
418 int fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_pci_intx_set_handler()
430 VFIOPCIDevice *vdev = vector->vdev; in vfio_msi_interrupt()
434 int nr = vector - vdev->msi_vectors; in vfio_msi_interrupt()
436 if (!event_notifier_test_and_clear(&vector->interrupt)) { in vfio_msi_interrupt()
440 if (vdev->interrupt == VFIO_INT_MSIX) { in vfio_msi_interrupt()
445 if (msix_is_masked(&vdev->pdev, nr)) { in vfio_msi_interrupt()
446 set_bit(nr, vdev->msix->pending); in vfio_msi_interrupt()
447 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true); in vfio_msi_interrupt()
448 trace_vfio_msix_pba_enable(vdev->vbasedev.name); in vfio_msi_interrupt()
450 } else if (vdev->interrupt == VFIO_INT_MSI) { in vfio_msi_interrupt()
457 msg = get_msg(&vdev->pdev, nr); in vfio_msi_interrupt()
458 trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data); in vfio_msi_interrupt()
459 notify(&vdev->pdev, nr); in vfio_msi_interrupt()
464 VFIOMSIVector *vector = &vdev->msi_vectors[nr]; in vfio_pci_msi_set_handler()
465 int fd = event_notifier_get_fd(&vector->interrupt); in vfio_pci_msi_set_handler()
472 * Get MSI-X enabled, but no vector enabled, by setting vector 0 with an invalid
484 irq_set->argsz = argsz; in vfio_enable_msix_no_vec()
485 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | in vfio_enable_msix_no_vec()
487 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; in vfio_enable_msix_no_vec()
488 irq_set->start = 0; in vfio_enable_msix_no_vec()
489 irq_set->count = 1; in vfio_enable_msix_no_vec()
490 fd = (int32_t *)&irq_set->data; in vfio_enable_msix_no_vec()
491 *fd = -1; in vfio_enable_msix_no_vec()
493 return vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set); in vfio_enable_msix_no_vec()
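The same trick expressed against the bare uAPI, as a hedged standalone sketch: binding vector 0 to fd -1 flips the device into MSI-X mode without allocating any host interrupt. device_fd is assumed; QEMU routes the same ioctl through io_ops->set_irqs above.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int msix_enable_no_vectors(int device_fd)
{
    size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
    struct vfio_irq_set *irq_set = calloc(1, argsz);
    int32_t *fd;
    int ret;

    if (!irq_set) {
        return -1;
    }
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    fd = (int32_t *)&irq_set->data;
    *fd = -1;    /* no eventfd: MSI-X turns on, vector stays unbacked */

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    free(irq_set);
    return ret;
}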
503 * If dynamic MSI-X allocation is supported, the vectors to be allocated in vfio_enable_vectors()
504 * and enabled can be scattered. Before the kernel enables MSI-X, setting in vfio_enable_vectors()
508 * MSI-X enabled first, then set vectors with a potentially sparse set of in vfio_enable_vectors()
511 if (msix && !vdev->msix->noresize) { in vfio_enable_vectors()
519 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); in vfio_enable_vectors()
522 irq_set->argsz = argsz; in vfio_enable_vectors()
523 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; in vfio_enable_vectors()
524 irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; in vfio_enable_vectors()
525 irq_set->start = 0; in vfio_enable_vectors()
526 irq_set->count = vdev->nr_vectors; in vfio_enable_vectors()
527 fds = (int32_t *)&irq_set->data; in vfio_enable_vectors()
529 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_enable_vectors()
530 int fd = -1; in vfio_enable_vectors()
533 * MSI vs MSI-X - The guest has direct access to MSI mask and pending in vfio_enable_vectors()
535 * MSI-X mask and pending bits are emulated, so we want to use the in vfio_enable_vectors()
538 if (vdev->msi_vectors[i].use) { in vfio_enable_vectors()
539 if (vdev->msi_vectors[i].virq < 0 || in vfio_enable_vectors()
540 (msix && msix_is_masked(&vdev->pdev, i))) { in vfio_enable_vectors()
541 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); in vfio_enable_vectors()
543 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); in vfio_enable_vectors()
550 ret = vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set); in vfio_enable_vectors()
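The array form above, sketched standalone: one int32_t slot per vector, -1 marking vectors that should stay quiescent (as the MSI vs MSI-X comment explains, masked MSI-X vectors fall back to the QEMU-side eventfd rather than the KVM bypass), all applied in a single call so the kernel sees the final, possibly sparse layout at once. device_fd, efds and nvec are assumed names.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int msix_enable_vectors(int device_fd, const int32_t *efds,
                               uint32_t nvec)
{
    size_t argsz = sizeof(struct vfio_irq_set) + nvec * sizeof(int32_t);
    struct vfio_irq_set *irq_set = calloc(1, argsz);
    int ret;

    if (!irq_set) {
        return -1;
    }
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = nvec;
    memcpy(&irq_set->data, efds, nvec * sizeof(int32_t));  /* -1 = unused */

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    free(irq_set);
    return ret;
}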
560 if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { in vfio_pci_add_kvm_msi_virq()
564 vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change, in vfio_pci_add_kvm_msi_virq()
565 vector_n, &vdev->pdev); in vfio_pci_add_kvm_msi_virq()
572 if (vector->virq < 0) { in vfio_connect_kvm_msi_virq()
576 if (!vfio_notifier_init(vector->vdev, &vector->kvm_interrupt, name, nr, in vfio_connect_kvm_msi_virq()
581 if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, in vfio_connect_kvm_msi_virq()
582 NULL, vector->virq) < 0) { in vfio_connect_kvm_msi_virq()
589 vfio_notifier_cleanup(vector->vdev, &vector->kvm_interrupt, name, nr); in vfio_connect_kvm_msi_virq()
591 kvm_irqchip_release_virq(kvm_state, vector->virq); in vfio_connect_kvm_msi_virq()
592 vector->virq = -1; in vfio_connect_kvm_msi_virq()
598 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, in vfio_remove_kvm_msi_virq()
599 vector->virq); in vfio_remove_kvm_msi_virq()
600 kvm_irqchip_release_virq(kvm_state, vector->virq); in vfio_remove_kvm_msi_virq()
601 vector->virq = -1; in vfio_remove_kvm_msi_virq()
602 vfio_notifier_cleanup(vdev, &vector->kvm_interrupt, "kvm_interrupt", nr); in vfio_remove_kvm_msi_virq()
608 kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev); in vfio_update_kvm_msi_virq()
618 if (vector->virq >= 0) { in set_irq_signalling()
619 fd = event_notifier_get_fd(&vector->kvm_interrupt); in set_irq_signalling()
621 fd = event_notifier_get_fd(&vector->interrupt); in set_irq_signalling()
627 error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); in set_irq_signalling()
633 VFIOMSIVector *vector = &vdev->msi_vectors[nr]; in vfio_pci_vector_init()
634 PCIDevice *pdev = &vdev->pdev; in vfio_pci_vector_init()
637 vector->vdev = vdev; in vfio_pci_vector_init()
638 vector->virq = -1; in vfio_pci_vector_init()
639 if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", nr, in vfio_pci_vector_init()
643 vector->use = true; in vfio_pci_vector_init()
644 if (vdev->interrupt == VFIO_INT_MSIX) { in vfio_pci_vector_init()
655 bool resizing = !!(vdev->nr_vectors < nr + 1); in vfio_msix_vector_do_use()
657 trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr); in vfio_msix_vector_do_use()
659 vector = &vdev->msi_vectors[nr]; in vfio_msix_vector_do_use()
661 if (!vector->use) { in vfio_msix_vector_do_use()
665 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), in vfio_msix_vector_do_use()
672 if (vector->virq >= 0) { in vfio_msix_vector_do_use()
680 if (vdev->defer_kvm_irq_routing) { in vfio_msix_vector_do_use()
703 vdev->nr_vectors = nr + 1; in vfio_msix_vector_do_use()
706 if (!vdev->defer_kvm_irq_routing) { in vfio_msix_vector_do_use()
707 if (vdev->msix->noresize && resizing) { in vfio_msix_vector_do_use()
708 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); in vfio_msix_vector_do_use()
712 strerror(-ret)); in vfio_msix_vector_do_use()
715 set_irq_signalling(&vdev->vbasedev, vector, nr); in vfio_msix_vector_do_use()
719 /* Disable PBA emulation when nothing more is pending. */ in vfio_msix_vector_do_use()
720 clear_bit(nr, vdev->msix->pending); in vfio_msix_vector_do_use()
721 if (find_first_bit(vdev->msix->pending, in vfio_msix_vector_do_use()
722 vdev->nr_vectors) == vdev->nr_vectors) { in vfio_msix_vector_do_use()
723 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); in vfio_msix_vector_do_use()
724 trace_vfio_msix_pba_disable(vdev->vbasedev.name); in vfio_msix_vector_do_use()
748 VFIOMSIVector *vector = &vdev->msi_vectors[nr]; in vfio_msix_vector_release()
750 trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); in vfio_msix_vector_release()
755 * the KVM setup in place, simply switch VFIO to use the non-bypass in vfio_msix_vector_release()
756 * eventfd. We'll then fire the interrupt through QEMU and the MSI-X in vfio_msix_vector_release()
758 * be re-asserted on unmask. Nothing to do if already using QEMU mode. in vfio_msix_vector_release()
760 if (vector->virq >= 0) { in vfio_msix_vector_release()
761 int32_t fd = event_notifier_get_fd(&vector->interrupt); in vfio_msix_vector_release()
764 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, in vfio_msix_vector_release()
767 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msix_vector_release()
774 msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, in vfio_pci_msix_set_notifiers()
780 assert(!vdev->defer_kvm_irq_routing); in vfio_pci_prepare_kvm_msi_virq_batch()
781 vdev->defer_kvm_irq_routing = true; in vfio_pci_prepare_kvm_msi_virq_batch()
789 assert(vdev->defer_kvm_irq_routing); in vfio_pci_commit_kvm_msi_virq_batch()
790 vdev->defer_kvm_irq_routing = false; in vfio_pci_commit_kvm_msi_virq_batch()
794 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_pci_commit_kvm_msi_virq_batch()
795 vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i], i); in vfio_pci_commit_kvm_msi_virq_batch()
805 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries); in vfio_msix_enable()
807 vdev->interrupt = VFIO_INT_MSIX; in vfio_msix_enable()
810 * Setting vector notifiers triggers synchronous vector-use in vfio_msix_enable()
817 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, in vfio_msix_enable()
824 if (vdev->nr_vectors) { in vfio_msix_enable()
828 strerror(-ret)); in vfio_msix_enable()
833 * physical state of the device and expect that enabling MSI-X from the in vfio_msix_enable()
836 * MSI-X capability, but leaves the vector table masked. We therefore in vfio_msix_enable()
838 * to switch the physical device into MSI-X mode because that may come a in vfio_msix_enable()
840 * invalid fd to make the physical device MSI-X enabled, but with no in vfio_msix_enable()
845 error_report("vfio: failed to enable MSI-X, %s", in vfio_msix_enable()
846 strerror(-ret)); in vfio_msix_enable()
850 trace_vfio_msix_enable(vdev->vbasedev.name); in vfio_msix_enable()
859 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); in vfio_msi_enable()
868 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); in vfio_msi_enable()
870 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msi_enable()
871 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_msi_enable()
874 vector->vdev = vdev; in vfio_msi_enable()
875 vector->virq = -1; in vfio_msi_enable()
876 vector->use = true; in vfio_msi_enable()
878 if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", i, in vfio_msi_enable()
883 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), in vfio_msi_enable()
896 vdev->interrupt = VFIO_INT_MSI; in vfio_msi_enable()
902 strerror(-ret)); in vfio_msi_enable()
905 "MSI vectors, retry with %d", vdev->nr_vectors, ret); in vfio_msi_enable()
911 vdev->nr_vectors = ret; in vfio_msi_enable()
925 trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors); in vfio_msi_enable()
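A sketch of the retry pattern implied by the "retry with %d" message above, under the assumption that a positive return from the enable path reports how many MSI vectors the host could actually back. enable_vectors and release_vectors are assumed callbacks standing in for QEMU's helpers, not QEMU APIs.

static int msi_enable_with_retry(int device_fd, int requested,
                                 int (*enable_vectors)(int fd, int nvec),
                                 void (*release_vectors)(int fd, int from,
                                                         int to))
{
    int nvec = requested;

    for (;;) {
        int ret = enable_vectors(device_fd, nvec);

        if (ret == 0) {
            return nvec;                   /* everything requested is live */
        }
        if (ret < 0 || ret >= nvec) {
            return -1;                     /* hard failure, nothing smaller */
        }
        release_vectors(device_fd, ret, nvec); /* drop surplus notifiers */
        nvec = ret;                        /* retry at the supported count */
    }
}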
932 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msi_disable_common()
933 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_msi_disable_common()
934 if (vdev->msi_vectors[i].use) { in vfio_msi_disable_common()
935 if (vector->virq >= 0) { in vfio_msi_disable_common()
938 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), in vfio_msi_disable_common()
940 vfio_notifier_cleanup(vdev, &vector->interrupt, "interrupt", i); in vfio_msi_disable_common()
944 g_free(vdev->msi_vectors); in vfio_msi_disable_common()
945 vdev->msi_vectors = NULL; in vfio_msi_disable_common()
946 vdev->nr_vectors = 0; in vfio_msi_disable_common()
947 vdev->interrupt = VFIO_INT_NONE; in vfio_msi_disable_common()
955 msix_unset_vector_notifiers(&vdev->pdev); in vfio_msix_disable()
958 * MSI-X will only release vectors if MSI-X is still enabled on the in vfio_msix_disable()
961 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msix_disable()
962 if (vdev->msi_vectors[i].use) { in vfio_msix_disable()
963 vfio_msix_vector_release(&vdev->pdev, i); in vfio_msix_disable()
964 msix_vector_unuse(&vdev->pdev, i); in vfio_msix_disable()
969 * Always clear MSI-X IRQ index. A PF device could have enabled in vfio_msix_disable()
970 * MSI-X with no vectors. See vfio_msix_enable(). in vfio_msix_disable()
972 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); in vfio_msix_disable()
976 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msix_disable()
979 memset(vdev->msix->pending, 0, in vfio_msix_disable()
980 BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long)); in vfio_msix_disable()
982 trace_vfio_msix_disable(vdev->vbasedev.name); in vfio_msix_disable()
989 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); in vfio_msi_disable()
993 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msi_disable()
996 trace_vfio_msi_disable(vdev->vbasedev.name); in vfio_msi_disable()
1003 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_update_msi()
1004 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_update_msi()
1007 if (!vector->use || vector->virq < 0) { in vfio_update_msi()
1011 msg = msi_get_message(&vdev->pdev, i); in vfio_update_msi()
1012 vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev); in vfio_update_msi()
1018 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_load_rom()
1029 error_report("vfio: Error getting ROM info: %s", strerror(-ret)); in vfio_pci_load_rom()
1033 trace_vfio_pci_load_rom(vbasedev->name, (unsigned long)reg_info->size, in vfio_pci_load_rom()
1034 (unsigned long)reg_info->offset, in vfio_pci_load_rom()
1035 (unsigned long)reg_info->flags); in vfio_pci_load_rom()
1037 vdev->rom_size = size = reg_info->size; in vfio_pci_load_rom()
1038 vdev->rom_offset = reg_info->offset; in vfio_pci_load_rom()
1040 if (!vdev->rom_size) { in vfio_pci_load_rom()
1041 vdev->rom_read_failed = true; in vfio_pci_load_rom()
1042 error_report("vfio-pci: Cannot read device rom at %s", vbasedev->name); in vfio_pci_load_rom()
1049 vdev->rom = g_malloc(size); in vfio_pci_load_rom()
1050 memset(vdev->rom, 0xff, size); in vfio_pci_load_rom()
1053 bytes = vbasedev->io_ops->region_read(vbasedev, in vfio_pci_load_rom()
1055 off, size, vdev->rom + off); in vfio_pci_load_rom()
1061 size -= bytes; in vfio_pci_load_rom()
1063 if (bytes == -EINTR || bytes == -EAGAIN) { in vfio_pci_load_rom()
1079 if (pci_get_word(vdev->rom) == 0xaa55 && in vfio_pci_load_rom()
1080 pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size && in vfio_pci_load_rom()
1081 !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) { in vfio_pci_load_rom()
1084 vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4); in vfio_pci_load_rom()
1085 did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6); in vfio_pci_load_rom()
1087 if (vid == vdev->vendor_id && did != vdev->device_id) { in vfio_pci_load_rom()
1089 uint8_t csum, *data = vdev->rom; in vfio_pci_load_rom()
1091 pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6, in vfio_pci_load_rom()
1092 vdev->device_id); in vfio_pci_load_rom()
1095 for (csum = 0, i = 0; i < vdev->rom_size; i++) { in vfio_pci_load_rom()
1099 data[6] = -csum; in vfio_pci_load_rom()
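A hedged sketch of the layout rules exercised above, from the PCI firmware spec: a valid option ROM starts with the 0xAA55 signature, carries a little-endian pointer to the "PCIR" data structure at offset 0x18 (vendor ID at PCIR+4, device ID at PCIR+6, as patched above), and sums to zero modulo 256, which is why the code re-balances a header byte (data[6]) after rewriting the device ID.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static int rom_layout_ok(const uint8_t *rom, size_t size)
{
    uint16_t pcir;
    uint8_t sum = 0;
    size_t i;

    if (size < 0x1a || rom[0] != 0x55 || rom[1] != 0xaa) {
        return 0;                        /* missing 0xaa55 signature */
    }
    pcir = rom[0x18] | (rom[0x19] << 8); /* pointer to PCI data structure */
    if (pcir + 8 >= size || memcmp(rom + pcir, "PCIR", 4) != 0) {
        return 0;                        /* no "PCIR" block in range */
    }
    for (i = 0; i < size; i++) {         /* checksum: all bytes sum to 0 */
        sum += rom[i];
    }
    return sum == 0;
}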
1108 return vdev->vbasedev.io_ops->region_read(&vdev->vbasedev, in vfio_pci_config_space_read()
1117 return vdev->vbasedev.io_ops->region_write(&vdev->vbasedev, in vfio_pci_config_space_write()
1134 if (unlikely(!vdev->rom && !vdev->rom_read_failed)) { in vfio_rom_read()
1138 memcpy(&val, vdev->rom + addr, in vfio_rom_read()
1139 (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0); in vfio_rom_read()
1156 trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data); in vfio_rom_read()
1174 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_size_rom()
1178 if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { in vfio_pci_size_rom()
1180 if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) { in vfio_pci_size_rom()
1183 vdev->vbasedev.name); in vfio_pci_size_rom()
1198 error_report("%s(%s) ROM access failed", __func__, vbasedev->name); in vfio_pci_size_rom()
1209 if (vdev->pdev.rom_bar > 0) { in vfio_pci_size_rom()
1212 vdev->vbasedev.name); in vfio_pci_size_rom()
1218 vdev->vbasedev.name); in vfio_pci_size_rom()
1224 trace_vfio_pci_size_rom(vdev->vbasedev.name, size); in vfio_pci_size_rom()
1226 name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name); in vfio_pci_size_rom()
1228 memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), in vfio_pci_size_rom()
1232 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, in vfio_pci_size_rom()
1233 PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); in vfio_pci_size_rom()
1235 vdev->rom_read_failed = false; in vfio_pci_size_rom()
1242 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); in vfio_vga_write()
1249 off_t offset = vga->fd_offset + region->offset + addr; in vfio_vga_write()
1266 if (pwrite(vga->fd, &buf, size, offset) != size) { in vfio_vga_write()
1268 __func__, region->offset + addr, data, size); in vfio_vga_write()
1271 trace_vfio_vga_write(region->offset + addr, data, size); in vfio_vga_write()
1277 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); in vfio_vga_read()
1285 off_t offset = vga->fd_offset + region->offset + addr; in vfio_vga_read()
1287 if (pread(vga->fd, &buf, size, offset) != size) { in vfio_vga_read()
1289 __func__, region->offset + addr, size); in vfio_vga_read()
1290 return (uint64_t)-1; in vfio_vga_read()
1308 trace_vfio_vga_read(region->offset + addr, size, data); in vfio_vga_read()
1320 * Expand memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to page
1322 * this BAR to the guest. But this sub-page BAR may not occupy an exclusive
1325 * with the sub-page BAR in the guest. Besides, we should also recover the
1326 * size of this sub-page BAR when its base address is changed in the guest
1332 VFIORegion *region = &vdev->bars[bar].region; in vfio_sub_page_bar_update_mapping()
1336 uint64_t size = region->size; in vfio_sub_page_bar_update_mapping()
1339 if (region->nr_mmaps != 1 || !region->mmaps[0].mmap || in vfio_sub_page_bar_update_mapping()
1340 region->mmaps[0].size != region->size) { in vfio_sub_page_bar_update_mapping()
1344 r = &pdev->io_regions[bar]; in vfio_sub_page_bar_update_mapping()
1345 bar_addr = r->addr; in vfio_sub_page_bar_update_mapping()
1346 base_mr = vdev->bars[bar].mr; in vfio_sub_page_bar_update_mapping()
1347 region_mr = region->mem; in vfio_sub_page_bar_update_mapping()
1348 mmap_mr = &region->mmaps[0].mem; in vfio_sub_page_bar_update_mapping()
1358 if (vdev->bars[bar].size < size) { in vfio_sub_page_bar_update_mapping()
1363 if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) { in vfio_sub_page_bar_update_mapping()
1364 memory_region_del_subregion(r->address_space, base_mr); in vfio_sub_page_bar_update_mapping()
1365 memory_region_add_subregion_overlap(r->address_space, in vfio_sub_page_bar_update_mapping()
1378 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_read_config()
1381 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); in vfio_pci_read_config()
1388 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { in vfio_pci_read_config()
1394 __func__, vbasedev->name, addr, len, in vfio_pci_read_config()
1396 return -1; in vfio_pci_read_config()
1403 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); in vfio_pci_read_config()
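A sketch of the bit-merge this function performs (names assumed from the surrounding fragment): each config byte has a mask in emulated_config_bits choosing, bit by bit, between QEMU's emulated value and the physical device's, so purely-emulated, purely-physical, and mixed reads all go through one expression.

#include <stdint.h>

/* Merge emulated and physical config space under a per-bit mask. */
static uint32_t merge_config(uint32_t emu_val, uint32_t phys_val,
                             uint32_t emu_bits)
{
    return (emu_val & emu_bits) | (phys_val & ~emu_bits);
}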
1412 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_write_config()
1416 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); in vfio_pci_write_config()
1422 __func__, vbasedev->name, addr, val, len, in vfio_pci_write_config()
1426 /* MSI/MSI-X Enabling/Disabling */ in vfio_pci_write_config()
1427 if (pdev->cap_present & QEMU_PCI_CAP_MSI && in vfio_pci_write_config()
1428 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { in vfio_pci_write_config()
1446 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && in vfio_pci_write_config()
1447 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { in vfio_pci_write_config()
1461 pcibus_t old_addr[PCI_NUM_REGIONS - 1]; in vfio_pci_write_config()
1465 old_addr[bar] = pdev->io_regions[bar].addr; in vfio_pci_write_config()
1471 if (old_addr[bar] != pdev->io_regions[bar].addr && in vfio_pci_write_config()
1472 vdev->bars[bar].region.size > 0 && in vfio_pci_write_config()
1473 vdev->bars[bar].region.size < qemu_real_host_page_size()) { in vfio_pci_write_config()
1491 * disable MSI/X and then clean up by disabling INTx. in vfio_disable_interrupts()
1493 if (vdev->interrupt == VFIO_INT_MSIX) { in vfio_disable_interrupts()
1495 } else if (vdev->interrupt == VFIO_INT_MSI) { in vfio_disable_interrupts()
1499 if (vdev->interrupt == VFIO_INT_INTx) { in vfio_disable_interrupts()
1524 trace_vfio_msi_setup(vdev->vbasedev.name, pos); in vfio_msi_setup()
1526 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err); in vfio_msi_setup()
1528 if (ret == -ENOTSUP) { in vfio_msi_setup()
1534 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); in vfio_msi_setup()
1542 VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region; in vfio_pci_fixup_msix_region()
1548 if (vfio_device_has_region_cap(&vdev->vbasedev, region->nr, in vfio_pci_fixup_msix_region()
1557 if (region->nr_mmaps != 1 || region->mmaps[0].offset || in vfio_pci_fixup_msix_region()
1558 region->size != region->mmaps[0].size) { in vfio_pci_fixup_msix_region()
1562 /* MSI-X table start and end aligned to host page size */ in vfio_pci_fixup_msix_region()
1563 start = vdev->msix->table_offset & qemu_real_host_page_mask(); in vfio_pci_fixup_msix_region()
1564 end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + in vfio_pci_fixup_msix_region()
1565 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); in vfio_pci_fixup_msix_region()
1568 * Does the MSI-X table cover the beginning of the BAR? The whole BAR? in vfio_pci_fixup_msix_region()
1569 * NB - Host page size is necessarily a power of two and so is the PCI in vfio_pci_fixup_msix_region()
1575 if (end >= region->size) { in vfio_pci_fixup_msix_region()
1576 region->nr_mmaps = 0; in vfio_pci_fixup_msix_region()
1577 g_free(region->mmaps); in vfio_pci_fixup_msix_region()
1578 region->mmaps = NULL; in vfio_pci_fixup_msix_region()
1579 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1580 vdev->msix->table_bar, 0, 0); in vfio_pci_fixup_msix_region()
1582 region->mmaps[0].offset = end; in vfio_pci_fixup_msix_region()
1583 region->mmaps[0].size = region->size - end; in vfio_pci_fixup_msix_region()
1584 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1585 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1586 region->mmaps[0].offset + region->mmaps[0].size); in vfio_pci_fixup_msix_region()
1590 } else if (end >= region->size) { in vfio_pci_fixup_msix_region()
1591 region->mmaps[0].size = start; in vfio_pci_fixup_msix_region()
1592 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1593 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1594 region->mmaps[0].offset + region->mmaps[0].size); in vfio_pci_fixup_msix_region()
1598 region->nr_mmaps = 2; in vfio_pci_fixup_msix_region()
1599 region->mmaps = g_renew(VFIOMmap, region->mmaps, 2); in vfio_pci_fixup_msix_region()
1601 memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap)); in vfio_pci_fixup_msix_region()
1603 region->mmaps[0].size = start; in vfio_pci_fixup_msix_region()
1604 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1605 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1606 region->mmaps[0].offset + region->mmaps[0].size); in vfio_pci_fixup_msix_region()
1608 region->mmaps[1].offset = end; in vfio_pci_fixup_msix_region()
1609 region->mmaps[1].size = region->size - end; in vfio_pci_fixup_msix_region()
1610 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1611 vdev->msix->table_bar, region->mmaps[1].offset, in vfio_pci_fixup_msix_region()
1612 region->mmaps[1].offset + region->mmaps[1].size); in vfio_pci_fixup_msix_region()
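A worked example of the split computed above, assuming 4 KiB host pages: a 64-entry table (64 * PCI_MSIX_ENTRY_SIZE = 0x400 bytes) at table_offset 0x2000 inside a 16 KiB BAR gives

    start = 0x2000 & page_mask           = 0x2000
    end   = REAL_HOST_PAGE_ALIGN(0x2400) = 0x3000

so the two-mmap branch maps [0x0000, 0x2000) and [0x3000, 0x4000) directly, while the page containing the table stays trapped for emulation.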
1618 int target_bar = -1; in vfio_pci_relocate_msix()
1621 if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { in vfio_pci_relocate_msix()
1625 /* The actual minimum size of MSI-X structures */ in vfio_pci_relocate_msix()
1626 msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) + in vfio_pci_relocate_msix()
1627 (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8); in vfio_pci_relocate_msix()
1633 if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) { in vfio_pci_relocate_msix()
1643 error_setg(errp, "No automatic MSI-X relocation available for " in vfio_pci_relocate_msix()
1644 "device %04x:%04x", vdev->vendor_id, vdev->device_id); in vfio_pci_relocate_msix()
1648 target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0); in vfio_pci_relocate_msix()
1651 /* I/O port BARs cannot host MSI-X structures */ in vfio_pci_relocate_msix()
1652 if (vdev->bars[target_bar].ioport) { in vfio_pci_relocate_msix()
1653 error_setg(errp, "Invalid MSI-X relocation BAR %d, " in vfio_pci_relocate_msix()
1658 /* Cannot use a BAR in the "shadow" of a 64-bit BAR */ in vfio_pci_relocate_msix()
1659 if (!vdev->bars[target_bar].size && in vfio_pci_relocate_msix()
1660 target_bar > 0 && vdev->bars[target_bar - 1].mem64) { in vfio_pci_relocate_msix()
1661 error_setg(errp, "Invalid MSI-X relocation BAR %d, " in vfio_pci_relocate_msix()
1662 "consumed by 64-bit BAR %d", target_bar, target_bar - 1); in vfio_pci_relocate_msix()
1666 /* 2GB max size for 32-bit BARs, cannot double if already > 1G */ in vfio_pci_relocate_msix()
1667 if (vdev->bars[target_bar].size > 1 * GiB && in vfio_pci_relocate_msix()
1668 !vdev->bars[target_bar].mem64) { in vfio_pci_relocate_msix()
1669 error_setg(errp, "Invalid MSI-X relocation BAR %d, " in vfio_pci_relocate_msix()
1670 "no space to extend 32-bit BAR", target_bar); in vfio_pci_relocate_msix()
1676 * prefetchable since QEMU MSI-X emulation has no read side effects in vfio_pci_relocate_msix()
1679 if (!vdev->bars[target_bar].size) { in vfio_pci_relocate_msix()
1680 if (target_bar < (PCI_ROM_SLOT - 1) && in vfio_pci_relocate_msix()
1681 !vdev->bars[target_bar + 1].size) { in vfio_pci_relocate_msix()
1682 vdev->bars[target_bar].mem64 = true; in vfio_pci_relocate_msix()
1683 vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64; in vfio_pci_relocate_msix()
1685 vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH; in vfio_pci_relocate_msix()
1686 vdev->bars[target_bar].size = msix_sz; in vfio_pci_relocate_msix()
1687 vdev->msix->table_offset = 0; in vfio_pci_relocate_msix()
1689 vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2, in vfio_pci_relocate_msix()
1692 * Due to the above size calculation, MSI-X always starts halfway into the BAR, in vfio_pci_relocate_msix()
1695 vdev->msix->table_offset = vdev->bars[target_bar].size / 2; in vfio_pci_relocate_msix()
1698 vdev->msix->table_bar = target_bar; in vfio_pci_relocate_msix()
1699 vdev->msix->pba_bar = target_bar; in vfio_pci_relocate_msix()
1700 /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */ in vfio_pci_relocate_msix()
1701 vdev->msix->pba_offset = vdev->msix->table_offset + in vfio_pci_relocate_msix()
1702 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE); in vfio_pci_relocate_msix()
1704 trace_vfio_msix_relo(vdev->vbasedev.name, in vfio_pci_relocate_msix()
1705 vdev->msix->table_bar, vdev->msix->table_offset); in vfio_pci_relocate_msix()
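As a worked example of the sizing rule: 96 entries need 96 * 16 = 1536 table bytes plus QEMU_ALIGN_UP(96, 64) / 8 = 16 PBA bytes, 1552 in all (rounded up to the host page size in the elided lines). Relocating into an existing 16 KiB BAR doubles it to 32 KiB with table_offset = 16 KiB, so the emulated structures occupy the top half, beyond every register the physical BAR actually implements.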
1711 * capabilities into the chain. In order to setup MSI-X we need a
1713 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1714 * need to first look for where the MSI-X table lives. So we
1715 * unfortunately split MSI-X setup across two functions.
1726 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); in vfio_msix_early_setup()
1759 msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; in vfio_msix_early_setup()
1760 msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; in vfio_msix_early_setup()
1761 msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; in vfio_msix_early_setup()
1762 msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; in vfio_msix_early_setup()
1763 msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; in vfio_msix_early_setup()
1765 ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, in vfio_msix_early_setup()
1768 error_setg_errno(errp, -ret, "failed to get MSI-X irq info"); in vfio_msix_early_setup()
1773 msix->noresize = !!(irq_info.flags & VFIO_IRQ_INFO_NORESIZE); in vfio_msix_early_setup()
1780 if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) { in vfio_msix_early_setup()
1787 if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO && in vfio_msix_early_setup()
1788 (vdev->device_id & 0xff00) == 0x5800) { in vfio_msix_early_setup()
1789 msix->pba_offset = 0x1000; in vfio_msix_early_setup()
1797 msix->pba_offset = 0xb400; in vfio_msix_early_setup()
1798 } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { in vfio_msix_early_setup()
1806 trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar, in vfio_msix_early_setup()
1807 msix->table_offset, msix->entries, in vfio_msix_early_setup()
1808 msix->noresize); in vfio_msix_early_setup()
1809 vdev->msix = msix; in vfio_msix_early_setup()
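A hedged sketch of the capability parse feeding the fields above, given raw little-endian config-space bytes: the message control word, the table register at pos + PCI_MSIX_TABLE, and the PBA register at pos + PCI_MSIX_PBA, each splitting into a BAR indicator (low 3 bits) and an offset. struct msix_layout is illustrative, not a QEMU type.

#include <stdint.h>
#include <string.h>
#include <linux/pci_regs.h>

struct msix_layout {
    uint8_t table_bar, pba_bar;
    uint32_t table_offset, pba_offset;
    uint16_t entries;
};

static void parse_msix_cap(const uint8_t *cfg, unsigned int pos,
                           struct msix_layout *out)
{
    uint16_t ctrl = cfg[pos + PCI_MSIX_FLAGS] |
                    (cfg[pos + PCI_MSIX_FLAGS + 1] << 8);
    uint32_t table, pba;

    memcpy(&table, cfg + pos + PCI_MSIX_TABLE, 4);  /* assumes LE host */
    memcpy(&pba, cfg + pos + PCI_MSIX_PBA, 4);

    out->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
    out->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    out->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    out->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    out->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
}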
1821 vdev->msix->pending = g_new0(unsigned long, in vfio_msix_setup()
1822 BITS_TO_LONGS(vdev->msix->entries)); in vfio_msix_setup()
1823 ret = msix_init(&vdev->pdev, vdev->msix->entries, in vfio_msix_setup()
1824 vdev->bars[vdev->msix->table_bar].mr, in vfio_msix_setup()
1825 vdev->msix->table_bar, vdev->msix->table_offset, in vfio_msix_setup()
1826 vdev->bars[vdev->msix->pba_bar].mr, in vfio_msix_setup()
1827 vdev->msix->pba_bar, vdev->msix->pba_offset, pos, in vfio_msix_setup()
1830 if (ret == -ENOTSUP) { in vfio_msix_setup()
1841 * MSI-X structures and avoid overlapping non-MSI-X related registers. in vfio_msix_setup()
1842 * For an assigned device, this hopefully means that emulation of MSI-X in vfio_msix_setup()
1850 * disable the PBA MemoryRegion unless it's being used. We disable it in vfio_msix_setup()
1852 * vector-use notifier is called, which occurs on unmask, we test whether in vfio_msix_setup()
1853 * PBA emulation is needed and again disable if not. in vfio_msix_setup()
1855 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); in vfio_msix_setup()
1866 "vfio-no-msix-emulation", NULL)) { in vfio_msix_setup()
1867 memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false); in vfio_msix_setup()
1875 msi_uninit(&vdev->pdev); in vfio_pci_teardown_msi()
1877 if (vdev->msix) { in vfio_pci_teardown_msi()
1878 msix_uninit(&vdev->pdev, in vfio_pci_teardown_msi()
1879 vdev->bars[vdev->msix->table_bar].mr, in vfio_pci_teardown_msi()
1880 vdev->bars[vdev->msix->pba_bar].mr); in vfio_pci_teardown_msi()
1881 g_free(vdev->msix->pending); in vfio_pci_teardown_msi()
1893 vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled); in vfio_mmap_set_enabled()
1899 VFIOBAR *bar = &vdev->bars[nr]; in vfio_bar_prepare()
1905 if (!bar->region.size) { in vfio_bar_prepare()
1918 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); in vfio_bar_prepare()
1919 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); in vfio_bar_prepare()
1920 bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : in vfio_bar_prepare()
1922 bar->size = bar->region.size; in vfio_bar_prepare()
1925 bar->region.post_wr = (bar->ioport == 0); in vfio_bar_prepare()
1939 VFIOBAR *bar = &vdev->bars[nr]; in vfio_bar_register()
1942 if (!bar->size) { in vfio_bar_register()
1946 bar->mr = g_new0(MemoryRegion, 1); in vfio_bar_register()
1947 name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr); in vfio_bar_register()
1948 memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size); in vfio_bar_register()
1951 if (bar->region.size) { in vfio_bar_register()
1952 memory_region_add_subregion(bar->mr, 0, bar->region.mem); in vfio_bar_register()
1954 if (vfio_region_mmap(&bar->region)) { in vfio_bar_register()
1956 vdev->vbasedev.name, nr); in vfio_bar_register()
1960 pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr); in vfio_bar_register()
1977 VFIOBAR *bar = &vdev->bars[i]; in vfio_pci_bars_exit()
1980 vfio_region_exit(&bar->region); in vfio_pci_bars_exit()
1981 if (bar->region.size) { in vfio_pci_bars_exit()
1982 memory_region_del_subregion(bar->mr, bar->region.mem); in vfio_pci_bars_exit()
1986 if (vdev->vga) { in vfio_pci_bars_exit()
1987 pci_unregister_vga(&vdev->pdev); in vfio_pci_bars_exit()
1997 VFIOBAR *bar = &vdev->bars[i]; in vfio_bars_finalize()
2000 vfio_region_finalize(&bar->region); in vfio_bars_finalize()
2001 if (bar->mr) { in vfio_bars_finalize()
2002 assert(bar->size); in vfio_bars_finalize()
2003 object_unparent(OBJECT(bar->mr)); in vfio_bars_finalize()
2004 g_free(bar->mr); in vfio_bars_finalize()
2005 bar->mr = NULL; in vfio_bars_finalize()
2009 if (vdev->vga) { in vfio_bars_finalize()
2011 for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) { in vfio_bars_finalize()
2012 object_unparent(OBJECT(&vdev->vga->region[i].mem)); in vfio_bars_finalize()
2014 g_free(vdev->vga); in vfio_bars_finalize()
2026 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; in vfio_std_cap_max_size()
2027 tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) { in vfio_std_cap_max_size()
2033 return next - pos; in vfio_std_cap_max_size()
2048 return next - pos; in vfio_ext_cap_max_size()
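A sketch of the size computation these helpers perform: because the capability list is not sorted by offset, a capability's usable size is bounded by the nearest capability that starts after it, or by the end of standard config space.

#include <stdint.h>

/* 0x34 = PCI_CAPABILITY_LIST; each cap's byte +1 is PCI_CAP_LIST_NEXT. */
static unsigned int std_cap_max_size(const uint8_t *config, uint8_t pos)
{
    unsigned int next = 0x100;          /* PCI_CONFIG_SPACE_SIZE */
    uint8_t tmp;

    for (tmp = config[0x34]; tmp; tmp = config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;                 /* nearest capability above pos */
        }
    }
    return next - pos;                  /* room before the next capability */
}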
2059 vfio_set_word_bits(vdev->pdev.config + pos, val, mask); in vfio_add_emulated_word()
2060 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); in vfio_add_emulated_word()
2061 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); in vfio_add_emulated_word()
2072 vfio_set_long_bits(vdev->pdev.config + pos, val, mask); in vfio_add_emulated_long()
2073 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); in vfio_add_emulated_long()
2074 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); in vfio_add_emulated_long()
2081 PCIBus *bus = pci_get_bus(&vdev->pdev); in vfio_pci_enable_rp_atomics()
2082 PCIDevice *parent = bus->parent_dev; in vfio_pci_enable_rp_atomics()
2094 if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap || in vfio_pci_enable_rp_atomics()
2097 vdev->pdev.devfn || in vfio_pci_enable_rp_atomics()
2098 vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { in vfio_pci_enable_rp_atomics()
2102 pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2; in vfio_pci_enable_rp_atomics()
2111 info = vfio_get_device_info(vdev->vbasedev.fd); in vfio_pci_enable_rp_atomics()
2122 if (cap->flags & VFIO_PCI_ATOMIC_COMP32) { in vfio_pci_enable_rp_atomics()
2125 if (cap->flags & VFIO_PCI_ATOMIC_COMP64) { in vfio_pci_enable_rp_atomics()
2128 if (cap->flags & VFIO_PCI_ATOMIC_COMP128) { in vfio_pci_enable_rp_atomics()
2137 vdev->clear_parent_atomics_on_exit = true; in vfio_pci_enable_rp_atomics()
2142 if (vdev->clear_parent_atomics_on_exit) { in vfio_pci_disable_rp_atomics()
2143 PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev; in vfio_pci_disable_rp_atomics()
2144 uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2; in vfio_pci_disable_rp_atomics()
2158 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); in vfio_setup_pcie_cap()
2170 if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) { in vfio_setup_pcie_cap()
2171 PCIBus *bus = pci_get_bus(&vdev->pdev); in vfio_setup_pcie_cap()
2176 * as-is on non-express buses. The reason being that some drivers in vfio_setup_pcie_cap()
2187 * valid transitions between bus types. An express device on a non- in vfio_setup_pcie_cap()
2203 } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) { in vfio_setup_pcie_cap()
2260 * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0 in vfio_setup_pcie_cap()
2271 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size, in vfio_setup_pcie_cap()
2277 vdev->pdev.exp.exp_cap = pos; in vfio_setup_pcie_cap()
2284 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); in vfio_check_pcie_flr()
2287 trace_vfio_check_pcie_flr(vdev->vbasedev.name); in vfio_check_pcie_flr()
2288 vdev->has_flr = true; in vfio_check_pcie_flr()
2294 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); in vfio_check_pm_reset()
2297 trace_vfio_check_pm_reset(vdev->vbasedev.name); in vfio_check_pm_reset()
2298 vdev->has_pm_reset = true; in vfio_check_pm_reset()
2304 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); in vfio_check_af_flr()
2307 trace_vfio_check_af_flr(vdev->vbasedev.name); in vfio_check_af_flr()
2308 vdev->has_flr = true; in vfio_check_af_flr()
2315 PCIDevice *pdev = &vdev->pdev; in vfio_add_vendor_specific_cap()
2327 if (vdev->skip_vsc_check && size > 3) { in vfio_add_vendor_specific_cap()
2328 memset(pdev->cmask + pos + 3, 0, size - 3); in vfio_add_vendor_specific_cap()
2337 PCIDevice *pdev = &vdev->pdev; in vfio_add_std_cap()
2341 cap_id = pdev->config[pos]; in vfio_add_std_cap()
2342 next = pdev->config[pos + PCI_CAP_LIST_NEXT]; in vfio_add_std_cap()
2356 * This is also why we pre-calculate size above as cached config space in vfio_add_std_cap()
2365 pdev->config[PCI_CAPABILITY_LIST] = 0; in vfio_add_std_cap()
2366 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; in vfio_add_std_cap()
2367 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; in vfio_add_std_cap()
2378 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff); in vfio_add_std_cap()
2395 * PCI-core config space emulation needs write access to the power in vfio_add_std_cap()
2398 pci_set_word(pdev->wmask + pos + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK); in vfio_add_std_cap()
2426 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL); in vfio_setup_rebar_ecap()
2433 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8)); in vfio_setup_rebar_ecap()
2445 * might need an opt-in or reservation scheme in the kernel. in vfio_setup_rebar_ecap()
2448 return -EINVAL; in vfio_setup_rebar_ecap()
2471 PCIDevice *pdev = &vdev->pdev; in vfio_add_ext_cap()
2479 !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) { in vfio_add_ext_cap()
2489 config = g_memdup(pdev->config, vdev->config_size); in vfio_add_ext_cap()
2505 * capability ID, version, AND next pointer. A non-zero next pointer in vfio_add_ext_cap()
2515 pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE, in vfio_add_ext_cap()
2517 pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0); in vfio_add_ext_cap()
2518 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0); in vfio_add_ext_cap()
2535 pci_long_test_and_set_mask(vdev->emulated_config_bits + next, in vfio_add_ext_cap()
2540 case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */ in vfio_add_ext_cap()
2542 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next); in vfio_add_ext_cap()
2556 if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) { in vfio_add_ext_cap()
2557 pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0); in vfio_add_ext_cap()
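For reference, a sketch of the extended-capability header being rewritten in this pass: a 16-bit capability ID, a 4-bit version, and a 12-bit next pointer packed into the dword at each capability offset (the same packing QEMU's PCI_EXT_CAP macros express).

#include <stdint.h>

/* Pack id/version/next into one dword, per the PCIe spec layout. */
static uint32_t ext_cap_header(uint16_t id, uint8_t ver, uint16_t next)
{
    return id | ((uint32_t)(ver & 0xf) << 16) |
           ((uint32_t)(next & 0xfff) << 20);
}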
2565 PCIDevice *pdev = &vdev->pdev; in vfio_pci_add_capabilities()
2567 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || in vfio_pci_add_capabilities()
2568 !pdev->config[PCI_CAPABILITY_LIST]) { in vfio_pci_add_capabilities()
2572 if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) { in vfio_pci_add_capabilities()
2582 PCIDevice *pdev = &vdev->pdev; in vfio_pci_pre_reset()
2589 * Also put INTx Disable in a known state. in vfio_pci_pre_reset()
2597 if (pdev->pm_cap) { in vfio_pci_pre_reset()
2601 pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2); in vfio_pci_pre_reset()
2605 vfio_pci_write_config(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); in vfio_pci_pre_reset()
2607 pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2); in vfio_pci_pre_reset()
2619 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_post_reset()
2624 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_pci_post_reset()
2627 for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) { in vfio_pci_post_reset()
2635 vbasedev->name, nr, strwriteerror(ret)); in vfio_pci_post_reset()
2646 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain, in vfio_pci_host_match()
2647 addr->bus, addr->slot, addr->function); in vfio_pci_host_match()
2661 info->argsz = sizeof(*info); in vfio_pci_get_pci_hot_reset_info()
2663 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); in vfio_pci_get_pci_hot_reset_info()
2665 ret = -errno; in vfio_pci_get_pci_hot_reset_info()
2667 if (!vdev->has_pm_reset) { in vfio_pci_get_pci_hot_reset_info()
2669 "no available reset mechanism.", vdev->vbasedev.name); in vfio_pci_get_pci_hot_reset_info()
2674 count = info->count; in vfio_pci_get_pci_hot_reset_info()
2675 info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0]))); in vfio_pci_get_pci_hot_reset_info()
2676 info->argsz = sizeof(*info) + (count * sizeof(info->devices[0])); in vfio_pci_get_pci_hot_reset_info()
2678 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); in vfio_pci_get_pci_hot_reset_info()
2680 ret = -errno; in vfio_pci_get_pci_hot_reset_info()
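A standalone sketch of the two-call argsz dance above, against the raw VFIO uAPI: the first call is expected to fail with ENOSPC but reports the dependent-device count, after which the buffer is regrown and the call repeated. device_fd is an assumed open VFIO device descriptor.

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_pci_hot_reset_info *get_hot_reset_info(int device_fd)
{
    struct vfio_pci_hot_reset_info *info = calloc(1, sizeof(*info));
    struct vfio_pci_hot_reset_info *bigger;
    size_t argsz;

    if (!info) {
        return NULL;
    }
    info->argsz = sizeof(*info);

    /* Probe: fails with ENOSPC but fills in info->count */
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
        errno != ENOSPC) {
        free(info);          /* no bus/slot reset path for this device */
        return NULL;
    }

    argsz = sizeof(*info) +
            info->count * sizeof(struct vfio_pci_dependent_device);
    bigger = realloc(info, argsz);
    if (!bigger) {
        free(info);
        return NULL;
    }
    info = bigger;
    info->argsz = argsz;

    /* Second pass fills in every device affected by the bus reset */
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
        free(info);
        return NULL;
    }
    return info;
}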
2692 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_hot_reset()
2693 const VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer); in vfio_pci_hot_reset()
2695 return vioc->pci_hot_reset(vbasedev, single); in vfio_pci_hot_reset()
2699 * We want to differentiate hot reset of multiple in-use devices vs hot reset
2700 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2701 * of doing hot resets when there is only a single device per bus. The in-use
2703 * multiple devices, but only a single in-use device, means that we can call
2704 * it from our bus ->reset() callback since the extent is effectively a single
2706 * are multiple in-use devices, we can only trigger the hot reset during a
2709 * path where both our reset handler and ->reset() callback are used. Calling
2710 * _one() will only do a hot reset for the single in-use device case, calling
2727 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { in vfio_pci_compute_needs_reset()
2728 vbasedev->needs_reset = true; in vfio_pci_compute_needs_reset()
2757 return vdev->ramfb_migrate == ON_OFF_AUTO_ON || in vfio_display_migration_needed()
2758 (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb); in vfio_display_migration_needed()
2799 PCIDevice *pdev = &vdev->pdev; in vfio_pci_load_config()
2800 pcibus_t old_addr[PCI_NUM_REGIONS - 1]; in vfio_pci_load_config()
2804 old_addr[bar] = pdev->io_regions[bar].addr; in vfio_pci_load_config()
2813 pci_get_word(pdev->config + PCI_COMMAND), 2); in vfio_pci_load_config()
2820 if (old_addr[bar] != pdev->io_regions[bar].addr && in vfio_pci_load_config()
2821 vdev->bars[bar].region.size > 0 && in vfio_pci_load_config()
2822 vdev->bars[bar].region.size < qemu_real_host_page_size()) { in vfio_pci_load_config()
2838 PCIDevice *pdev = &vdev->pdev; in vfio_sub_page_bar_update_mappings()
2843 PCIIORegion *r = &pdev->io_regions[bar]; in vfio_sub_page_bar_update_mappings()
2844 if (r->addr != PCI_BAR_UNMAPPED && r->size > 0 && r->size < page_size) { in vfio_sub_page_bar_update_mappings()
2861 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_populate_vga()
2867 error_setg_errno(errp, -ret, in vfio_populate_vga()
2873 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) || in vfio_populate_vga()
2874 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) || in vfio_populate_vga()
2875 reg_info->size < 0xbffff + 1) { in vfio_populate_vga()
2877 (unsigned long)reg_info->flags, in vfio_populate_vga()
2878 (unsigned long)reg_info->size); in vfio_populate_vga()
2882 vdev->vga = g_new0(VFIOVGA, 1); in vfio_populate_vga()
2884 vdev->vga->fd_offset = reg_info->offset; in vfio_populate_vga()
2885 vdev->vga->fd = vdev->vbasedev.fd; in vfio_populate_vga()
2887 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; in vfio_populate_vga()
2888 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; in vfio_populate_vga()
2889 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks); in vfio_populate_vga()
2891 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem, in vfio_populate_vga()
2893 &vdev->vga->region[QEMU_PCI_VGA_MEM], in vfio_populate_vga()
2894 "vfio-vga-mmio@0xa0000", in vfio_populate_vga()
2897 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; in vfio_populate_vga()
2898 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; in vfio_populate_vga()
2899 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks); in vfio_populate_vga()
2901 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, in vfio_populate_vga()
2903 &vdev->vga->region[QEMU_PCI_VGA_IO_LO], in vfio_populate_vga()
2904 "vfio-vga-io@0x3b0", in vfio_populate_vga()
2907 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; in vfio_populate_vga()
2908 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; in vfio_populate_vga()
2909 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks); in vfio_populate_vga()
2911 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, in vfio_populate_vga()
2913 &vdev->vga->region[QEMU_PCI_VGA_IO_HI], in vfio_populate_vga()
2914 "vfio-vga-io@0x3c0", in vfio_populate_vga()
2922 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_populate_device()
2925 int i, ret = -1; in vfio_pci_populate_device()
2928 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) { in vfio_pci_populate_device()
2933 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { in vfio_pci_populate_device()
2935 vbasedev->num_regions); in vfio_pci_populate_device()
2939 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { in vfio_pci_populate_device()
2940 error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs); in vfio_pci_populate_device()
2945 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i); in vfio_pci_populate_device()
2948 &vdev->bars[i].region, i, name); in vfio_pci_populate_device()
2952 error_setg_errno(errp, -ret, "failed to get region %d info", i); in vfio_pci_populate_device()
2956 QLIST_INIT(&vdev->bars[i].quirks); in vfio_pci_populate_device()
2962 error_setg_errno(errp, -ret, "failed to get config info"); in vfio_pci_populate_device()
2966 trace_vfio_pci_populate_device_config(vdev->vbasedev.name, in vfio_pci_populate_device()
2967 (unsigned long)reg_info->size, in vfio_pci_populate_device()
2968 (unsigned long)reg_info->offset, in vfio_pci_populate_device()
2969 (unsigned long)reg_info->flags); in vfio_pci_populate_device()
2971 vdev->config_size = reg_info->size; in vfio_pci_populate_device()
2972 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { in vfio_pci_populate_device()
2973 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; in vfio_pci_populate_device()
2975 vdev->config_offset = reg_info->offset; in vfio_pci_populate_device()
2977 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) { in vfio_pci_populate_device()
2980 "requested feature x-vga\n"); in vfio_pci_populate_device()
2988 trace_vfio_pci_populate_device_get_irq_info_failure(strerror(-ret)); in vfio_pci_populate_device()
2990 vdev->pci_aer = true; in vfio_pci_populate_device()
2994 vbasedev->name); in vfio_pci_populate_device()
3005 g_free(vdev->emulated_config_bits); in vfio_pci_put_device()
3006 g_free(vdev->rom); in vfio_pci_put_device()
3012 * g_free(vdev->igd_opregion); in vfio_pci_put_device()
3015 vfio_device_detach(&vdev->vbasedev); in vfio_pci_put_device()
3017 vfio_device_free_name(&vdev->vbasedev); in vfio_pci_put_device()
3018 g_free(vdev->msix); in vfio_pci_put_device()
3025 if (!event_notifier_test_and_clear(&vdev->err_notifier)) { in vfio_err_notifier_handler()
3038 error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name); in vfio_err_notifier_handler()
3054 if (!vdev->pci_aer) { in vfio_pci_register_err_notifier()
3058 if (!vfio_notifier_init(vdev, &vdev->err_notifier, "err_notifier", 0, in vfio_pci_register_err_notifier()
3061 vdev->pci_aer = false; in vfio_pci_register_err_notifier()
3065 fd = event_notifier_get_fd(&vdev->err_notifier); in vfio_pci_register_err_notifier()
3073 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, in vfio_pci_register_err_notifier()
3075 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_pci_register_err_notifier()
3077 vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0); in vfio_pci_register_err_notifier()
3078 vdev->pci_aer = false; in vfio_pci_register_err_notifier()
3086 if (!vdev->pci_aer) { in vfio_unregister_err_notifier()
3090 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, in vfio_unregister_err_notifier()
3091 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { in vfio_unregister_err_notifier()
3092 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_unregister_err_notifier()
3094 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), in vfio_unregister_err_notifier()
3096 vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0); in vfio_unregister_err_notifier()
3104 if (!event_notifier_test_and_clear(&vdev->req_notifier)) { in vfio_req_notifier_handler()
3110 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_req_notifier_handler()
    /* vfio_pci_register_req_notifier */
    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX,
                                   &irq_info);
    if (ret < 0 || irq_info.count < 1) {
        return;
    }

    if (!vfio_notifier_init(vdev, &vdev->req_notifier, "req_notifier", 0,
                            &err)) {
        error_report_err(err);
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    /* Do not alter irq_signaling state during vfio_realize for cpr */
    if (cpr_is_incoming()) {
        vdev->req_enabled = true;
        return;
    }

    if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                                       VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0);
    } else {
        vdev->req_enabled = true;
    }
    /* vfio_unregister_req_notifier */
    if (!vdev->req_enabled) {
        return;
    }

    if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                                       VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0);

    vdev->req_enabled = false;
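/*
 * vfio_pci_config_register_vga() routes legacy VGA accesses, the
 * framebuffer memory window plus the low and high I/O port ranges, to the
 * corresponding VFIO VGA regions of the physical device.
 */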
    assert(vdev->vga != NULL);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
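/*
 * vfio_pci_config_setup() builds the guest-visible view of config space.
 * vdev->emulated_config_bits is a mask the size of config space: bits set
 * below are served from QEMU's emulated copy in vdev->pdev.config, clear
 * bits come from the kernel on every access.  Roughly, the read path
 * combines the two like this (sketch, not the literal implementation):
 *
 *     memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
 *     emu_val  = pci_default_read_config(pdev, addr, len);
 *     phys_val = <read from the vfio config region>;
 *     val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
 *
 * User-supplied overrides (x-pci-vendor-id and friends) are installed via
 * vfio_add_emulated_word()/vfio_add_emulated_long() below.
 */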
    PCIDevice *pdev = &vdev->pdev;
    VFIODevice *vbasedev = &vdev->vbasedev;
    uint32_t config_space_size;
    int ret;

    config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size);

    /* Get a copy of config space */
    ret = vfio_pci_config_space_read(vdev, 0, config_space_size,
                                     vdev->pdev.config);
    if (ret < (int)config_space_size) {
        ret = ret < 0 ? -ret : EFAULT;
        error_setg_errno(errp, ret, "failed to read device config space");
        return false;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            return false;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            return false;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            return false;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vbasedev->name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            return false;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vbasedev->name,
                                              vdev->sub_device_id);
    }

    /*
     * Class code is a 24-bit value at config space 0x09. Allow overriding it
     * with any 24-bit value.
     */
    if (vdev->class_code != PCI_ANY_ID) {
        if (vdev->class_code > 0xffffff) {
            error_setg(errp, "invalid PCI class code provided");
            return false;
        }
        vfio_add_emulated_long(vdev, PCI_CLASS_REVISION,
                               vdev->class_code << 8, ~0xff);
        trace_vfio_pci_emulated_class_code(vbasedev->name, vdev->class_code);
    } else {
        vdev->class_code = pci_get_long(pdev->config + PCI_CLASS_REVISION) >> 8;
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /* Clear residual host resource mapping info from config space */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
    /* ... */

    if (vdev->vga && vfio_is_vga(vdev)) {
        vfio_pci_config_register_vga(vdev);
    }
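/*
 * vfio_pci_interrupt_setup(): MSI and MSI-X capability bytes are marked
 * fully emulated since QEMU virtualizes those for the guest.  If the
 * device advertises a physical INTx pin, the mmap timer, INTx routing
 * notifier and irqchip change notifier are wired up and INTx is enabled.
 */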
    PCIDevice *pdev = &vdev->pdev;

    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev,
                                             vfio_intx_routing_notifier);
        vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
        kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
        /* ... */
        if (!vfio_intx_enable(vdev, errp)) {
            timer_free(vdev->intx.mmap_timer);
            pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
            kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
            return false;
        }
    }

    return true;
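/*
 * vfio_pci_realize(): resolve the host device (host= address, fd= or
 * sysfsdev=), attach it via the VFIO backend, populate config space and
 * BARs, then bring up display, migration support and the error/request
 * notifiers.  The error paths at the bottom unwind in reverse order.
 */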
    VFIODevice *vbasedev = &vdev->vbasedev;
    /* ... */

    if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
#ifdef CONFIG_IOMMUFD
                              "or -device vfio-pci,fd=DEVICE_FD "
#endif
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vbasedev->sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }
    /* ... */

    /* ...
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    vbasedev->mdev = vfio_device_is_mdev(vbasedev);

    trace_vfio_mdev(vbasedev->name, vbasedev->mdev);

    if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        /* ... */
    }

    if (!qemu_uuid_is_null(&vdev->vf_token)) {
        qemu_uuid_unparse(&vdev->vf_token, uuid);
        name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
    } else {
        name = g_strdup(vbasedev->name);
    }
    /* ... */

    if (!vbasedev->mdev &&
        !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
        /* ... */
    }
    /* ... */

    if (vdev->vga) {
        /* ... */
    }
    /* ... */

    if (vdev->display != ON_OFF_AUTO_OFF) {
        if (!vfio_display_probe(vdev, errp)) {
            goto out_deregister;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_deregister;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_deregister;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_deregister;
        }
    }

    if (vdev->ramfb_migrate == ON_OFF_AUTO_ON && !vdev->enable_ramfb) {
        warn_report("x-ramfb-migrate=on but ramfb=off. "
                    "Forcing x-ramfb-migrate to off.");
        vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
    }
    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
        if (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO) {
            vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
        } else if (vdev->ramfb_migrate == ON_OFF_AUTO_ON) {
            error_setg(errp, "x-ramfb-migrate requires enable-migration");
            goto out_deregister;
        }
    }

    if (!pdev->failover_pair_id) {
        if (!vfio_migration_realize(vbasedev, errp)) {
            goto out_deregister;
        }
    }
    /* ... */
    return;

out_deregister:
    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    /* ... */
    if (!vbasedev->mdev) {
        pci_device_unset_iommu_device(pdev);
    }
    /* ... */
    error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
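/* vfio_exitfn() mirrors vfio_pci_realize(), releasing resources in reverse. */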
    VFIODevice *vbasedev = &vdev->vbasedev;

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    /* ... */
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    /* ... */
    if (!vbasedev->mdev) {
        pci_device_unset_iommu_device(pdev);
    }
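/*
 * vfio_pci_reset() escalates through reset mechanisms: a device-specific
 * reset hook first, then a function-level reset via VFIO_DEVICE_RESET
 * (preferring FLR over PM reset), with PM reset kept as the last resort.
 */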
    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }
    /* ... */

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
    }
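/*
 * vfio_instance_init() runs before properties are applied: the host.*
 * address fields start as ~0U meaning "unset" (checked in realize above),
 * and the NVIDIA GPUDirect clique defaults to 0xFF, effectively "not set".
 */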
    VFIODevice *vbasedev = &vdev->vbasedev;

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;
    /* ... */

    vdev->nv_gpudirect_clique = 0xFF;

    /*
     * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices.
     */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    /* ... */
    pci_dev->cap_present |= QEMU_PCI_SKIP_RESET_ON_CPR;
    /* vfio_pci_base_dev_class_init */
    dc->desc = "VFIO PCI base device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
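/*
 * Properties below are set with -device vfio-pci,<prop>=<value>.  Names
 * carrying an "x-" prefix are experimental or debugging knobs and not a
 * stable interface.  For example (hypothetical host address):
 *
 *     -device vfio-pci,host=0000:01:00.0,x-no-mmap=on
 */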
    DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.pre_copy_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("x-device-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.device_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false),
    DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice,
                            igd_legacy_mode, ON_OFF_AUTO_AUTO),
    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
    DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice,
                vbasedev.migration_multifd_transfer,
                vfio_pci_migration_multifd_transfer_prop, OnOffAuto,
                .set_default = true, .defval.i = ON_OFF_AUTO_AUTO),
    DEFINE_PROP_ON_OFF_AUTO("x-migration-load-config-after-iter", VFIOPCIDevice,
                            vbasedev.migration_load_config_after_iter,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_SIZE("x-migration-max-queued-buffers-size", VFIOPCIDevice,
                     vbasedev.migration_max_queued_buffers_size, UINT64_MAX),
    DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
                     vbasedev.migration_events, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-class-code", VFIOPCIDevice,
                       class_code, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTO_PCIBAR_OFF),
    /* ... */
    DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
    /* vfio_pci_set_fd */
    vfio_device_set_fd(&vdev->vbasedev, str, errp);
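/*
 * vfio_pci_dev_class_init() wires the realize hook and CPR vmstate, and
 * attaches per-property help text, which is what "-device vfio-pci,help"
 * prints.  The comments on each call record the QEMU version that
 * introduced the property.
 */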
    dc->vmsd = &vfio_cpr_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->realize = vfio_pci_realize;
    /* ... */

    object_class_property_set_description(klass, /* ... */
                                          "x-intx-mmap-timeout-ms",
                                          "When EOI is not provided by KVM/QEMU, wait time "
                                          "(milliseconds) to re-enable device direct access "
                                          "after INTx (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-vga",
                                          "Expose VGA address spaces for device");
    object_class_property_set_description(klass, /* ... */
                                          "x-req",
                                          "Disable device request notification support (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-no-mmap",
                                          "Disable MMAP for device. Allows tracing of MMIO "
                                          "accesses (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-no-kvm-intx",
                                          "Disable direct VFIO->KVM INTx injection. Allows to "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-no-kvm-msi",
                                          "Disable direct VFIO->KVM MSI injection. Allows to "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-no-kvm-msix",
                                          "Disable direct VFIO->KVM MSI-X injection. Allows to "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-pci-vendor-id",
                                          "Override PCI Vendor ID with provided value (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-pci-device-id",
                                          "Override PCI device ID with provided value (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-pci-sub-vendor-id",
                                          "Override PCI Subsystem Vendor ID with provided value "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-pci-sub-device-id",
                                          "Override PCI Subsystem Device ID with provided value "
                                          "(DEBUG)");
    /* ... */
    object_class_property_set_description(klass, /* ... */
                                          "x-igd-opregion",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-igd-gms",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-nv-gpudirect-clique",
                                          "Add NVIDIA GPUDirect capability indicating P2P DMA "
                                          "clique for device [0-15]");
    object_class_property_set_description(klass, /* ... */
                                          "x-no-geforce-quirks",
                                          "Disable GeForce quirks (for NVIDIA Quadro/GRID/Tesla). "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-msix-relocation",
                                          "Specify MSI-X MMIO relocation to the end of specified "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-no-kvm-ioeventfd",
                                          "Disable registration of ioeventfds with KVM (DEBUG)");
    object_class_property_set_description(klass, /* ... */
                                          "x-no-vfio-ioeventfd",
                                          "Disable linking of KVM ioeventfds to VFIO ioeventfds "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-balloon-allowed",
                                          /* ... */);
    /* ... */
    object_class_property_set_description(klass, /* ... */
                                          "x-pre-copy-dirty-page-tracking",
                                          "Disable dirty pages tracking during iterative phase "
                                          /* ... */);
    object_class_property_set_description(klass, /* 5.2, 8.0 non-experimental */
                                          "enable-migration",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "vf-token",
                                          /* ... */);
    /* ... */
    object_class_property_set_description(klass, /* ... */
                                          "x-device-dirty-page-tracking",
                                          "Disable device dirty page tracking and use "
                                          "container-based dirty page tracking");
    object_class_property_set_description(klass, /* ... */
                                          "migration-events",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "skip-vsc-check",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-migration-multifd-transfer",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-migration-load-config-after-iter",
                                          /* ... */
                                          "non-iterables loading phase) when "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-migration-max-queued-buffers-size",
                                          "Maximum size of in-flight VFIO "
                                          /* ... */);
    DEFINE_PROP_BOOL("use-legacy-x86-rom", VFIOPCIDevice,
                     use_legacy_x86_rom, false),
    DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate,
                            ON_OFF_AUTO_AUTO),
    /* vfio_pci_nohotplug_dev_class_init */
    dc->hotpluggable = false;
    /* ... */
    object_class_property_set_description(klass, /* ... */
                                          "ramfb",
                                          "Enable ramfb to provide pre-boot graphics for devices "
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "x-ramfb-migrate",
                                          /* ... */);
    object_class_property_set_description(klass, /* ... */
                                          "use-legacy-x86-rom",
                                          /* ... */);
    /* register_vfio_pci_dev_type */
    /*
     * Ordinary ON_OFF_AUTO property isn't runtime-mutable, but source VM can
     * ...
     */