Lines Matching full:vdev

53 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
54 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
55 static void vfio_msi_disable_common(VFIOPCIDevice *vdev);
74 VFIOPCIDevice *vdev = opaque; in vfio_intx_mmap_enable() local
76 if (vdev->intx.pending) { in vfio_intx_mmap_enable()
77 timer_mod(vdev->intx.mmap_timer, in vfio_intx_mmap_enable()
78 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); in vfio_intx_mmap_enable()
82 vfio_mmap_set_enabled(vdev, true); in vfio_intx_mmap_enable()
87 VFIOPCIDevice *vdev = opaque; in vfio_intx_interrupt() local
89 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { in vfio_intx_interrupt()
93 trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin); in vfio_intx_interrupt()
95 vdev->intx.pending = true; in vfio_intx_interrupt()
96 pci_irq_assert(&vdev->pdev); in vfio_intx_interrupt()
97 vfio_mmap_set_enabled(vdev, false); in vfio_intx_interrupt()
98 if (vdev->intx.mmap_timeout) { in vfio_intx_interrupt()
99 timer_mod(vdev->intx.mmap_timer, in vfio_intx_interrupt()
100 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); in vfio_intx_interrupt()
106 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_intx_eoi() local
108 if (!vdev->intx.pending) { in vfio_intx_eoi()
114 vdev->intx.pending = false; in vfio_intx_eoi()
115 pci_irq_deassert(&vdev->pdev); in vfio_intx_eoi()
119 static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) in vfio_intx_enable_kvm() argument
122 int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_enable_kvm()
124 if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || in vfio_intx_enable_kvm()
125 vdev->intx.route.mode != PCI_INTX_ENABLED || in vfio_intx_enable_kvm()
131 qemu_set_fd_handler(irq_fd, NULL, NULL, vdev); in vfio_intx_enable_kvm()
132 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
133 vdev->intx.pending = false; in vfio_intx_enable_kvm()
134 pci_irq_deassert(&vdev->pdev); in vfio_intx_enable_kvm()
137 if (event_notifier_init(&vdev->intx.unmask, 0)) { in vfio_intx_enable_kvm()
143 &vdev->intx.interrupt, in vfio_intx_enable_kvm()
144 &vdev->intx.unmask, in vfio_intx_enable_kvm()
145 vdev->intx.route.irq)) { in vfio_intx_enable_kvm()
150 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, in vfio_intx_enable_kvm()
152 event_notifier_get_fd(&vdev->intx.unmask), in vfio_intx_enable_kvm()
158 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
160 vdev->intx.kvm_accel = true; in vfio_intx_enable_kvm()
162 trace_vfio_intx_enable_kvm(vdev->vbasedev.name); in vfio_intx_enable_kvm()
167 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, in vfio_intx_enable_kvm()
168 vdev->intx.route.irq); in vfio_intx_enable_kvm()
170 event_notifier_cleanup(&vdev->intx.unmask); in vfio_intx_enable_kvm()
172 qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev); in vfio_intx_enable_kvm()
173 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_enable_kvm()
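The vfio_intx_enable_kvm() matches above hand the kernel an extra "unmask" eventfd (vdev->intx.unmask) so that INTx can be re-enabled when KVM resamples after the guest EOI. Outside QEMU's helpers, the same VFIO_DEVICE_SET_IRQS usage against a raw VFIO device fd can be sketched as below; device_fd and unmask_fd are placeholders and error handling is trimmed.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Illustrative sketch, not QEMU code: register 'unmask_fd' (an eventfd) so
 * that signalling it makes the VFIO driver unmask INTx -- the role the
 * vdev->intx.unmask notifier plays in the lines above. */
static int intx_set_unmask_eventfd(int device_fd, int unmask_fd)
{
    size_t argsz = sizeof(struct vfio_irq_set) + sizeof(unmask_fd);
    struct vfio_irq_set *set = calloc(1, argsz);
    int ret;

    set->argsz = argsz;
    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    set->index = VFIO_PCI_INTX_IRQ_INDEX;
    set->start = 0;
    set->count = 1;
    memcpy(set->data, &unmask_fd, sizeof(unmask_fd));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    free(set);
    return ret;
}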
180 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) in vfio_intx_disable_kvm() argument
183 if (!vdev->intx.kvm_accel) { in vfio_intx_disable_kvm()
191 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable_kvm()
192 vdev->intx.pending = false; in vfio_intx_disable_kvm()
193 pci_irq_deassert(&vdev->pdev); in vfio_intx_disable_kvm()
196 if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, in vfio_intx_disable_kvm()
197 vdev->intx.route.irq)) { in vfio_intx_disable_kvm()
202 event_notifier_cleanup(&vdev->intx.unmask); in vfio_intx_disable_kvm()
205 qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt), in vfio_intx_disable_kvm()
206 vfio_intx_interrupt, NULL, vdev); in vfio_intx_disable_kvm()
208 vdev->intx.kvm_accel = false; in vfio_intx_disable_kvm()
211 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable_kvm()
213 trace_vfio_intx_disable_kvm(vdev->vbasedev.name); in vfio_intx_disable_kvm()
217 static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route) in vfio_intx_update() argument
221 trace_vfio_intx_update(vdev->vbasedev.name, in vfio_intx_update()
222 vdev->intx.route.irq, route->irq); in vfio_intx_update()
224 vfio_intx_disable_kvm(vdev); in vfio_intx_update()
226 vdev->intx.route = *route; in vfio_intx_update()
232 if (!vfio_intx_enable_kvm(vdev, &err)) { in vfio_intx_update()
233 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_intx_update()
237 vfio_intx_eoi(&vdev->vbasedev); in vfio_intx_update()
242 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_intx_routing_notifier() local
245 if (vdev->interrupt != VFIO_INT_INTx) { in vfio_intx_routing_notifier()
249 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin); in vfio_intx_routing_notifier()
251 if (pci_intx_route_changed(&vdev->intx.route, &route)) { in vfio_intx_routing_notifier()
252 vfio_intx_update(vdev, &route); in vfio_intx_routing_notifier()
258 VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice, in vfio_irqchip_change() local
261 vfio_intx_update(vdev, &vdev->intx.route); in vfio_irqchip_change()
264 static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) in vfio_intx_enable() argument
266 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); in vfio_intx_enable()
276 vfio_disable_interrupts(vdev); in vfio_intx_enable()
278 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ in vfio_intx_enable()
279 pci_config_set_interrupt_pin(vdev->pdev.config, pin); in vfio_intx_enable()
287 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, in vfio_intx_enable()
288 vdev->intx.pin); in vfio_intx_enable()
292 ret = event_notifier_init(&vdev->intx.interrupt, 0); in vfio_intx_enable()
297 fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_enable()
298 qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev); in vfio_intx_enable()
300 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, in vfio_intx_enable()
302 qemu_set_fd_handler(fd, NULL, NULL, vdev); in vfio_intx_enable()
303 event_notifier_cleanup(&vdev->intx.interrupt); in vfio_intx_enable()
307 if (!vfio_intx_enable_kvm(vdev, &err)) { in vfio_intx_enable()
308 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_intx_enable()
311 vdev->interrupt = VFIO_INT_INTx; in vfio_intx_enable()
313 trace_vfio_intx_enable(vdev->vbasedev.name); in vfio_intx_enable()
317 static void vfio_intx_disable(VFIOPCIDevice *vdev) in vfio_intx_disable() argument
321 timer_del(vdev->intx.mmap_timer); in vfio_intx_disable()
322 vfio_intx_disable_kvm(vdev); in vfio_intx_disable()
323 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); in vfio_intx_disable()
324 vdev->intx.pending = false; in vfio_intx_disable()
325 pci_irq_deassert(&vdev->pdev); in vfio_intx_disable()
326 vfio_mmap_set_enabled(vdev, true); in vfio_intx_disable()
328 fd = event_notifier_get_fd(&vdev->intx.interrupt); in vfio_intx_disable()
329 qemu_set_fd_handler(fd, NULL, NULL, vdev); in vfio_intx_disable()
330 event_notifier_cleanup(&vdev->intx.interrupt); in vfio_intx_disable()
332 vdev->interrupt = VFIO_INT_NONE; in vfio_intx_disable()
334 trace_vfio_intx_disable(vdev->vbasedev.name); in vfio_intx_disable()
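Taken together, the vfio_intx_enable()/vfio_intx_disable() matches reduce to a small amount of VFIO UAPI: route the INTx trigger to an eventfd, and mask or unmask the whole index (what vfio_mask/unmask_single_irqindex() do around the mmap toggling). A hedged, QEMU-free sketch of both calls follows; device_fd is a placeholder and error handling is minimal.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Point the device's INTx at a fresh eventfd and return it; an 8-byte read()
 * on that fd consumes pending interrupts, which is what
 * event_notifier_test_and_clear() does in vfio_intx_interrupt() above. */
static int intx_enable(int device_fd)
{
    int fd = eventfd(0, EFD_CLOEXEC);
    size_t argsz = sizeof(struct vfio_irq_set) + sizeof(fd);
    struct vfio_irq_set *set = calloc(1, argsz);
    int ret;

    set->argsz = argsz;
    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    set->index = VFIO_PCI_INTX_IRQ_INDEX;
    set->start = 0;
    set->count = 1;
    memcpy(set->data, &fd, sizeof(fd));
    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    free(set);

    if (ret) {
        close(fd);
        return -1;
    }
    return fd;
}

/* Mask or unmask the INTx index without touching the trigger eventfd. */
static int intx_set_masked(int device_fd, int masked)
{
    struct vfio_irq_set set = {
        .argsz = sizeof(set),
        .flags = VFIO_IRQ_SET_DATA_NONE |
                 (masked ? VFIO_IRQ_SET_ACTION_MASK : VFIO_IRQ_SET_ACTION_UNMASK),
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .count = 1,
    };

    return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
}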
343 VFIOPCIDevice *vdev = vector->vdev; in vfio_msi_interrupt() local
347 int nr = vector - vdev->msi_vectors; in vfio_msi_interrupt()
353 if (vdev->interrupt == VFIO_INT_MSIX) { in vfio_msi_interrupt()
358 if (msix_is_masked(&vdev->pdev, nr)) { in vfio_msi_interrupt()
359 set_bit(nr, vdev->msix->pending); in vfio_msi_interrupt()
360 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true); in vfio_msi_interrupt()
361 trace_vfio_msix_pba_enable(vdev->vbasedev.name); in vfio_msi_interrupt()
363 } else if (vdev->interrupt == VFIO_INT_MSI) { in vfio_msi_interrupt()
370 msg = get_msg(&vdev->pdev, nr); in vfio_msi_interrupt()
371 trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data); in vfio_msi_interrupt()
372 notify(&vdev->pdev, nr); in vfio_msi_interrupt()
379 static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev) in vfio_enable_msix_no_vec() argument
397 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); in vfio_enable_msix_no_vec()
402 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) in vfio_enable_vectors() argument
417 if (msix && !vdev->msix->noresize) { in vfio_enable_vectors()
418 ret = vfio_enable_msix_no_vec(vdev); in vfio_enable_vectors()
425 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); in vfio_enable_vectors()
432 irq_set->count = vdev->nr_vectors; in vfio_enable_vectors()
435 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_enable_vectors()
444 if (vdev->msi_vectors[i].use) { in vfio_enable_vectors()
445 if (vdev->msi_vectors[i].virq < 0 || in vfio_enable_vectors()
446 (msix && msix_is_masked(&vdev->pdev, i))) { in vfio_enable_vectors()
447 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); in vfio_enable_vectors()
449 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); in vfio_enable_vectors()
456 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); in vfio_enable_vectors()
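vfio_enable_vectors() above enables a whole block of MSI or MSI-X vectors in a single VFIO_DEVICE_SET_IRQS call by packing one file descriptor per vector into the data array, with -1 for vectors that should stay untriggered. A standalone sketch of that layout (device_fd, fds and nvec are assumptions):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Enable 'nvec' vectors at once; fds[i] is the eventfd to trigger for
 * vector i, or -1 to leave that vector unset -- the same data layout the
 * loop in vfio_enable_vectors() builds. */
static int enable_vectors(int device_fd, bool msix, const int *fds, int nvec)
{
    size_t argsz = sizeof(struct vfio_irq_set) + nvec * sizeof(int);
    struct vfio_irq_set *set = calloc(1, argsz);
    int ret;

    set->argsz = argsz;
    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    set->start = 0;
    set->count = nvec;
    memcpy(set->data, fds, nvec * sizeof(int));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    free(set);
    return ret;
}

The msix->noresize handling visible above exists because some kernels flag the MSI-X index as non-resizable; growing the vector count then means disabling the index and re-enabling the full set rather than extending it in place.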
463 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, in vfio_add_kvm_msi_virq() argument
466 if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { in vfio_add_kvm_msi_virq()
471 vector_n, &vdev->pdev); in vfio_add_kvm_msi_virq()
517 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_msix_vector_do_use() local
520 bool resizing = !!(vdev->nr_vectors < nr + 1); in vfio_msix_vector_do_use()
522 trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr); in vfio_msix_vector_do_use()
524 vector = &vdev->msi_vectors[nr]; in vfio_msix_vector_do_use()
527 vector->vdev = vdev; in vfio_msix_vector_do_use()
551 if (vdev->defer_kvm_irq_routing) { in vfio_msix_vector_do_use()
552 vfio_add_kvm_msi_virq(vdev, vector, nr, true); in vfio_msix_vector_do_use()
555 vfio_add_kvm_msi_virq(vdev, vector, nr, true); in vfio_msix_vector_do_use()
574 vdev->nr_vectors = nr + 1; in vfio_msix_vector_do_use()
577 if (!vdev->defer_kvm_irq_routing) { in vfio_msix_vector_do_use()
578 if (vdev->msix->noresize && resizing) { in vfio_msix_vector_do_use()
579 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); in vfio_msix_vector_do_use()
580 ret = vfio_enable_vectors(vdev, true); in vfio_msix_vector_do_use()
594 if (!vfio_set_irq_signaling(&vdev->vbasedev, in vfio_msix_vector_do_use()
598 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msix_vector_do_use()
604 clear_bit(nr, vdev->msix->pending); in vfio_msix_vector_do_use()
605 if (find_first_bit(vdev->msix->pending, in vfio_msix_vector_do_use()
606 vdev->nr_vectors) == vdev->nr_vectors) { in vfio_msix_vector_do_use()
607 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); in vfio_msix_vector_do_use()
608 trace_vfio_msix_pba_disable(vdev->vbasedev.name); in vfio_msix_vector_do_use()
622 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_msix_vector_release() local
623 VFIOMSIVector *vector = &vdev->msi_vectors[nr]; in vfio_msix_vector_release()
625 trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); in vfio_msix_vector_release()
639 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, in vfio_msix_vector_release()
642 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msix_vector_release()
647 static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev) in vfio_prepare_kvm_msi_virq_batch() argument
649 assert(!vdev->defer_kvm_irq_routing); in vfio_prepare_kvm_msi_virq_batch()
650 vdev->defer_kvm_irq_routing = true; in vfio_prepare_kvm_msi_virq_batch()
654 static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) in vfio_commit_kvm_msi_virq_batch() argument
658 assert(vdev->defer_kvm_irq_routing); in vfio_commit_kvm_msi_virq_batch()
659 vdev->defer_kvm_irq_routing = false; in vfio_commit_kvm_msi_virq_batch()
663 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_commit_kvm_msi_virq_batch()
664 vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]); in vfio_commit_kvm_msi_virq_batch()
668 static void vfio_msix_enable(VFIOPCIDevice *vdev) in vfio_msix_enable() argument
672 vfio_disable_interrupts(vdev); in vfio_msix_enable()
674 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries); in vfio_msix_enable()
676 vdev->interrupt = VFIO_INT_MSIX; in vfio_msix_enable()
684 vfio_prepare_kvm_msi_virq_batch(vdev); in vfio_msix_enable()
686 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, in vfio_msix_enable()
691 vfio_commit_kvm_msi_virq_batch(vdev); in vfio_msix_enable()
693 if (vdev->nr_vectors) { in vfio_msix_enable()
694 ret = vfio_enable_vectors(vdev, true); in vfio_msix_enable()
711 ret = vfio_enable_msix_no_vec(vdev); in vfio_msix_enable()
717 trace_vfio_msix_enable(vdev->vbasedev.name); in vfio_msix_enable()
720 static void vfio_msi_enable(VFIOPCIDevice *vdev) in vfio_msi_enable() argument
724 vfio_disable_interrupts(vdev); in vfio_msi_enable()
726 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); in vfio_msi_enable()
733 vfio_prepare_kvm_msi_virq_batch(vdev); in vfio_msi_enable()
735 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); in vfio_msi_enable()
737 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msi_enable()
738 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_msi_enable()
740 vector->vdev = vdev; in vfio_msi_enable()
755 vfio_add_kvm_msi_virq(vdev, vector, i, false); in vfio_msi_enable()
758 vfio_commit_kvm_msi_virq_batch(vdev); in vfio_msi_enable()
761 vdev->interrupt = VFIO_INT_MSI; in vfio_msi_enable()
763 ret = vfio_enable_vectors(vdev, false); in vfio_msi_enable()
769 "MSI vectors, retry with %d", vdev->nr_vectors, ret); in vfio_msi_enable()
772 vfio_msi_disable_common(vdev); in vfio_msi_enable()
775 vdev->nr_vectors = ret; in vfio_msi_enable()
789 trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors); in vfio_msi_enable()
792 static void vfio_msi_disable_common(VFIOPCIDevice *vdev) in vfio_msi_disable_common() argument
796 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msi_disable_common()
797 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_msi_disable_common()
798 if (vdev->msi_vectors[i].use) { in vfio_msi_disable_common()
808 g_free(vdev->msi_vectors); in vfio_msi_disable_common()
809 vdev->msi_vectors = NULL; in vfio_msi_disable_common()
810 vdev->nr_vectors = 0; in vfio_msi_disable_common()
811 vdev->interrupt = VFIO_INT_NONE; in vfio_msi_disable_common()
814 static void vfio_msix_disable(VFIOPCIDevice *vdev) in vfio_msix_disable() argument
819 msix_unset_vector_notifiers(&vdev->pdev); in vfio_msix_disable()
825 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_msix_disable()
826 if (vdev->msi_vectors[i].use) { in vfio_msix_disable()
827 vfio_msix_vector_release(&vdev->pdev, i); in vfio_msix_disable()
828 msix_vector_unuse(&vdev->pdev, i); in vfio_msix_disable()
836 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); in vfio_msix_disable()
838 vfio_msi_disable_common(vdev); in vfio_msix_disable()
839 if (!vfio_intx_enable(vdev, &err)) { in vfio_msix_disable()
840 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msix_disable()
843 memset(vdev->msix->pending, 0, in vfio_msix_disable()
844 BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long)); in vfio_msix_disable()
846 trace_vfio_msix_disable(vdev->vbasedev.name); in vfio_msix_disable()
849 static void vfio_msi_disable(VFIOPCIDevice *vdev) in vfio_msi_disable() argument
853 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); in vfio_msi_disable()
854 vfio_msi_disable_common(vdev); in vfio_msi_disable()
855 vfio_intx_enable(vdev, &err); in vfio_msi_disable()
857 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_msi_disable()
860 trace_vfio_msi_disable(vdev->vbasedev.name); in vfio_msi_disable()
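Both vfio_msi_disable() and vfio_msix_disable() go through vfio_disable_irqindex(), which tears down every trigger on an index in one call: DATA_NONE plus ACTION_TRIGGER with a count of 0. Sketch (device_fd is a placeholder):

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Disable an IRQ index entirely (INTx, MSI or MSI-X). */
static int disable_irq_index(int device_fd, unsigned index)
{
    struct vfio_irq_set set = {
        .argsz = sizeof(set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
}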
863 static void vfio_update_msi(VFIOPCIDevice *vdev) in vfio_update_msi() argument
867 for (i = 0; i < vdev->nr_vectors; i++) { in vfio_update_msi()
868 VFIOMSIVector *vector = &vdev->msi_vectors[i]; in vfio_update_msi()
875 msg = msi_get_message(&vdev->pdev, i); in vfio_update_msi()
876 vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev); in vfio_update_msi()
880 static void vfio_pci_load_rom(VFIOPCIDevice *vdev) in vfio_pci_load_rom() argument
887 if (vfio_get_region_info(&vdev->vbasedev, in vfio_pci_load_rom()
893 trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size, in vfio_pci_load_rom()
897 vdev->rom_size = size = reg_info->size; in vfio_pci_load_rom()
898 vdev->rom_offset = reg_info->offset; in vfio_pci_load_rom()
900 if (!vdev->rom_size) { in vfio_pci_load_rom()
901 vdev->rom_read_failed = true; in vfio_pci_load_rom()
903 "%s", vdev->vbasedev.name); in vfio_pci_load_rom()
910 vdev->rom = g_malloc(size); in vfio_pci_load_rom()
911 memset(vdev->rom, 0xff, size); in vfio_pci_load_rom()
914 bytes = pread(vdev->vbasedev.fd, vdev->rom + off, in vfio_pci_load_rom()
915 size, vdev->rom_offset + off); in vfio_pci_load_rom()
936 if (pci_get_word(vdev->rom) == 0xaa55 && in vfio_pci_load_rom()
937 pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size && in vfio_pci_load_rom()
938 !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) { in vfio_pci_load_rom()
941 vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4); in vfio_pci_load_rom()
942 did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6); in vfio_pci_load_rom()
944 if (vid == vdev->vendor_id && did != vdev->device_id) { in vfio_pci_load_rom()
946 uint8_t csum, *data = vdev->rom; in vfio_pci_load_rom()
948 pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6, in vfio_pci_load_rom()
949 vdev->device_id); in vfio_pci_load_rom()
952 for (csum = 0, i = 0; i < vdev->rom_size; i++) { in vfio_pci_load_rom()
963 VFIOPCIDevice *vdev = opaque; in vfio_rom_read() local
973 if (unlikely(!vdev->rom && !vdev->rom_read_failed)) { in vfio_rom_read()
974 vfio_pci_load_rom(vdev); in vfio_rom_read()
977 memcpy(&val, vdev->rom + addr, in vfio_rom_read()
978 (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0); in vfio_rom_read()
995 trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data); in vfio_rom_read()
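vfio_pci_load_rom() above sizes the expansion ROM with VFIO_DEVICE_GET_REGION_INFO and then pread()s it from the region's file offset; vfio_rom_read() serves guest accesses out of that buffer. A minimal version against a raw device fd might look like this (names are placeholders; the real code also tolerates partial read failures by pre-filling the buffer with 0xff):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Read the ROM region into a malloc'ed buffer; returns its size or -1. */
static ssize_t read_device_rom(int device_fd, void **buf)
{
    struct vfio_region_info info = {
        .argsz = sizeof(info),
        .index = VFIO_PCI_ROM_REGION_INDEX,
    };
    ssize_t done = 0;

    if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) || !info.size) {
        return -1;    /* no ROM exposed, like the rom_read_failed path above */
    }

    *buf = malloc(info.size);
    while ((size_t)done < info.size) {
        ssize_t n = pread(device_fd, (char *)*buf + done,
                          info.size - done, info.offset + done);
        if (n < 0 && errno == EINTR) {
            continue;
        }
        if (n <= 0) {
            free(*buf);
            return -1;
        }
        done += n;
    }
    return done;
}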
1011 static void vfio_pci_size_rom(VFIOPCIDevice *vdev) in vfio_pci_size_rom() argument
1014 off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; in vfio_pci_size_rom()
1015 DeviceState *dev = DEVICE(vdev); in vfio_pci_size_rom()
1017 int fd = vdev->vbasedev.fd; in vfio_pci_size_rom()
1019 if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { in vfio_pci_size_rom()
1021 if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) { in vfio_pci_size_rom()
1024 vdev->vbasedev.name); in vfio_pci_size_rom()
1038 error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name); in vfio_pci_size_rom()
1048 if (vfio_opt_rom_in_denylist(vdev)) { in vfio_pci_size_rom()
1052 vdev->vbasedev.name); in vfio_pci_size_rom()
1058 vdev->vbasedev.name); in vfio_pci_size_rom()
1064 trace_vfio_pci_size_rom(vdev->vbasedev.name, size); in vfio_pci_size_rom()
1066 name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name); in vfio_pci_size_rom()
1068 memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), in vfio_pci_size_rom()
1069 &vfio_rom_ops, vdev, name, size); in vfio_pci_size_rom()
1072 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, in vfio_pci_size_rom()
1073 PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); in vfio_pci_size_rom()
1075 vdev->rom_read_failed = false; in vfio_pci_size_rom()
1171 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_sub_page_bar_update_mapping() local
1172 VFIORegion *region = &vdev->bars[bar].region; in vfio_sub_page_bar_update_mapping()
1186 base_mr = vdev->bars[bar].mr; in vfio_sub_page_bar_update_mapping()
1198 if (vdev->bars[bar].size < size) { in vfio_sub_page_bar_update_mapping()
1203 if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) { in vfio_sub_page_bar_update_mapping()
1217 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_pci_read_config() local
1220 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); in vfio_pci_read_config()
1230 ret = pread(vdev->vbasedev.fd, &phys_val, len, in vfio_pci_read_config()
1231 vdev->config_offset + addr); in vfio_pci_read_config()
1234 __func__, vdev->vbasedev.name, addr, len); in vfio_pci_read_config()
1242 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); in vfio_pci_read_config()
1250 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_pci_write_config() local
1253 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); in vfio_pci_write_config()
1256 if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr) in vfio_pci_write_config()
1259 __func__, vdev->vbasedev.name, addr, val, len); in vfio_pci_write_config()
1264 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { in vfio_pci_write_config()
1273 vfio_msi_enable(vdev); in vfio_pci_write_config()
1277 vfio_msi_disable(vdev); in vfio_pci_write_config()
1279 vfio_update_msi(vdev); in vfio_pci_write_config()
1291 vfio_msix_enable(vdev); in vfio_pci_write_config()
1293 vfio_msix_disable(vdev); in vfio_pci_write_config()
1308 vdev->bars[bar].region.size > 0 && in vfio_pci_write_config()
1309 vdev->bars[bar].region.size < qemu_real_host_page_size()) { in vfio_pci_write_config()
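The vfio_pci_read_config()/vfio_pci_write_config() matches merge QEMU-emulated bits with the physical config space, and the physical half is simply pread()/pwrite() at vdev->config_offset + addr, i.e. inside the VFIO_PCI_CONFIG_REGION_INDEX region. Stripped of the emulation layer, a sketch of the raw access (cfg_offset is the region offset from VFIO_DEVICE_GET_REGION_INFO; all names are placeholders):

#include <endian.h>
#include <stdint.h>
#include <unistd.h>

/* Read 'len' bytes (1, 2 or 4) at config address 'addr'.  Config space is
 * little-endian, hence the host-order conversion -- the same reason
 * vfio_pci_write_config() above stages the value in val_le. */
static uint32_t config_read(int device_fd, uint64_t cfg_offset,
                            uint32_t addr, int len)
{
    uint32_t val = 0;

    if (pread(device_fd, &val, len, cfg_offset + addr) != len) {
        return (uint32_t)-1;          /* all-ones, like a failed access */
    }
    return le32toh(val);
}

static int config_write(int device_fd, uint64_t cfg_offset,
                        uint32_t addr, uint32_t val, int len)
{
    uint32_t le = htole32(val);

    return pwrite(device_fd, &le, len, cfg_offset + addr) == len ? 0 : -1;
}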
1322 static void vfio_disable_interrupts(VFIOPCIDevice *vdev) in vfio_disable_interrupts() argument
1329 if (vdev->interrupt == VFIO_INT_MSIX) { in vfio_disable_interrupts()
1330 vfio_msix_disable(vdev); in vfio_disable_interrupts()
1331 } else if (vdev->interrupt == VFIO_INT_MSI) { in vfio_disable_interrupts()
1332 vfio_msi_disable(vdev); in vfio_disable_interrupts()
1335 if (vdev->interrupt == VFIO_INT_INTx) { in vfio_disable_interrupts()
1336 vfio_intx_disable(vdev); in vfio_disable_interrupts()
1340 static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) in vfio_msi_setup() argument
1347 if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), in vfio_msi_setup()
1348 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { in vfio_msi_setup()
1358 trace_vfio_msi_setup(vdev->vbasedev.name, pos); in vfio_msi_setup()
1360 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err); in vfio_msi_setup()
1368 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); in vfio_msi_setup()
1373 static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev) in vfio_pci_fixup_msix_region() argument
1376 VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region; in vfio_pci_fixup_msix_region()
1382 if (vfio_has_region_cap(&vdev->vbasedev, region->nr, in vfio_pci_fixup_msix_region()
1397 start = vdev->msix->table_offset & qemu_real_host_page_mask(); in vfio_pci_fixup_msix_region()
1398 end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + in vfio_pci_fixup_msix_region()
1399 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); in vfio_pci_fixup_msix_region()
1413 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1414 vdev->msix->table_bar, 0, 0); in vfio_pci_fixup_msix_region()
1418 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1419 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1426 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1427 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1438 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1439 vdev->msix->table_bar, region->mmaps[0].offset, in vfio_pci_fixup_msix_region()
1444 trace_vfio_msix_fixup(vdev->vbasedev.name, in vfio_pci_fixup_msix_region()
1445 vdev->msix->table_bar, region->mmaps[1].offset, in vfio_pci_fixup_msix_region()
1450 static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) in vfio_pci_relocate_msix() argument
1455 if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { in vfio_pci_relocate_msix()
1460 msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) + in vfio_pci_relocate_msix()
1461 (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8); in vfio_pci_relocate_msix()
1467 if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) { in vfio_pci_relocate_msix()
1478 "device %04x:%04x", vdev->vendor_id, vdev->device_id); in vfio_pci_relocate_msix()
1482 target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0); in vfio_pci_relocate_msix()
1486 if (vdev->bars[target_bar].ioport) { in vfio_pci_relocate_msix()
1493 if (!vdev->bars[target_bar].size && in vfio_pci_relocate_msix()
1494 target_bar > 0 && vdev->bars[target_bar - 1].mem64) { in vfio_pci_relocate_msix()
1501 if (vdev->bars[target_bar].size > 1 * GiB && in vfio_pci_relocate_msix()
1502 !vdev->bars[target_bar].mem64) { in vfio_pci_relocate_msix()
1513 if (!vdev->bars[target_bar].size) { in vfio_pci_relocate_msix()
1515 !vdev->bars[target_bar + 1].size) { in vfio_pci_relocate_msix()
1516 vdev->bars[target_bar].mem64 = true; in vfio_pci_relocate_msix()
1517 vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64; in vfio_pci_relocate_msix()
1519 vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH; in vfio_pci_relocate_msix()
1520 vdev->bars[target_bar].size = msix_sz; in vfio_pci_relocate_msix()
1521 vdev->msix->table_offset = 0; in vfio_pci_relocate_msix()
1523 vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2, in vfio_pci_relocate_msix()
1529 vdev->msix->table_offset = vdev->bars[target_bar].size / 2; in vfio_pci_relocate_msix()
1532 vdev->msix->table_bar = target_bar; in vfio_pci_relocate_msix()
1533 vdev->msix->pba_bar = target_bar; in vfio_pci_relocate_msix()
1535 vdev->msix->pba_offset = vdev->msix->table_offset + in vfio_pci_relocate_msix()
1536 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE); in vfio_pci_relocate_msix()
1538 trace_vfio_msix_relo(vdev->vbasedev.name, in vfio_pci_relocate_msix()
1539 vdev->msix->table_bar, vdev->msix->table_offset); in vfio_pci_relocate_msix()
1551 static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) in vfio_msix_early_setup() argument
1556 int ret, fd = vdev->vbasedev.fd; in vfio_msix_early_setup()
1561 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); in vfio_msix_early_setup()
1567 vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) { in vfio_msix_early_setup()
1573 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { in vfio_msix_early_setup()
1579 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { in vfio_msix_early_setup()
1595 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); in vfio_msix_early_setup()
1609 if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) { in vfio_msix_early_setup()
1616 if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO && in vfio_msix_early_setup()
1617 (vdev->device_id & 0xff00) == 0x5800) { in vfio_msix_early_setup()
1624 } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU, in vfio_msix_early_setup()
1627 } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { in vfio_msix_early_setup()
1635 trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar, in vfio_msix_early_setup()
1638 vdev->msix = msix; in vfio_msix_early_setup()
1640 vfio_pci_fixup_msix_region(vdev); in vfio_msix_early_setup()
1642 return vfio_pci_relocate_msix(vdev, errp); in vfio_msix_early_setup()
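vfio_msix_early_setup() above recovers the MSI-X geometry (vector count, table BAR and offset, PBA BAR and offset) by reading the capability registers straight from the config region before any BAR is mapped. The decoding itself is plain PCI-spec bit-slicing; a sketch using the masks from linux/pci_regs.h (ctrl/table/pba are the raw PCI_MSIX_FLAGS, PCI_MSIX_TABLE and PCI_MSIX_PBA registers, assumed already read the way the pread() calls above read them):

#include <stdint.h>
#include <linux/pci_regs.h>

struct msix_geometry {
    uint16_t entries;       /* number of MSI-X vectors */
    uint8_t  table_bar;     /* BAR (BIR) holding the vector table */
    uint32_t table_offset;  /* byte offset of the table within that BAR */
    uint8_t  pba_bar;
    uint32_t pba_offset;
};

/* Decode the three MSI-X capability registers. */
static void msix_decode(uint16_t ctrl, uint32_t table, uint32_t pba,
                        struct msix_geometry *g)
{
    g->entries      = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;  /* field stores N-1 */
    g->table_bar    = table & PCI_MSIX_TABLE_BIR;
    g->table_offset = table & PCI_MSIX_TABLE_OFFSET;      /* 8-byte aligned */
    g->pba_bar      = pba & PCI_MSIX_PBA_BIR;
    g->pba_offset   = pba & PCI_MSIX_PBA_OFFSET;
}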
1645 static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) in vfio_msix_setup() argument
1650 vdev->msix->pending = g_new0(unsigned long, in vfio_msix_setup()
1651 BITS_TO_LONGS(vdev->msix->entries)); in vfio_msix_setup()
1652 ret = msix_init(&vdev->pdev, vdev->msix->entries, in vfio_msix_setup()
1653 vdev->bars[vdev->msix->table_bar].mr, in vfio_msix_setup()
1654 vdev->msix->table_bar, vdev->msix->table_offset, in vfio_msix_setup()
1655 vdev->bars[vdev->msix->pba_bar].mr, in vfio_msix_setup()
1656 vdev->msix->pba_bar, vdev->msix->pba_offset, pos, in vfio_msix_setup()
1684 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); in vfio_msix_setup()
1696 memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false); in vfio_msix_setup()
1702 static void vfio_teardown_msi(VFIOPCIDevice *vdev) in vfio_teardown_msi() argument
1704 msi_uninit(&vdev->pdev); in vfio_teardown_msi()
1706 if (vdev->msix) { in vfio_teardown_msi()
1707 msix_uninit(&vdev->pdev, in vfio_teardown_msi()
1708 vdev->bars[vdev->msix->table_bar].mr, in vfio_teardown_msi()
1709 vdev->bars[vdev->msix->pba_bar].mr); in vfio_teardown_msi()
1710 g_free(vdev->msix->pending); in vfio_teardown_msi()
1717 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled) in vfio_mmap_set_enabled() argument
1722 vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled); in vfio_mmap_set_enabled()
1726 static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr) in vfio_bar_prepare() argument
1728 VFIOBAR *bar = &vdev->bars[nr]; in vfio_bar_prepare()
1739 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), in vfio_bar_prepare()
1740 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); in vfio_bar_prepare()
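vfio_bar_prepare() above classifies each BAR from its raw config value: I/O vs. memory, 64-bit (which consumes the following BAR slot, as the mem64 checks in vfio_pci_relocate_msix() rely on) and prefetchable. The flag decoding is standard PCI; a small sketch with the linux/pci_regs.h masks (struct and function names here are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <linux/pci_regs.h>

struct bar_attrs {
    bool ioport;    /* I/O port BAR rather than MMIO */
    bool mem64;     /* 64-bit memory BAR */
    bool prefetch;  /* prefetchable memory */
};

/* Decode the low flag bits of a raw BAR register value. */
static struct bar_attrs decode_bar(uint32_t raw)
{
    struct bar_attrs a = { 0 };

    a.ioport = raw & PCI_BASE_ADDRESS_SPACE_IO;
    if (!a.ioport) {
        a.mem64 = (raw & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
                  PCI_BASE_ADDRESS_MEM_TYPE_64;
        a.prefetch = raw & PCI_BASE_ADDRESS_MEM_PREFETCH;
    }
    return a;
}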
1754 static void vfio_bars_prepare(VFIOPCIDevice *vdev) in vfio_bars_prepare() argument
1759 vfio_bar_prepare(vdev, i); in vfio_bars_prepare()
1763 static void vfio_bar_register(VFIOPCIDevice *vdev, int nr) in vfio_bar_register() argument
1765 VFIOBAR *bar = &vdev->bars[nr]; in vfio_bar_register()
1773 name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr); in vfio_bar_register()
1774 memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size); in vfio_bar_register()
1782 vdev->vbasedev.name, nr); in vfio_bar_register()
1786 pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr); in vfio_bar_register()
1789 static void vfio_bars_register(VFIOPCIDevice *vdev) in vfio_bars_register() argument
1794 vfio_bar_register(vdev, i); in vfio_bars_register()
1798 static void vfio_bars_exit(VFIOPCIDevice *vdev) in vfio_bars_exit() argument
1803 VFIOBAR *bar = &vdev->bars[i]; in vfio_bars_exit()
1805 vfio_bar_quirk_exit(vdev, i); in vfio_bars_exit()
1812 if (vdev->vga) { in vfio_bars_exit()
1813 pci_unregister_vga(&vdev->pdev); in vfio_bars_exit()
1814 vfio_vga_quirk_exit(vdev); in vfio_bars_exit()
1818 static void vfio_bars_finalize(VFIOPCIDevice *vdev) in vfio_bars_finalize() argument
1823 VFIOBAR *bar = &vdev->bars[i]; in vfio_bars_finalize()
1825 vfio_bar_quirk_finalize(vdev, i); in vfio_bars_finalize()
1835 if (vdev->vga) { in vfio_bars_finalize()
1836 vfio_vga_quirk_finalize(vdev); in vfio_bars_finalize()
1837 for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) { in vfio_bars_finalize()
1838 object_unparent(OBJECT(&vdev->vga->region[i].mem)); in vfio_bars_finalize()
1840 g_free(vdev->vga); in vfio_bars_finalize()
1882 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos, in vfio_add_emulated_word() argument
1885 vfio_set_word_bits(vdev->pdev.config + pos, val, mask); in vfio_add_emulated_word()
1886 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); in vfio_add_emulated_word()
1887 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); in vfio_add_emulated_word()
1895 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos, in vfio_add_emulated_long() argument
1898 vfio_set_long_bits(vdev->pdev.config + pos, val, mask); in vfio_add_emulated_long()
1899 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); in vfio_add_emulated_long()
1900 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); in vfio_add_emulated_long()
1903 static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev) in vfio_pci_enable_rp_atomics() argument
1907 PCIBus *bus = pci_get_bus(&vdev->pdev); in vfio_pci_enable_rp_atomics()
1923 vdev->pdev.devfn || in vfio_pci_enable_rp_atomics()
1924 vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { in vfio_pci_enable_rp_atomics()
1937 info = vfio_get_device_info(vdev->vbasedev.fd); in vfio_pci_enable_rp_atomics()
1963 vdev->clear_parent_atomics_on_exit = true; in vfio_pci_enable_rp_atomics()
1966 static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev) in vfio_pci_disable_rp_atomics() argument
1968 if (vdev->clear_parent_atomics_on_exit) { in vfio_pci_disable_rp_atomics()
1969 PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev; in vfio_pci_disable_rp_atomics()
1978 static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, in vfio_setup_pcie_cap() argument
1984 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); in vfio_setup_pcie_cap()
1996 if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) { in vfio_setup_pcie_cap()
1997 PCIBus *bus = pci_get_bus(&vdev->pdev); in vfio_setup_pcie_cap()
2029 } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) { in vfio_setup_pcie_cap()
2035 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, in vfio_setup_pcie_cap()
2041 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); in vfio_setup_pcie_cap()
2042 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); in vfio_setup_pcie_cap()
2043 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); in vfio_setup_pcie_cap()
2053 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); in vfio_setup_pcie_cap()
2054 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); in vfio_setup_pcie_cap()
2055 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); in vfio_setup_pcie_cap()
2073 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, in vfio_setup_pcie_cap()
2076 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, in vfio_setup_pcie_cap()
2079 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); in vfio_setup_pcie_cap()
2082 vfio_pci_enable_rp_atomics(vdev); in vfio_setup_pcie_cap()
2093 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, in vfio_setup_pcie_cap()
2097 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size, in vfio_setup_pcie_cap()
2103 vdev->pdev.exp.exp_cap = pos; in vfio_setup_pcie_cap()
2108 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) in vfio_check_pcie_flr() argument
2110 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); in vfio_check_pcie_flr()
2113 trace_vfio_check_pcie_flr(vdev->vbasedev.name); in vfio_check_pcie_flr()
2114 vdev->has_flr = true; in vfio_check_pcie_flr()
2118 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) in vfio_check_pm_reset() argument
2120 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); in vfio_check_pm_reset()
2123 trace_vfio_check_pm_reset(vdev->vbasedev.name); in vfio_check_pm_reset()
2124 vdev->has_pm_reset = true; in vfio_check_pm_reset()
2128 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) in vfio_check_af_flr() argument
2130 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); in vfio_check_af_flr()
2133 trace_vfio_check_af_flr(vdev->vbasedev.name); in vfio_check_af_flr()
2134 vdev->has_flr = true; in vfio_check_af_flr()
2138 static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos, in vfio_add_vendor_specific_cap() argument
2141 PCIDevice *pdev = &vdev->pdev; in vfio_add_vendor_specific_cap()
2153 if (vdev->skip_vsc_check && size > 3) { in vfio_add_vendor_specific_cap()
2160 static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) in vfio_add_std_cap() argument
2163 PCIDevice *pdev = &vdev->pdev; in vfio_add_std_cap()
2186 if (!vfio_add_std_cap(vdev, next, errp)) { in vfio_add_std_cap()
2192 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; in vfio_add_std_cap()
2193 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; in vfio_add_std_cap()
2195 if (!vfio_add_virt_caps(vdev, errp)) { in vfio_add_std_cap()
2204 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff); in vfio_add_std_cap()
2208 ret = vfio_msi_setup(vdev, pos, errp); in vfio_add_std_cap()
2211 vfio_check_pcie_flr(vdev, pos); in vfio_add_std_cap()
2212 ret = vfio_setup_pcie_cap(vdev, pos, size, errp); in vfio_add_std_cap()
2215 ret = vfio_msix_setup(vdev, pos, errp); in vfio_add_std_cap()
2218 vfio_check_pm_reset(vdev, pos); in vfio_add_std_cap()
2219 vdev->pm_cap = pos; in vfio_add_std_cap()
2223 vfio_check_af_flr(vdev, pos); in vfio_add_std_cap()
2227 ret = vfio_add_vendor_specific_cap(vdev, pos, size, errp); in vfio_add_std_cap()
2243 static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) in vfio_setup_rebar_ecap() argument
2248 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL); in vfio_setup_rebar_ecap()
2255 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8)); in vfio_setup_rebar_ecap()
2284 vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0); in vfio_setup_rebar_ecap()
2285 vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0); in vfio_setup_rebar_ecap()
2291 static void vfio_add_ext_cap(VFIOPCIDevice *vdev) in vfio_add_ext_cap() argument
2293 PCIDevice *pdev = &vdev->pdev; in vfio_add_ext_cap()
2311 config = g_memdup(pdev->config, vdev->config_size); in vfio_add_ext_cap()
2340 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0); in vfio_add_ext_cap()
2357 pci_long_test_and_set_mask(vdev->emulated_config_bits + next, in vfio_add_ext_cap()
2364 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next); in vfio_add_ext_cap()
2367 if (!vfio_setup_rebar_ecap(vdev, next)) { in vfio_add_ext_cap()
2386 static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp) in vfio_add_capabilities() argument
2388 PCIDevice *pdev = &vdev->pdev; in vfio_add_capabilities()
2395 if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) { in vfio_add_capabilities()
2399 vfio_add_ext_cap(vdev); in vfio_add_capabilities()
2403 void vfio_pci_pre_reset(VFIOPCIDevice *vdev) in vfio_pci_pre_reset() argument
2405 PCIDevice *pdev = &vdev->pdev; in vfio_pci_pre_reset()
2408 vfio_disable_interrupts(vdev); in vfio_pci_pre_reset()
2411 if (vdev->pm_cap) { in vfio_pci_pre_reset()
2415 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); in vfio_pci_pre_reset()
2419 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); in vfio_pci_pre_reset()
2421 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); in vfio_pci_pre_reset()
2440 void vfio_pci_post_reset(VFIOPCIDevice *vdev) in vfio_pci_post_reset() argument
2445 if (!vfio_intx_enable(vdev, &err)) { in vfio_pci_post_reset()
2446 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_pci_post_reset()
2450 off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr); in vfio_pci_post_reset()
2454 if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) { in vfio_pci_post_reset()
2456 vdev->vbasedev.name, nr); in vfio_pci_post_reset()
2460 vfio_quirk_reset(vdev); in vfio_pci_post_reset()
2473 int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, in vfio_pci_get_pci_hot_reset_info() argument
2484 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); in vfio_pci_get_pci_hot_reset_info()
2488 if (!vdev->has_pm_reset) { in vfio_pci_get_pci_hot_reset_info()
2490 "no available reset mechanism.", vdev->vbasedev.name); in vfio_pci_get_pci_hot_reset_info()
2499 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); in vfio_pci_get_pci_hot_reset_info()
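vfio_pci_get_pci_hot_reset_info() above uses the usual VFIO argsz handshake: call VFIO_DEVICE_GET_PCI_HOT_RESET_INFO once with just the header to learn how many dependent devices share the reset domain, then call again with room for the array. A hedged sketch of that pattern (caller frees the result):

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Return a malloc'ed vfio_pci_hot_reset_info with its devices[] array
 * filled in, or NULL on failure. */
static struct vfio_pci_hot_reset_info *get_hot_reset_info(int device_fd)
{
    struct vfio_pci_hot_reset_info probe = { .argsz = sizeof(probe) };
    struct vfio_pci_hot_reset_info *info;
    size_t sz;

    /* Undersized call: the kernel reports ENOSPC but fills in .count. */
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &probe) &&
        errno != ENOSPC) {
        return NULL;
    }

    sz = sizeof(*info) + probe.count * sizeof(struct vfio_pci_dependent_device);
    info = calloc(1, sz);
    info->argsz = sz;
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
        free(info);
        return NULL;
    }
    return info;
}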
2511 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) in vfio_pci_hot_reset() argument
2513 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_pci_hot_reset()
2534 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev) in vfio_pci_hot_reset_one() argument
2536 return vfio_pci_hot_reset(vdev, true); in vfio_pci_hot_reset_one()
2541 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_pci_hot_reset_multi() local
2542 return vfio_pci_hot_reset(vdev, false); in vfio_pci_hot_reset_multi()
2547 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_pci_compute_needs_reset() local
2548 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { in vfio_pci_compute_needs_reset()
2555 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_pci_get_object() local
2557 return OBJECT(vdev); in vfio_pci_get_object()
2569 VFIOPCIDevice *vdev = opaque; in vfio_display_migration_needed() local
2578 return vdev->ramfb_migrate == ON_OFF_AUTO_ON || in vfio_display_migration_needed()
2579 (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb); in vfio_display_migration_needed()
2611 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_pci_save_config() local
2613 return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL, in vfio_pci_save_config()
2619 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); in vfio_pci_load_config() local
2620 PCIDevice *pdev = &vdev->pdev; in vfio_pci_load_config()
2628 ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); in vfio_pci_load_config()
2642 vdev->bars[bar].region.size > 0 && in vfio_pci_load_config()
2643 vdev->bars[bar].region.size < qemu_real_host_page_size()) { in vfio_pci_load_config()
2649 vfio_msi_enable(vdev); in vfio_pci_load_config()
2651 vfio_msix_enable(vdev); in vfio_pci_load_config()
2666 bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) in vfio_populate_vga() argument
2668 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_populate_vga()
2689 vdev->vga = g_new0(VFIOVGA, 1); in vfio_populate_vga()
2691 vdev->vga->fd_offset = reg_info->offset; in vfio_populate_vga()
2692 vdev->vga->fd = vdev->vbasedev.fd; in vfio_populate_vga()
2694 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; in vfio_populate_vga()
2695 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; in vfio_populate_vga()
2696 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks); in vfio_populate_vga()
2698 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem, in vfio_populate_vga()
2699 OBJECT(vdev), &vfio_vga_ops, in vfio_populate_vga()
2700 &vdev->vga->region[QEMU_PCI_VGA_MEM], in vfio_populate_vga()
2704 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; in vfio_populate_vga()
2705 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; in vfio_populate_vga()
2706 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks); in vfio_populate_vga()
2708 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, in vfio_populate_vga()
2709 OBJECT(vdev), &vfio_vga_ops, in vfio_populate_vga()
2710 &vdev->vga->region[QEMU_PCI_VGA_IO_LO], in vfio_populate_vga()
2714 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; in vfio_populate_vga()
2715 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; in vfio_populate_vga()
2716 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks); in vfio_populate_vga()
2718 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, in vfio_populate_vga()
2719 OBJECT(vdev), &vfio_vga_ops, in vfio_populate_vga()
2720 &vdev->vga->region[QEMU_PCI_VGA_IO_HI], in vfio_populate_vga()
2724 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, in vfio_populate_vga()
2725 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, in vfio_populate_vga()
2726 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); in vfio_populate_vga()
2731 static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) in vfio_populate_device() argument
2733 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_populate_device()
2758 ret = vfio_region_setup(OBJECT(vdev), vbasedev, in vfio_populate_device()
2759 &vdev->bars[i].region, i, name); in vfio_populate_device()
2767 QLIST_INIT(&vdev->bars[i].quirks); in vfio_populate_device()
2777 trace_vfio_populate_device_config(vdev->vbasedev.name, in vfio_populate_device()
2782 vdev->config_size = reg_info->size; in vfio_populate_device()
2783 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { in vfio_populate_device()
2784 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; in vfio_populate_device()
2786 vdev->config_offset = reg_info->offset; in vfio_populate_device()
2788 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) { in vfio_populate_device()
2789 if (!vfio_populate_vga(vdev, errp)) { in vfio_populate_device()
2798 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); in vfio_populate_device()
2803 vdev->pci_aer = true; in vfio_populate_device()
2813 static void vfio_pci_put_device(VFIOPCIDevice *vdev) in vfio_pci_put_device() argument
2815 vfio_detach_device(&vdev->vbasedev); in vfio_pci_put_device()
2817 g_free(vdev->vbasedev.name); in vfio_pci_put_device()
2818 g_free(vdev->msix); in vfio_pci_put_device()
2823 VFIOPCIDevice *vdev = opaque; in vfio_err_notifier_handler() local
2825 if (!event_notifier_test_and_clear(&vdev->err_notifier)) { in vfio_err_notifier_handler()
2838 …etected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name); in vfio_err_notifier_handler()
2849 static void vfio_register_err_notifier(VFIOPCIDevice *vdev) in vfio_register_err_notifier() argument
2854 if (!vdev->pci_aer) { in vfio_register_err_notifier()
2858 if (event_notifier_init(&vdev->err_notifier, 0)) { in vfio_register_err_notifier()
2860 vdev->pci_aer = false; in vfio_register_err_notifier()
2864 fd = event_notifier_get_fd(&vdev->err_notifier); in vfio_register_err_notifier()
2865 qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev); in vfio_register_err_notifier()
2867 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, in vfio_register_err_notifier()
2869 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_register_err_notifier()
2870 qemu_set_fd_handler(fd, NULL, NULL, vdev); in vfio_register_err_notifier()
2871 event_notifier_cleanup(&vdev->err_notifier); in vfio_register_err_notifier()
2872 vdev->pci_aer = false; in vfio_register_err_notifier()
2876 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev) in vfio_unregister_err_notifier() argument
2880 if (!vdev->pci_aer) { in vfio_unregister_err_notifier()
2884 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, in vfio_unregister_err_notifier()
2886 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_unregister_err_notifier()
2888 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), in vfio_unregister_err_notifier()
2889 NULL, NULL, vdev); in vfio_unregister_err_notifier()
2890 event_notifier_cleanup(&vdev->err_notifier); in vfio_unregister_err_notifier()
2895 VFIOPCIDevice *vdev = opaque; in vfio_req_notifier_handler() local
2898 if (!event_notifier_test_and_clear(&vdev->req_notifier)) { in vfio_req_notifier_handler()
2902 qdev_unplug(DEVICE(vdev), &err); in vfio_req_notifier_handler()
2904 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_req_notifier_handler()
2908 static void vfio_register_req_notifier(VFIOPCIDevice *vdev) in vfio_register_req_notifier() argument
2915 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) { in vfio_register_req_notifier()
2919 if (ioctl(vdev->vbasedev.fd, in vfio_register_req_notifier()
2924 if (event_notifier_init(&vdev->req_notifier, 0)) { in vfio_register_req_notifier()
2929 fd = event_notifier_get_fd(&vdev->req_notifier); in vfio_register_req_notifier()
2930 qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev); in vfio_register_req_notifier()
2932 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, in vfio_register_req_notifier()
2934 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_register_req_notifier()
2935 qemu_set_fd_handler(fd, NULL, NULL, vdev); in vfio_register_req_notifier()
2936 event_notifier_cleanup(&vdev->req_notifier); in vfio_register_req_notifier()
2938 vdev->req_enabled = true; in vfio_register_req_notifier()
2942 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) in vfio_unregister_req_notifier() argument
2946 if (!vdev->req_enabled) { in vfio_unregister_req_notifier()
2950 if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, in vfio_unregister_req_notifier()
2952 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); in vfio_unregister_req_notifier()
2954 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier), in vfio_unregister_req_notifier()
2955 NULL, NULL, vdev); in vfio_unregister_req_notifier()
2956 event_notifier_cleanup(&vdev->req_notifier); in vfio_unregister_req_notifier()
2958 vdev->req_enabled = false; in vfio_unregister_req_notifier()
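The err/req notifier matches share one pattern: check that the kernel actually exposes the IRQ index (vfio_register_req_notifier() does this with an ioctl; for the error index the earlier vfio_populate_device() lines set vdev->pci_aer from the same VFIO_DEVICE_GET_IRQ_INFO query), then attach an eventfd with the DATA_EVENTFD/ACTION_TRIGGER call already sketched for INTx and MSI. Only the probe differs, so just that part as a sketch:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Return non-zero if IRQ index 'index' (e.g. VFIO_PCI_ERR_IRQ_INDEX or
 * VFIO_PCI_REQ_IRQ_INDEX) is available on this device. */
static int irq_index_available(int device_fd, unsigned index)
{
    struct vfio_irq_info info = { .argsz = sizeof(info), .index = index };

    return !ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info) && info.count > 0;
}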
2964 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_realize() local
2965 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_realize()
2971 if (!(~vdev->host.domain || ~vdev->host.bus || in vfio_realize()
2972 ~vdev->host.slot || ~vdev->host.function)) { in vfio_realize()
2983 vdev->host.domain, vdev->host.bus, in vfio_realize()
2984 vdev->host.slot, vdev->host.function); in vfio_realize()
3007 if (!qemu_uuid_is_null(&vdev->vf_token)) { in vfio_realize()
3008 qemu_uuid_unparse(&vdev->vf_token, uuid); in vfio_realize()
3019 if (!vfio_populate_device(vdev, errp)) { in vfio_realize()
3024 ret = pread(vbasedev->fd, vdev->pdev.config, in vfio_realize()
3025 MIN(pci_config_size(&vdev->pdev), vdev->config_size), in vfio_realize()
3026 vdev->config_offset); in vfio_realize()
3027 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { in vfio_realize()
3034 vdev->emulated_config_bits = g_malloc0(vdev->config_size); in vfio_realize()
3037 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); in vfio_realize()
3039 memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4); in vfio_realize()
3046 if (vdev->vendor_id != PCI_ANY_ID) { in vfio_realize()
3047 if (vdev->vendor_id >= 0xffff) { in vfio_realize()
3051 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0); in vfio_realize()
3052 trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id); in vfio_realize()
3054 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); in vfio_realize()
3057 if (vdev->device_id != PCI_ANY_ID) { in vfio_realize()
3058 if (vdev->device_id > 0xffff) { in vfio_realize()
3062 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0); in vfio_realize()
3063 trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id); in vfio_realize()
3065 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); in vfio_realize()
3068 if (vdev->sub_vendor_id != PCI_ANY_ID) { in vfio_realize()
3069 if (vdev->sub_vendor_id > 0xffff) { in vfio_realize()
3073 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID, in vfio_realize()
3074 vdev->sub_vendor_id, ~0); in vfio_realize()
3076 vdev->sub_vendor_id); in vfio_realize()
3079 if (vdev->sub_device_id != PCI_ANY_ID) { in vfio_realize()
3080 if (vdev->sub_device_id > 0xffff) { in vfio_realize()
3084 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0); in vfio_realize()
3086 vdev->sub_device_id); in vfio_realize()
3090 vdev->emulated_config_bits[PCI_HEADER_TYPE] = in vfio_realize()
3094 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { in vfio_realize()
3095 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; in vfio_realize()
3097 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; in vfio_realize()
3105 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); in vfio_realize()
3106 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); in vfio_realize()
3108 vfio_pci_size_rom(vdev); in vfio_realize()
3110 vfio_bars_prepare(vdev); in vfio_realize()
3112 if (!vfio_msix_early_setup(vdev, errp)) { in vfio_realize()
3116 vfio_bars_register(vdev); in vfio_realize()
3124 if (!vfio_add_capabilities(vdev, errp)) { in vfio_realize()
3128 if (vdev->vga) { in vfio_realize()
3129 vfio_vga_quirk_setup(vdev); in vfio_realize()
3133 vfio_bar_quirk_setup(vdev, i); in vfio_realize()
3136 if (!vdev->igd_opregion && in vfio_realize()
3137 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) { in vfio_realize()
3140 if (vdev->pdev.qdev.hotplugged) { in vfio_realize()
3156 if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) { in vfio_realize()
3163 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, in vfio_realize()
3168 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, in vfio_realize()
3169 vdev->msi_cap_size); in vfio_realize()
3172 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { in vfio_realize()
3173 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, in vfio_realize()
3174 vfio_intx_mmap_enable, vdev); in vfio_realize()
3175 pci_device_set_intx_routing_notifier(&vdev->pdev, in vfio_realize()
3177 vdev->irqchip_change_notifier.notify = vfio_irqchip_change; in vfio_realize()
3178 kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier); in vfio_realize()
3179 if (!vfio_intx_enable(vdev, errp)) { in vfio_realize()
3184 if (vdev->display != ON_OFF_AUTO_OFF) { in vfio_realize()
3185 if (!vfio_display_probe(vdev, errp)) { in vfio_realize()
3189 if (vdev->enable_ramfb && vdev->dpy == NULL) { in vfio_realize()
3193 if (vdev->display_xres || vdev->display_yres) { in vfio_realize()
3194 if (vdev->dpy == NULL) { in vfio_realize()
3198 if (vdev->dpy->edid_regs == NULL) { in vfio_realize()
3204 if (vdev->ramfb_migrate == ON_OFF_AUTO_ON && !vdev->enable_ramfb) { in vfio_realize()
3207 vdev->ramfb_migrate = ON_OFF_AUTO_OFF; in vfio_realize()
3210 if (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO) { in vfio_realize()
3211 vdev->ramfb_migrate = ON_OFF_AUTO_OFF; in vfio_realize()
3212 } else if (vdev->ramfb_migrate == ON_OFF_AUTO_ON) { in vfio_realize()
3224 vfio_register_err_notifier(vdev); in vfio_realize()
3225 vfio_register_req_notifier(vdev); in vfio_realize()
3226 vfio_setup_resetfn_quirk(vdev); in vfio_realize()
3231 if (vdev->interrupt == VFIO_INT_INTx) { in vfio_realize()
3232 vfio_intx_disable(vdev); in vfio_realize()
3234 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); in vfio_realize()
3235 if (vdev->irqchip_change_notifier.notify) { in vfio_realize()
3236 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); in vfio_realize()
3238 if (vdev->intx.mmap_timer) { in vfio_realize()
3239 timer_free(vdev->intx.mmap_timer); in vfio_realize()
3246 vfio_teardown_msi(vdev); in vfio_realize()
3247 vfio_bars_exit(vdev); in vfio_realize()
3254 VFIOPCIDevice *vdev = VFIO_PCI(obj); in vfio_instance_finalize() local
3256 vfio_display_finalize(vdev); in vfio_instance_finalize()
3257 vfio_bars_finalize(vdev); in vfio_instance_finalize()
3258 g_free(vdev->emulated_config_bits); in vfio_instance_finalize()
3259 g_free(vdev->rom); in vfio_instance_finalize()
3265 * g_free(vdev->igd_opregion); in vfio_instance_finalize()
3267 vfio_pci_put_device(vdev); in vfio_instance_finalize()
3272 VFIOPCIDevice *vdev = VFIO_PCI(pdev); in vfio_exitfn() local
3273 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_exitfn()
3275 vfio_unregister_req_notifier(vdev); in vfio_exitfn()
3276 vfio_unregister_err_notifier(vdev); in vfio_exitfn()
3277 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); in vfio_exitfn()
3278 if (vdev->irqchip_change_notifier.notify) { in vfio_exitfn()
3279 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); in vfio_exitfn()
3281 vfio_disable_interrupts(vdev); in vfio_exitfn()
3282 if (vdev->intx.mmap_timer) { in vfio_exitfn()
3283 timer_free(vdev->intx.mmap_timer); in vfio_exitfn()
3285 vfio_teardown_msi(vdev); in vfio_exitfn()
3286 vfio_pci_disable_rp_atomics(vdev); in vfio_exitfn()
3287 vfio_bars_exit(vdev); in vfio_exitfn()
3296 VFIOPCIDevice *vdev = VFIO_PCI(dev); in vfio_pci_reset() local
3298 trace_vfio_pci_reset(vdev->vbasedev.name); in vfio_pci_reset()
3300 vfio_pci_pre_reset(vdev); in vfio_pci_reset()
3302 if (vdev->display != ON_OFF_AUTO_OFF) { in vfio_pci_reset()
3303 vfio_display_reset(vdev); in vfio_pci_reset()
3306 if (vdev->resetfn && !vdev->resetfn(vdev)) { in vfio_pci_reset()
3310 if (vdev->vbasedev.reset_works && in vfio_pci_reset()
3311 (vdev->has_flr || !vdev->has_pm_reset) && in vfio_pci_reset()
3312 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { in vfio_pci_reset()
3313 trace_vfio_pci_reset_flr(vdev->vbasedev.name); in vfio_pci_reset()
3318 if (!vfio_pci_hot_reset_one(vdev)) { in vfio_pci_reset()
3323 if (vdev->vbasedev.reset_works && vdev->has_pm_reset && in vfio_pci_reset()
3324 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { in vfio_pci_reset()
3325 trace_vfio_pci_reset_pm(vdev->vbasedev.name); in vfio_pci_reset()
3330 vfio_pci_post_reset(vdev); in vfio_pci_reset()
3336 VFIOPCIDevice *vdev = VFIO_PCI(obj); in vfio_instance_init() local
3337 VFIODevice *vbasedev = &vdev->vbasedev; in vfio_instance_init()
3339 device_add_bootindex_property(obj, &vdev->bootindex, in vfio_instance_init()
3342 vdev->host.domain = ~0U; in vfio_instance_init()
3343 vdev->host.bus = ~0U; in vfio_instance_init()
3344 vdev->host.slot = ~0U; in vfio_instance_init()
3345 vdev->host.function = ~0U; in vfio_instance_init()
3348 DEVICE(vdev), false); in vfio_instance_init()
3350 vdev->nv_gpudirect_clique = 0xFF; in vfio_instance_init()