Lines matching +full:close +full:- +full:range (search hits from QEMU's legacy VFIO container code, hw/vfio/container.c; each hit is prefixed with its line number in that file and suffixed with the enclosing function)

10  * the COPYING file in the top-level directory.
12 * Based on qemu-kvm device-assignment:
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
25 #include "hw/vfio/vfio-common.h"
26 #include "exec/address-spaces.h"
29 #include "qemu/error-report.h"
30 #include "qemu/range.h"
41 switch (container->iommu_type) { in vfio_ram_block_discard_disable()
55 * required by RamDiscardManager. We would have to special-case sections in vfio_ram_block_discard_disable()
66 const VFIOContainerBase *bcontainer = &container->bcontainer; in vfio_dma_unmap_bitmap()
79 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); in vfio_dma_unmap_bitmap()
80 unmap->iova = iova; in vfio_dma_unmap_bitmap()
81 unmap->size = size; in vfio_dma_unmap_bitmap()
82 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; in vfio_dma_unmap_bitmap()
83 bitmap = (struct vfio_bitmap *)&unmap->data; in vfio_dma_unmap_bitmap()
90 bitmap->pgsize = qemu_real_host_page_size(); in vfio_dma_unmap_bitmap()
91 bitmap->size = vbmap.size; in vfio_dma_unmap_bitmap()
92 bitmap->data = (__u64 *)vbmap.bitmap; in vfio_dma_unmap_bitmap()
94 if (vbmap.size > bcontainer->max_dirty_bitmap_size) { in vfio_dma_unmap_bitmap()
96 ret = -E2BIG; in vfio_dma_unmap_bitmap()
100 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); in vfio_dma_unmap_bitmap()
103 iotlb->translated_addr, vbmap.pages); in vfio_dma_unmap_bitmap()
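
For context, vfio_dma_unmap_bitmap() appends a struct vfio_bitmap to the unmap request so the kernel reports which pages of the range were dirtied while it tears the mapping down. A minimal standalone sketch of that type1 UAPI pattern follows; the function and parameter names are illustrative, not QEMU's.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

/* Unmap [iova, iova+size) and collect the dirty bitmap in one ioctl.
 * 'bits'/'bits_bytes' describe caller-provided bitmap storage; one bit
 * covers 'page_size' bytes of IOVA space.  Assumes dirty tracking has
 * already been started on the container. */
static int unmap_and_get_dirty(int container_fd, uint64_t iova, uint64_t size,
                               uint64_t page_size, uint64_t *bits,
                               uint64_t bits_bytes)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    int ret;

    /* The bitmap descriptor travels in the flexible data[] tail. */
    unmap = calloc(1, sizeof(*unmap) + sizeof(*bitmap));
    if (!unmap) {
        return -ENOMEM;
    }
    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    unmap->iova = iova;
    unmap->size = size;

    bitmap = (struct vfio_bitmap *)&unmap->data;
    bitmap->pgsize = page_size;   /* granularity of one bit */
    bitmap->size = bits_bytes;    /* bytes available at 'data' */
    bitmap->data = (__u64 *)bits;

    ret = ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap) ? -errno : 0;
    free(unmap);
    return ret;
}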
116 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
136 bcontainer->dirty_pages_supported) { in vfio_legacy_dma_unmap()
143 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { in vfio_legacy_dma_unmap()
145 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c in vfio_legacy_dma_unmap()
146 * v4.15) where an overflow in its wrap-around check prevents us from in vfio_legacy_dma_unmap()
148 * condition and re-try the unmap excluding the last page. The in vfio_legacy_dma_unmap()
157 container->iommu_type == VFIO_TYPE1v2_IOMMU) { in vfio_legacy_dma_unmap()
159 unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); in vfio_legacy_dma_unmap()
163 return -errno; in vfio_legacy_dma_unmap()
168 iotlb->translated_addr, &local_err); in vfio_legacy_dma_unmap()
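
The off-by-one workaround above retries the unmap with the last IOMMU page dropped whenever the kernel rejects a range that ends exactly at the top of the 64-bit IOVA space. A standalone sketch of just that retry loop (reusing the includes from the first sketch; names are illustrative):

/* 'min_pgsize' is the smallest IOMMU page size the container supports. */
static int unmap_with_overflow_workaround(int container_fd, uint64_t iova,
                                          uint64_t size, uint64_t min_pgsize)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .iova = iova,
        .size = size,
    };

    while (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size)) {
            /* iova + size wrapped to 0: exclude the last page and retry. */
            unmap.size -= min_pgsize;
            continue;
        }
        return -errno;
    }
    return 0;
}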
200 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || in vfio_legacy_dma_map()
203 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { in vfio_legacy_dma_map()
208 return -errno; in vfio_legacy_dma_map()
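
vfio_legacy_dma_map() tolerates a stale mapping by unmapping the range once and retrying when VFIO_IOMMU_MAP_DMA fails with EBUSY. A minimal sketch of the same pattern against the raw UAPI (reusing the earlier includes; names are illustrative):

static int map_with_retry(int container_fd, void *vaddr, uint64_t iova,
                          uint64_t size, int readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) == 0) {
        return 0;
    }
    /* A leftover mapping can make the kernel return EBUSY; clear it once. */
    if (errno == EBUSY &&
        ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) == 0 &&
        ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) == 0) {
        return 0;
    }
    return -errno;
}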
228 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); in vfio_legacy_set_dirty_page_tracking()
230 ret = -errno; in vfio_legacy_set_dirty_page_tracking()
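
Dirty page tracking is toggled container-wide with the VFIO_IOMMU_DIRTY_PAGES ioctl and the START/STOP flags. A compact sketch (reusing the earlier includes):

static int set_dirty_tracking(int container_fd, int start)
{
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
        .flags = start ? VFIO_IOMMU_DIRTY_PAGES_FLAG_START
                       : VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP,
    };

    return ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &dirty) ? -errno : 0;
}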
244 struct vfio_iommu_type1_dirty_bitmap_get *range; in vfio_legacy_query_dirty_bitmap() local
247 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); in vfio_legacy_query_dirty_bitmap()
249 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); in vfio_legacy_query_dirty_bitmap()
250 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; in vfio_legacy_query_dirty_bitmap()
251 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; in vfio_legacy_query_dirty_bitmap()
252 range->iova = iova; in vfio_legacy_query_dirty_bitmap()
253 range->size = size; in vfio_legacy_query_dirty_bitmap()
260 range->bitmap.pgsize = qemu_real_host_page_size(); in vfio_legacy_query_dirty_bitmap()
261 range->bitmap.size = vbmap->size; in vfio_legacy_query_dirty_bitmap()
262 range->bitmap.data = (__u64 *)vbmap->bitmap; in vfio_legacy_query_dirty_bitmap()
264 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); in vfio_legacy_query_dirty_bitmap()
266 ret = -errno; in vfio_legacy_query_dirty_bitmap()
269 " size: 0x%"PRIx64, (uint64_t)range->iova, in vfio_legacy_query_dirty_bitmap()
270 (uint64_t)range->size); in vfio_legacy_query_dirty_bitmap()
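
While tracking is active, vfio_legacy_query_dirty_bitmap() reads the bitmap for a range without unmapping it, using the same DIRTY_PAGES ioctl with the GET_BITMAP flag and an appended vfio_iommu_type1_dirty_bitmap_get. A standalone sketch (reusing the earlier includes; names are illustrative):

static int query_dirty_bitmap(int container_fd, uint64_t iova, uint64_t size,
                              uint64_t page_size, uint64_t *bits,
                              uint64_t bits_bytes)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = calloc(1, sizeof(*dbitmap) + sizeof(*range));
    if (!dbitmap) {
        return -ENOMEM;
    }
    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;

    /* The range descriptor lives in the flexible data[] tail. */
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;
    range->bitmap.pgsize = page_size;
    range->bitmap.size = bits_bytes;
    range->bitmap.data = (__u64 *)bits;

    ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap) ? -errno : 0;
    free(dbitmap);
    return ret;
}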
281 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { in vfio_get_iommu_type1_info_cap()
285 return vfio_get_cap((void *)info, info->cap_offset, id); in vfio_get_iommu_type1_info_cap()
303 *avail = cap->avail; in vfio_get_info_dma_avail()
323 for (int i = 0; i < cap->nr_iovas; i++) { in vfio_get_info_iova_range()
324 Range *range = g_new(Range, 1); in vfio_get_info_iova_range() local
326 range_set_bounds(range, cap->iova_ranges[i].start, in vfio_get_info_iova_range()
327 cap->iova_ranges[i].end); in vfio_get_info_iova_range()
328 bcontainer->iova_ranges = in vfio_get_info_iova_range()
329 range_list_insert(bcontainer->iova_ranges, range); in vfio_get_info_iova_range()
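
The helpers above walk the capability chain that the kernel appends to vfio_iommu_type1_info, looking up specific capability IDs such as the usable IOVA ranges and the remaining DMA mapping budget. A sketch of that walk over a buffer already filled by VFIO_IOMMU_GET_INFO (see the argsz-negotiation sketch further down); reusing the earlier includes, with illustrative names:

static struct vfio_info_cap_header *
find_iommu_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    uint8_t *base = (uint8_t *)info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }
    /* next == 0 terminates the chain, landing hdr back on 'base'. */
    for (hdr = (void *)(base + info->cap_offset); (uint8_t *)hdr != base;
         hdr = (void *)(base + hdr->next)) {
        if (hdr->id == id) {
            return hdr;
        }
    }
    return NULL;
}

static void dump_caps(struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;

    hdr = find_iommu_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (hdr) {
        struct vfio_iommu_type1_info_cap_iova_range *cap = (void *)hdr;
        for (uint32_t i = 0; i < cap->nr_iovas; i++) {
            printf("usable IOVA: 0x%llx-0x%llx\n",
                   (unsigned long long)cap->iova_ranges[i].start,
                   (unsigned long long)cap->iova_ranges[i].end);
        }
    }

    hdr = find_iommu_cap(info, VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr) {
        struct vfio_iommu_type1_info_dma_avail *cap = (void *)hdr;
        printf("DMA mappings still available: %u\n", cap->avail);
    }
}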
339 if (vfio_kvm_device_add_fd(group->fd, &err)) { in vfio_kvm_device_add_group()
340 error_reportf_err(err, "group ID %d: ", group->groupid); in vfio_kvm_device_add_group()
348 if (vfio_kvm_device_del_fd(group->fd, &err)) { in vfio_kvm_device_del_group()
349 error_reportf_err(err, "group ID %d: ", group->groupid); in vfio_kvm_device_del_group()
354 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
369 return -EINVAL; in vfio_get_iommu_type()
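
Backend selection probes the container with VFIO_CHECK_EXTENSION and takes the first supported type, preferring v2 as the comment says. A sketch of that probe loop; the candidate order mirrors the comment, though the exact list QEMU tries depends on the build (reusing the earlier includes):

static int pick_iommu_type(int container_fd)
{
    static const int candidates[] = {
        VFIO_TYPE1v2_IOMMU,
        VFIO_TYPE1_IOMMU,
        VFIO_SPAPR_TCE_v2_IOMMU,
        VFIO_SPAPR_TCE_IOMMU,
    };

    for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        /* CHECK_EXTENSION returns > 0 when the backend is supported. */
        if (ioctl(container_fd, VFIO_CHECK_EXTENSION, candidates[i]) > 0) {
            return candidates[i];
        }
    }
    return -EINVAL;
}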
373 * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type
429 if (!vfio_set_iommu(fd, group->fd, &iommu_type, errp)) { in vfio_create_container()
436 container->fd = fd; in vfio_create_container()
437 container->iommu_type = iommu_type; in vfio_create_container()
449 (*info)->argsz = argsz; in vfio_get_iommu_info()
451 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { in vfio_get_iommu_info()
454 return -errno; in vfio_get_iommu_info()
457 if (((*info)->argsz > argsz)) { in vfio_get_iommu_info()
458 argsz = (*info)->argsz; in vfio_get_iommu_info()
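
vfio_get_iommu_info() uses the standard two-pass argsz negotiation: call once with a minimal buffer, let the kernel report the size it actually needs (capability chain included), then reallocate and retry. A standalone sketch of the pattern (reusing the earlier includes; names are illustrative):

static struct vfio_iommu_type1_info *get_iommu_info(int container_fd)
{
    uint32_t argsz = sizeof(struct vfio_iommu_type1_info);
    struct vfio_iommu_type1_info *info = NULL;

    for (;;) {
        struct vfio_iommu_type1_info *tmp = realloc(info, argsz);
        if (!tmp) {
            free(info);
            return NULL;
        }
        info = tmp;
        memset(info, 0, argsz);
        info->argsz = argsz;

        if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
            free(info);
            return NULL;
        }
        if (info->argsz <= argsz) {
            return info;          /* everything fit */
        }
        argsz = info->argsz;      /* kernel wants a bigger buffer: retry */
    }
}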
472 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { in vfio_get_iommu_info_cap()
476 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { in vfio_get_iommu_info_cap()
477 if (hdr->id == id) { in vfio_get_iommu_info_cap()
490 VFIOContainerBase *bcontainer = &container->bcontainer; in vfio_get_iommu_info_migration()
504 if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { in vfio_get_iommu_info_migration()
505 bcontainer->dirty_pages_supported = true; in vfio_get_iommu_info_migration()
506 bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; in vfio_get_iommu_info_migration()
507 bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; in vfio_get_iommu_info_migration()
520 error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); in vfio_legacy_setup()
524 if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { in vfio_legacy_setup()
525 bcontainer->pgsizes = info->iova_pgsizes; in vfio_legacy_setup()
527 bcontainer->pgsizes = qemu_real_host_page_size(); in vfio_legacy_setup()
530 if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { in vfio_legacy_setup()
531 bcontainer->dma_max_mappings = 65535; in vfio_legacy_setup()
558 * us with options to allow devices within a group to opt-in and allow in vfio_connect_container()
568 * re-established. Likewise, when the overall MemoryListener for a new in vfio_connect_container()
570 * will occur, re-establishing any previously zapped pages as well. in vfio_connect_container()
572 * Especially virtio-balloon is currently only prevented from discarding in vfio_connect_container()
582 QLIST_FOREACH(bcontainer, &space->containers, next) { in vfio_connect_container()
584 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { in vfio_connect_container()
587 error_setg_errno(errp, -ret, in vfio_connect_container()
589 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, in vfio_connect_container()
590 &container->fd)) { in vfio_connect_container()
592 " container", group->groupid); in vfio_connect_container()
596 group->container = container; in vfio_connect_container()
597 QLIST_INSERT_HEAD(&container->group_list, group, container_next); in vfio_connect_container()
619 bcontainer = &container->bcontainer; in vfio_connect_container()
627 error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); in vfio_connect_container()
632 assert(vioc->setup); in vfio_connect_container()
634 if (!vioc->setup(bcontainer, errp)) { in vfio_connect_container()
642 group->container = container; in vfio_connect_container()
643 QLIST_INSERT_HEAD(&container->group_list, group, container_next); in vfio_connect_container()
645 bcontainer->listener = vfio_memory_listener; in vfio_connect_container()
646 memory_listener_register(&bcontainer->listener, bcontainer->space->as); in vfio_connect_container()
648 if (bcontainer->error) { in vfio_connect_container()
649 error_propagate_prepend(errp, bcontainer->error, in vfio_connect_container()
654 bcontainer->initialized = true; in vfio_connect_container()
660 memory_listener_unregister(&bcontainer->listener); in vfio_connect_container()
661 if (vioc->release) { in vfio_connect_container()
662 vioc->release(bcontainer); in vfio_connect_container()
675 close(fd); in vfio_connect_container()
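
Stripped of QEMU's container reuse, memory listener, and discard-disable logic, vfio_connect_container() follows the canonical legacy bring-up from Documentation/driver-api/vfio.rst: open /dev/vfio/vfio, check the API version, verify the group is viable, attach the group, then select an IOMMU backend. A minimal sketch of that sequence, using pick_iommu_type() from the sketch above (illustrative names, earlier includes assumed):

/* Returns the container fd on success, negative errno on failure. */
static int open_container_for_group(int group_fd)
{
    struct vfio_group_status status = { .argsz = sizeof(status) };
    int container_fd, iommu_type, err;

    container_fd = open("/dev/vfio/vfio", O_RDWR);
    if (container_fd < 0) {
        return -errno;
    }
    if (ioctl(container_fd, VFIO_GET_API_VERSION) != VFIO_API_VERSION) {
        close(container_fd);
        return -EINVAL;
    }

    /* Viable means every device in the group is bound to vfio-* or a stub. */
    if (ioctl(group_fd, VFIO_GROUP_GET_STATUS, &status) ||
        !(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        close(container_fd);
        return -EINVAL;
    }

    /* A group must be attached before VFIO_SET_IOMMU is allowed. */
    if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd)) {
        close(container_fd);
        return -errno;
    }
    iommu_type = pick_iommu_type(container_fd);
    if (iommu_type < 0 || ioctl(container_fd, VFIO_SET_IOMMU, iommu_type)) {
        err = iommu_type < 0 ? iommu_type : -errno;
        ioctl(group_fd, VFIO_GROUP_UNSET_CONTAINER, &container_fd);
        close(container_fd);
        return err;
    }
    return container_fd;
}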
685 VFIOContainer *container = group->container; in vfio_disconnect_container()
686 VFIOContainerBase *bcontainer = &container->bcontainer; in vfio_disconnect_container()
690 group->container = NULL; in vfio_disconnect_container()
697 if (QLIST_EMPTY(&container->group_list)) { in vfio_disconnect_container()
698 memory_listener_unregister(&bcontainer->listener); in vfio_disconnect_container()
699 if (vioc->release) { in vfio_disconnect_container()
700 vioc->release(bcontainer); in vfio_disconnect_container()
704 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { in vfio_disconnect_container()
706 group->groupid); in vfio_disconnect_container()
709 if (QLIST_EMPTY(&container->group_list)) { in vfio_disconnect_container()
710 VFIOAddressSpace *space = bcontainer->space; in vfio_disconnect_container()
712 trace_vfio_disconnect_container(container->fd); in vfio_disconnect_container()
714 close(container->fd); in vfio_disconnect_container()
729 if (group->groupid == groupid) { in vfio_get_group()
731 if (group->container->bcontainer.space->as == as) { in vfio_get_group()
735 group->groupid); in vfio_get_group()
744 group->fd = qemu_open(path, O_RDWR, errp); in vfio_get_group()
745 if (group->fd < 0) { in vfio_get_group()
749 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { in vfio_get_group()
762 group->groupid = groupid; in vfio_get_group()
763 QLIST_INIT(&group->device_list); in vfio_get_group()
776 close(group->fd); in vfio_get_group()
786 if (!group || !QLIST_EMPTY(&group->device_list)) { in vfio_put_group()
790 if (!group->ram_block_discard_allowed) { in vfio_put_group()
791 vfio_ram_block_discard_disable(group->container, false); in vfio_put_group()
796 trace_vfio_put_group(group->fd); in vfio_put_group()
797 close(group->fd); in vfio_put_group()
807 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); in vfio_get_device()
810 group->groupid); in vfio_get_device()
812 "Verify all devices in group %d are bound to vfio-<bus> " in vfio_get_device()
813 "or pci-stub and not already in use\n", group->groupid); in vfio_get_device()
820 close(fd); in vfio_get_device()
830 if (vbasedev->ram_block_discard_allowed != in vfio_get_device()
831 group->ram_block_discard_allowed) { in vfio_get_device()
832 if (!QLIST_EMPTY(&group->device_list)) { in vfio_get_device()
835 close(fd); in vfio_get_device()
839 if (!group->ram_block_discard_allowed) { in vfio_get_device()
840 group->ram_block_discard_allowed = true; in vfio_get_device()
841 vfio_ram_block_discard_disable(group->container, false); in vfio_get_device()
845 vbasedev->fd = fd; in vfio_get_device()
846 vbasedev->group = group; in vfio_get_device()
847 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); in vfio_get_device()
849 vbasedev->num_irqs = info->num_irqs; in vfio_get_device()
850 vbasedev->num_regions = info->num_regions; in vfio_get_device()
851 vbasedev->flags = info->flags; in vfio_get_device()
853 trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs); in vfio_get_device()
855 vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); in vfio_get_device()
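
Device lookup goes through the group: VFIO_GROUP_GET_DEVICE_FD hands back a device fd for the named device, and VFIO_DEVICE_GET_INFO reports region/IRQ counts and the reset capability that vfio_get_device() caches. A minimal sketch (earlier includes assumed; names are illustrative):

/* 'name' is the device's kernel name, e.g. "0000:06:0d.0" for a PCI device. */
static int get_device_fd(int group_fd, const char *name)
{
    struct vfio_device_info info = { .argsz = sizeof(info) };
    int fd, err;

    fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        return -errno;
    }
    if (ioctl(fd, VFIO_DEVICE_GET_INFO, &info)) {
        err = -errno;
        close(fd);
        return err;
    }
    printf("%s: %u regions, %u irqs, reset %ssupported\n",
           name, info.num_regions, info.num_irqs,
           (info.flags & VFIO_DEVICE_FLAGS_RESET) ? "" : "not ");
    return fd;
}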
862 if (!vbasedev->group) { in vfio_put_base_device()
866 vbasedev->group = NULL; in vfio_put_base_device()
867 trace_vfio_put_base_device(vbasedev->fd); in vfio_put_base_device()
868 close(vbasedev->fd); in vfio_put_base_device()
878 tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); in vfio_device_groupid()
883 ret = len < 0 ? -errno : -ENAMETOOLONG; in vfio_device_groupid()
884 error_setg_errno(errp, -ret, "no iommu_group found"); in vfio_device_groupid()
893 return -errno; in vfio_device_groupid()
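
vfio_device_groupid() resolves the group by following the device's sysfs iommu_group symlink, whose target ends in the numeric group id. A rough sketch of that resolution (earlier includes plus <limits.h> for PATH_MAX; names and parsing details are illustrative):

static int sysfs_device_groupid(const char *sysfsdev)
{
    char link[PATH_MAX], target[PATH_MAX];
    char *slash;
    ssize_t len;
    int groupid;

    /* e.g. /sys/bus/pci/devices/0000:06:0d.0/iommu_group -> .../iommu_groups/26 */
    snprintf(link, sizeof(link), "%s/iommu_group", sysfsdev);
    len = readlink(link, target, sizeof(target) - 1);
    if (len < 0) {
        return -errno;
    }
    target[len] = '\0';

    slash = strrchr(target, '/');
    if (!slash || sscanf(slash + 1, "%d", &groupid) != 1) {
        return -EINVAL;
    }
    return groupid;
}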
900 * @name and @vbasedev->name are likely to be different depending
915 trace_vfio_attach_device(vbasedev->name, groupid); in vfio_legacy_attach_device()
926 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { in vfio_legacy_attach_device()
927 if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { in vfio_legacy_attach_device()
938 bcontainer = &group->container->bcontainer; in vfio_legacy_attach_device()
939 vbasedev->bcontainer = bcontainer; in vfio_legacy_attach_device()
940 QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); in vfio_legacy_attach_device()
948 VFIOGroup *group = vbasedev->group; in vfio_legacy_detach_device()
952 vbasedev->bcontainer = NULL; in vfio_legacy_detach_device()
953 trace_vfio_detach_device(vbasedev->name, group->groupid); in vfio_legacy_detach_device()
969 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); in vfio_legacy_pci_hot_reset()
974 vdev->vbasedev.needs_reset = false; in vfio_legacy_pci_hot_reset()
981 devices = &info->devices[0]; in vfio_legacy_pci_hot_reset()
983 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); in vfio_legacy_pci_hot_reset()
986 for (i = 0; i < info->count; i++) { in vfio_legacy_pci_hot_reset()
999 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { in vfio_legacy_pci_hot_reset()
1004 if (group->groupid == devices[i].group_id) { in vfio_legacy_pci_hot_reset()
1010 if (!vdev->has_pm_reset) { in vfio_legacy_pci_hot_reset()
1013 vdev->vbasedev.name, devices[i].group_id); in vfio_legacy_pci_hot_reset()
1015 ret = -EPERM; in vfio_legacy_pci_hot_reset()
1020 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { in vfio_legacy_pci_hot_reset()
1021 if (!vbasedev_iter->dev->realized || in vfio_legacy_pci_hot_reset()
1022 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { in vfio_legacy_pci_hot_reset()
1026 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { in vfio_legacy_pci_hot_reset()
1028 ret = -EINVAL; in vfio_legacy_pci_hot_reset()
1032 tmp->vbasedev.needs_reset = false; in vfio_legacy_pci_hot_reset()
1040 ret = -EINVAL; in vfio_legacy_pci_hot_reset()
1047 for (i = 0; i < info->count; i++) { in vfio_legacy_pci_hot_reset()
1048 if (group->groupid == devices[i].group_id) { in vfio_legacy_pci_hot_reset()
1056 reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); in vfio_legacy_pci_hot_reset()
1057 fds = &reset->group_fds[0]; in vfio_legacy_pci_hot_reset()
1061 for (i = 0; i < info->count; i++) { in vfio_legacy_pci_hot_reset()
1062 if (group->groupid == devices[i].group_id) { in vfio_legacy_pci_hot_reset()
1063 fds[reset->count++] = group->fd; in vfio_legacy_pci_hot_reset()
1070 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); in vfio_legacy_pci_hot_reset()
1073 ret = -errno; in vfio_legacy_pci_hot_reset()
1076 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, in vfio_legacy_pci_hot_reset()
1080 /* Re-enable INTx on affected devices */ in vfio_legacy_pci_hot_reset()
1081 for (i = 0; i < info->count; i++) { in vfio_legacy_pci_hot_reset()
1091 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { in vfio_legacy_pci_hot_reset()
1096 if (group->groupid == devices[i].group_id) { in vfio_legacy_pci_hot_reset()
1105 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { in vfio_legacy_pci_hot_reset()
1106 if (!vbasedev_iter->dev->realized || in vfio_legacy_pci_hot_reset()
1107 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { in vfio_legacy_pci_hot_reset()
1111 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { in vfio_legacy_pci_hot_reset()
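
The hot reset path first asks the kernel which devices share the bus/slot reset (VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, negotiated in two passes), verifies the caller owns every affected group, and then issues VFIO_DEVICE_PCI_HOT_RESET with one fd per affected group as proof of ownership. A simplified sketch of that flow, with QEMU's single/multi and INTx handling left out and allocation checks trimmed ('group_ids'/'group_fds' describe groups the caller already opened; names are illustrative):

static int pci_hot_reset(int device_fd, const uint32_t *group_ids,
                         const int *group_fds, uint32_t ngroups)
{
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_hot_reset *reset = NULL;
    uint32_t i, j;
    int ret;

    /* First pass: learn how many dependent devices the reset touches. */
    info = calloc(1, sizeof(*info));
    info->argsz = sizeof(*info);
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
        errno != ENOSPC) {
        ret = -errno;
        goto out;
    }

    /* Second pass: fetch the full dependent-device list. */
    info = realloc(info, sizeof(*info) + info->count * sizeof(info->devices[0]));
    info->argsz = sizeof(*info) + info->count * sizeof(info->devices[0]);
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
        ret = -errno;
        goto out;
    }

    /* Every affected group must be one we own, or the reset is not ours to do. */
    for (i = 0; i < info->count; i++) {
        for (j = 0; j < ngroups; j++) {
            if (group_ids[j] == info->devices[i].group_id) {
                break;
            }
        }
        if (j == ngroups) {
            ret = -EPERM;
            goto out;
        }
    }

    /* Pass one fd per affected group, each group only once. */
    reset = calloc(1, sizeof(*reset) + ngroups * sizeof(__s32));
    for (j = 0; j < ngroups; j++) {
        for (i = 0; i < info->count; i++) {
            if (info->devices[i].group_id == group_ids[j]) {
                reset->group_fds[reset->count++] = group_fds[j];
                break;
            }
        }
    }
    reset->argsz = sizeof(*reset) + reset->count * sizeof(__s32);

    ret = ioctl(device_fd, VFIO_DEVICE_PCI_HOT_RESET, reset) ? -errno : 0;
out:
    free(reset);
    free(info);
    return ret;
}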
1130 vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO; in vfio_iommu_legacy_class_init()
1132 vioc->setup = vfio_legacy_setup; in vfio_iommu_legacy_class_init()
1133 vioc->dma_map = vfio_legacy_dma_map; in vfio_iommu_legacy_class_init()
1134 vioc->dma_unmap = vfio_legacy_dma_unmap; in vfio_iommu_legacy_class_init()
1135 vioc->attach_device = vfio_legacy_attach_device; in vfio_iommu_legacy_class_init()
1136 vioc->detach_device = vfio_legacy_detach_device; in vfio_iommu_legacy_class_init()
1137 vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; in vfio_iommu_legacy_class_init()
1138 vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; in vfio_iommu_legacy_class_init()
1139 vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; in vfio_iommu_legacy_class_init()
1147 hiod->name = g_strdup(vdev->name); in hiod_legacy_vfio_realize()
1148 hiod->agent = opaque; in hiod_legacy_vfio_realize()
1158 return vfio_device_get_aw_bits(hiod->agent); in hiod_legacy_vfio_get_cap()
1160 error_setg(errp, "%s: unsupported capability %x", hiod->name, cap); in hiod_legacy_vfio_get_cap()
1161 return -EINVAL; in hiod_legacy_vfio_get_cap()
1168 VFIODevice *vdev = hiod->agent; in hiod_legacy_vfio_get_iova_ranges()
1171 return vfio_container_get_iova_ranges(vdev->bcontainer); in hiod_legacy_vfio_get_iova_ranges()
1177 VFIODevice *vdev = hiod->agent; in hiod_legacy_vfio_get_page_size_mask()
1180 return vfio_container_get_page_size_mask(vdev->bcontainer); in hiod_legacy_vfio_get_page_size_mask()
1187 QLIST_INIT(&container->group_list); in vfio_iommu_legacy_instance_init()
1194 hioc->realize = hiod_legacy_vfio_realize; in hiod_legacy_vfio_class_init()
1195 hioc->get_cap = hiod_legacy_vfio_get_cap; in hiod_legacy_vfio_class_init()
1196 hioc->get_iova_ranges = hiod_legacy_vfio_get_iova_ranges; in hiod_legacy_vfio_class_init()
1197 hioc->get_page_size_mask = hiod_legacy_vfio_get_page_size_mask; in hiod_legacy_vfio_class_init()