/*
 * VFIO BASE CONTAINER
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/vfio/vfio-container-base.h"
#include "hw/vfio/vfio-common.h" /* vfio_reset_handler */
#include "system/reset.h"

static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

VFIOAddressSpace *vfio_address_space_get(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    /* The first address space registers the global VFIO reset handler */
    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

void vfio_address_space_put(VFIOAddressSpace *space)
{
    if (!QLIST_EMPTY(&space->containers)) {
        return;
    }

    QLIST_REMOVE(space, list);
    g_free(space);

    /* Dropping the last address space unregisters the reset handler */
    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

void vfio_address_space_insert(VFIOAddressSpace *space,
                               VFIOContainerBase *bcontainer)
{
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
    bcontainer->space = space;
}

int vfio_container_dma_map(VFIOContainerBase *bcontainer,
                           hwaddr iova, ram_addr_t size,
                           void *vaddr, bool readonly)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->dma_map);
    return vioc->dma_map(bcontainer, iova, size, vaddr, readonly);
}

int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                             hwaddr iova, ram_addr_t size,
                             IOMMUTLBEntry *iotlb)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->dma_unmap);
    return vioc->dma_unmap(bcontainer, iova, size, iotlb);
}

bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
                                       MemoryRegionSection *section,
                                       Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    if (!vioc->add_window) {
        return true;
    }

    return vioc->add_window(bcontainer, section, errp);
}

void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
                                       MemoryRegionSection *section)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    if (!vioc->del_window) {
        return;
    }

    vioc->del_window(bcontainer, section);
}

int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
                                           bool start, Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
    int ret;

    if (!bcontainer->dirty_pages_supported) {
        return 0;
    }

    g_assert(vioc->set_dirty_page_tracking);
    if (bcontainer->dirty_pages_started == start) {
        return 0;
    }

    ret = vioc->set_dirty_page_tracking(bcontainer, start, errp);
    if (!ret) {
        bcontainer->dirty_pages_started = start;
    }

    return ret;
}

int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                      VFIOBitmap *vbmap, hwaddr iova,
                                      hwaddr size, Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->query_dirty_bitmap);
    return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size, errp);
}
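
/*
 * Usage sketch (illustrative, not upstream code): how a log-sync style
 * caller might pair the dirty-tracking wrappers above. The VFIOBitmap
 * layout (pages / size / bitmap) is assumed from vfio-common.h; the
 * sizing arithmetic (one bit per host page, rounded up to 64-bit words)
 * is reproduced here purely for the example, and the function name is
 * hypothetical.
 */
static int G_GNUC_UNUSED
vfio_dirty_bitmap_example(VFIOContainerBase *bcontainer,
                          hwaddr iova, hwaddr size, Error **errp)
{
    VFIOBitmap vbmap;
    int ret;

    /* A no-op when the backend does not support dirty page tracking */
    ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp);
    if (ret) {
        return ret;
    }

    /* One bit per host page over [iova, iova + size) */
    vbmap.pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
    /* Round the bit count up to whole 64-bit words, then convert to bytes */
    vbmap.size = ROUND_UP(vbmap.pages, 64) / 8;
    vbmap.bitmap = g_malloc0(vbmap.size);

    ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
                                            errp);
    /* ... a real caller would feed vbmap.bitmap to the dirty log here ... */

    g_free(vbmap.bitmap);
    return ret;
}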
static gpointer copy_iova_range(gconstpointer src, gpointer data)
{
    Range *source = (Range *)src;
    Range *dest = g_new(Range, 1);

    range_set_bounds(dest, range_lob(source), range_upb(source));
    return dest;
}

GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer)
{
    assert(bcontainer);
    return g_list_copy_deep(bcontainer->iova_ranges, copy_iova_range, NULL);
}

static void vfio_container_instance_finalize(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
    VFIOGuestIOMMU *giommu, *tmp;

    QLIST_SAFE_REMOVE(bcontainer, next);

    QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
        memory_region_unregister_iommu_notifier(
                MEMORY_REGION(giommu->iommu_mr), &giommu->n);
        QLIST_REMOVE(giommu, giommu_next);
        g_free(giommu);
    }

    g_list_free_full(bcontainer->iova_ranges, g_free);
}

static void vfio_container_instance_init(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);

    bcontainer->error = NULL;
    bcontainer->dirty_pages_supported = false;
    bcontainer->dma_max_mappings = 0;
    bcontainer->iova_ranges = NULL;
    QLIST_INIT(&bcontainer->giommu_list);
    QLIST_INIT(&bcontainer->vrdl_list);
}

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU,
        .parent = TYPE_OBJECT,
        .instance_init = vfio_container_instance_init,
        .instance_finalize = vfio_container_instance_finalize,
        .instance_size = sizeof(VFIOContainerBase),
        .class_size = sizeof(VFIOIOMMUClass),
        .abstract = true,
    },
};

DEFINE_TYPES(types)
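
/*
 * Backend wiring sketch (hypothetical, illustration only): a concrete
 * IOMMU backend subclasses TYPE_VFIO_IOMMU and fills in the
 * VFIOIOMMUClass callbacks that the wrappers above either g_assert()
 * on (dma_map, dma_unmap, ...) or treat as optional (add_window,
 * del_window). Every "example" identifier below is invented; the
 * callback signatures are inferred from the dispatch calls in this
 * file, and the in-tree backends (legacy VFIO, iommufd) follow the
 * same pattern.
 */
#if 0 /* illustration only, never built */
static int vfio_example_dma_map(VFIOContainerBase *bcontainer, hwaddr iova,
                                ram_addr_t size, void *vaddr, bool readonly)
{
    return -ENOTSUP; /* a real backend issues its map ioctl here */
}

static int vfio_example_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
                                  ram_addr_t size, IOMMUTLBEntry *iotlb)
{
    return -ENOTSUP; /* ... and its unmap ioctl here */
}

static void vfio_example_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->dma_map = vfio_example_dma_map;
    vioc->dma_unmap = vfio_example_dma_unmap;
    /* add_window/del_window may stay NULL; the wrappers handle that */
}

static const TypeInfo vfio_example_type = {
    .name = "vfio-iommu-example",
    .parent = TYPE_VFIO_IOMMU,
    .class_init = vfio_example_class_init,
};
#endif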