/*
 * VFIO BASE CONTAINER
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/vfio/vfio-container-base.h"

int vfio_container_dma_map(VFIOContainerBase *bcontainer,
                           hwaddr iova, ram_addr_t size,
                           void *vaddr, bool readonly)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->dma_map);
    return vioc->dma_map(bcontainer, iova, size, vaddr, readonly);
}

int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                             hwaddr iova, ram_addr_t size,
                             IOMMUTLBEntry *iotlb)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->dma_unmap);
    return vioc->dma_unmap(bcontainer, iova, size, iotlb);
}

bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
                                       MemoryRegionSection *section,
                                       Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    if (!vioc->add_window) {
        return true;
    }

    return vioc->add_window(bcontainer, section, errp);
}

void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
                                       MemoryRegionSection *section)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    if (!vioc->del_window) {
        return;
    }

    return vioc->del_window(bcontainer, section);
}

int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
                                           bool start, Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    if (!bcontainer->dirty_pages_supported) {
        return 0;
    }

    g_assert(vioc->set_dirty_page_tracking);
    return vioc->set_dirty_page_tracking(bcontainer, start, errp);
}

int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                      VFIOBitmap *vbmap, hwaddr iova,
                                      hwaddr size, Error **errp)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    g_assert(vioc->query_dirty_bitmap);
    return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size, errp);
}

static gpointer copy_iova_range(gconstpointer src, gpointer data)
{
    Range *source = (Range *)src;
    Range *dest = g_new(Range, 1);

    range_set_bounds(dest, range_lob(source), range_upb(source));
    return dest;
}

GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer)
{
    assert(bcontainer);
    return g_list_copy_deep(bcontainer->iova_ranges, copy_iova_range, NULL);
}

static void vfio_container_instance_finalize(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
    VFIOGuestIOMMU *giommu, *tmp;

    QLIST_SAFE_REMOVE(bcontainer, next);

    QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
        memory_region_unregister_iommu_notifier(
                MEMORY_REGION(giommu->iommu_mr), &giommu->n);
        QLIST_REMOVE(giommu, giommu_next);
        g_free(giommu);
    }

    g_list_free_full(bcontainer->iova_ranges, g_free);
}

static void vfio_container_instance_init(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);

    bcontainer->error = NULL;
    bcontainer->dirty_pages_supported = false;
    bcontainer->dma_max_mappings = 0;
    bcontainer->iova_ranges = NULL;
    QLIST_INIT(&bcontainer->giommu_list);
    QLIST_INIT(&bcontainer->vrdl_list);
}

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU,
        .parent = TYPE_OBJECT,
        .instance_init = vfio_container_instance_init,
        .instance_finalize = vfio_container_instance_finalize,
        .instance_size = sizeof(VFIOContainerBase),
        .class_size = sizeof(VFIOIOMMUClass),
        .abstract = true,
    },
};

DEFINE_TYPES(types)
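
/*
 * Illustrative sketch (not part of the upstream file): how a concrete
 * IOMMU backend could hook into the dispatch helpers above by
 * subclassing the abstract TYPE_VFIO_IOMMU type and filling in the
 * VFIOIOMMUClass callbacks. The type name "vfio-iommu-example", the
 * stub callbacks and the VFIO_IOMMU_CLASS() cast macro are assumptions
 * made for illustration only; the real backends live in their own
 * files. Guarded with #if 0 so it is never built.
 */
#if 0
static int vfio_iommu_example_dma_map(VFIOContainerBase *bcontainer,
                                      hwaddr iova, ram_addr_t size,
                                      void *vaddr, bool readonly)
{
    /* A real backend would issue its map operation to the kernel here. */
    return 0;
}

static int vfio_iommu_example_dma_unmap(VFIOContainerBase *bcontainer,
                                        hwaddr iova, ram_addr_t size,
                                        IOMMUTLBEntry *iotlb)
{
    /* A real backend would issue its unmap operation to the kernel here. */
    return 0;
}

static void vfio_iommu_example_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    /*
     * dma_map and dma_unmap are g_assert()ed by the wrappers above, so a
     * backend must provide them; add_window/del_window are NULL-checked
     * and may be left unset.
     */
    vioc->dma_map = vfio_iommu_example_dma_map;
    vioc->dma_unmap = vfio_iommu_example_dma_unmap;
}

static const TypeInfo vfio_iommu_example_types[] = {
    {
        .name = "vfio-iommu-example",
        .parent = TYPE_VFIO_IOMMU,
        .class_init = vfio_iommu_example_class_init,
    },
};

DEFINE_TYPES(vfio_iommu_example_types)
#endif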