xref: /openbmc/qemu/hw/vfio/container-base.c (revision 7ab1cb74)
/*
 * VFIO BASE CONTAINER
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/vfio/vfio-container-base.h"

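/*
 * Map @size bytes of the host range starting at @vaddr into the container's
 * IOVA space at @iova, forwarding the request to the IOMMU backend
 * (VFIOIOMMUOps) bound to this container.
 */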
int vfio_container_dma_map(VFIOContainerBase *bcontainer,
                           hwaddr iova, ram_addr_t size,
                           void *vaddr, bool readonly)
{
    g_assert(bcontainer->ops->dma_map);
    return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly);
}

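/*
 * Unmap @size bytes of the container's IOVA space starting at @iova.
 * If provided, @iotlb describes the IOMMU translation being invalidated.
 */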
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                             hwaddr iova, ram_addr_t size,
                             IOMMUTLBEntry *iotlb)
{
    g_assert(bcontainer->ops->dma_unmap);
    return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
}

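/*
 * Ask the backend to start (@start == true) or stop dirty page tracking
 * for this container.
 */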
int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
                                           bool start)
{
    g_assert(bcontainer->ops->set_dirty_page_tracking);
    return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
}

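/*
 * Retrieve from the backend the dirty page state of the IOVA range
 * [@iova, @iova + @size) into @vbmap.
 */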
int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
                                      VFIOBitmap *vbmap,
                                      hwaddr iova, hwaddr size)
{
    g_assert(bcontainer->ops->query_dirty_bitmap);
    return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
}

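/*
 * Initialize the base container state: bind the backend ops and owning
 * address space, and reset dirty tracking support, the DMA mapping limit
 * and the guest IOMMU list to their defaults.
 */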
void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
                         const VFIOIOMMUOps *ops)
{
    bcontainer->ops = ops;
    bcontainer->space = space;
    bcontainer->dirty_pages_supported = false;
    bcontainer->dma_max_mappings = 0;
    QLIST_INIT(&bcontainer->giommu_list);
}

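/*
 * Tear down the base container state: unlink the container from its
 * address space and release any registered guest IOMMU notifiers.
 */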
void vfio_container_destroy(VFIOContainerBase *bcontainer)
{
    VFIOGuestIOMMU *giommu, *tmp;

    QLIST_REMOVE(bcontainer, next);

    QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
        memory_region_unregister_iommu_notifier(
                MEMORY_REGION(giommu->iommu_mr), &giommu->n);
        QLIST_REMOVE(giommu, giommu_next);
        g_free(giommu);
    }
}
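
/*
 * Illustrative sketch only (not part of the original file): one way a
 * hypothetical backend might fill in VFIOIOMMUOps and initialize its base
 * container so the dispatch helpers above reach it.  The VFIOIOMMUOps
 * member names and signatures are taken from the helpers in this file;
 * everything prefixed "mock_" is invented for the example.
 */
static int mock_dma_map(VFIOContainerBase *bcontainer, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    /* A real backend would perform the actual mapping here. */
    return 0;
}

static const VFIOIOMMUOps mock_ops = {
    .dma_map = mock_dma_map,
    /* .dma_unmap, .set_dirty_page_tracking, .query_dirty_bitmap, ... */
};

static void mock_container_setup(VFIOContainerBase *bcontainer,
                                 VFIOAddressSpace *space)
{
    /* Bind the mock ops; vfio_container_dma_map() now dispatches to them. */
    vfio_container_init(bcontainer, space, &mock_ops);
}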