/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "sysemu/iommufd.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "hw/vfio/vfio-common.h"
#include <sys/ioctl.h>
#include <linux/iommufd.h>

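/*
 * Typical usage (illustrative, following QEMU's documented iommufd setup):
 *   -object iommufd,id=iommufd0
 *   -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0
 * An already-open /dev/iommu file descriptor may instead be handed in via
 * the "fd" property, in which case the backend does not own (and will not
 * close) the descriptor.
 */
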
static void iommufd_backend_init(Object *obj)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);

    be->fd = -1;
    be->users = 0;
    be->owned = true;
}

static void iommufd_backend_finalize(Object *obj)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);

    if (be->owned) {
        close(be->fd);
        be->fd = -1;
    }
}

static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
{
    ERRP_GUARD();
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
    int fd = -1;

    fd = monitor_fd_param(monitor_cur(), str, errp);
    if (fd == -1) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    be->fd = fd;
    be->owned = false;
    trace_iommu_backend_set_fd(be->fd);
}

static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);

    return !be->users;
}

static void iommufd_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->can_be_deleted = iommufd_backend_can_be_deleted;

    object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
}

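/*
 * Open /dev/iommu on first use (unless an external fd was provided via the
 * "fd" property) and reference-count the users.  The descriptor is only
 * closed again, if owned, once the last user has called
 * iommufd_backend_disconnect().
 */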
bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
{
    int fd;

    if (be->owned && !be->users) {
        fd = qemu_open("/dev/iommu", O_RDWR, errp);
        if (fd < 0) {
            return false;
        }
        be->fd = fd;
    }
    be->users++;

    trace_iommufd_backend_connect(be->fd, be->owned, be->users);
    return true;
}

void iommufd_backend_disconnect(IOMMUFDBackend *be)
{
    if (!be->users) {
        goto out;
    }
    be->users--;
    if (!be->users && be->owned) {
        close(be->fd);
        be->fd = -1;
    }
out:
    trace_iommufd_backend_disconnect(be->fd, be->users);
}

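/*
 * Allocate a new I/O address space (IOAS) on the iommufd.  The returned
 * ioas_id is the handle later passed to the map/unmap helpers below and
 * used as the page table parent when allocating hardware page tables.
 */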
bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
                                Error **errp)
{
    int fd = be->fd;
    struct iommu_ioas_alloc alloc_data = {
        .size = sizeof(alloc_data),
        .flags = 0,
    };

    if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
        error_setg_errno(errp, errno, "Failed to allocate ioas");
        return false;
    }

    *ioas_id = alloc_data.out_ioas_id;
    trace_iommufd_backend_alloc_ioas(fd, *ioas_id);

    return true;
}

void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
{
    int ret, fd = be->fd;
    struct iommu_destroy des = {
        .size = sizeof(des),
        .id = id,
    };

    ret = ioctl(fd, IOMMU_DESTROY, &des);
    trace_iommufd_backend_free_id(fd, id, ret);
    if (ret) {
        error_report("Failed to free id: %u %m", id);
    }
}

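/*
 * Map @size bytes of host memory starting at @vaddr into @ioas_id at the
 * fixed guest IOVA @iova.  The mapping is always readable and is made
 * writable as well unless @readonly is set.
 */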
int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    int ret, fd = be->fd;
    struct iommu_ioas_map map = {
        .size = sizeof(map),
        .flags = IOMMU_IOAS_MAP_READABLE |
                 IOMMU_IOAS_MAP_FIXED_IOVA,
        .ioas_id = ioas_id,
        .__reserved = 0,
        .user_va = (uintptr_t)vaddr,
        .iova = iova,
        .length = size,
    };

    if (!readonly) {
        map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
    }

    ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
    trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
                                  vaddr, readonly, ret);
    if (ret) {
        ret = -errno;

        /* TODO: mapping hardware PCI BAR regions is not supported for now. */
        if (errno == EFAULT) {
            warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
        } else {
            error_report("IOMMU_IOAS_MAP failed: %m");
        }
    }
    return ret;
}

int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
                              hwaddr iova, ram_addr_t size)
{
    int ret, fd = be->fd;
    struct iommu_ioas_unmap unmap = {
        .size = sizeof(unmap),
        .ioas_id = ioas_id,
        .iova = iova,
        .length = size,
    };

    ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
    /*
     * IOMMUFD treats a mapping as an object: unmapping a nonexistent
     * mapping is like destroying a nonexistent object and fails with
     * ENOENT, whereas the legacy backend allows it.  A vIOMMU may trigger
     * a lot of redundant unmaps, so to avoid flooding the log, treat them
     * as success for IOMMUFD just like the legacy backend does.
     */
    if (ret && errno == ENOENT) {
        trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
        ret = 0;
    } else {
        trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
    }

    if (ret) {
        ret = -errno;
        error_report("IOMMU_IOAS_UNMAP failed: %m");
    }
    return ret;
}

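/*
 * Allocate a hardware page table (HWPT) object for device @dev_id on top
 * of the page table object @pt_id (commonly an IOAS id).  Vendor-specific
 * allocation data, if any, is passed through @data_type/@data_len/@data_ptr;
 * the new object's id is returned in @out_hwpt.
 */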
bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id,
                                uint32_t pt_id, uint32_t flags,
                                uint32_t data_type, uint32_t data_len,
                                void *data_ptr, uint32_t *out_hwpt,
                                Error **errp)
{
    int ret, fd = be->fd;
    struct iommu_hwpt_alloc alloc_hwpt = {
        .size = sizeof(struct iommu_hwpt_alloc),
        .flags = flags,
        .dev_id = dev_id,
        .pt_id = pt_id,
        .data_type = data_type,
        .data_len = data_len,
        .data_uptr = (uintptr_t)data_ptr,
    };

    ret = ioctl(fd, IOMMU_HWPT_ALLOC, &alloc_hwpt);
    trace_iommufd_backend_alloc_hwpt(fd, dev_id, pt_id, flags, data_type,
                                     data_len, (uintptr_t)data_ptr,
                                     alloc_hwpt.out_hwpt_id, ret);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to allocate hwpt");
        return false;
    }

    *out_hwpt = alloc_hwpt.out_hwpt_id;
    return true;
}

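/* Enable or disable dirty page tracking on the IOMMU domain behind @hwpt_id. */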
bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be,
                                        uint32_t hwpt_id, bool start,
                                        Error **errp)
{
    int ret;
    struct iommu_hwpt_set_dirty_tracking set_dirty = {
        .size = sizeof(set_dirty),
        .hwpt_id = hwpt_id,
        .flags = start ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
    };

    ret = ioctl(be->fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set_dirty);
    trace_iommufd_backend_set_dirty(be->fd, hwpt_id, start, ret ? errno : 0);
    if (ret) {
        error_setg_errno(errp, errno,
                         "IOMMU_HWPT_SET_DIRTY_TRACKING(hwpt_id %u) failed",
                         hwpt_id);
        return false;
    }

    return true;
}

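/*
 * Read back the dirty bitmap of [@iova, @iova + @size) at @page_size
 * granularity.  @data must point to a caller-allocated bitmap holding one
 * bit per @page_size unit of the range.
 */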
bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
                                      uint32_t hwpt_id,
                                      uint64_t iova, ram_addr_t size,
                                      uint64_t page_size, uint64_t *data,
                                      Error **errp)
{
    int ret;
    struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap = {
        .size = sizeof(get_dirty_bitmap),
        .hwpt_id = hwpt_id,
        .iova = iova,
        .length = size,
        .page_size = page_size,
        .data = (uintptr_t)data,
    };

    ret = ioctl(be->fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get_dirty_bitmap);
    trace_iommufd_backend_get_dirty_bitmap(be->fd, hwpt_id, iova, size,
                                           page_size, ret ? errno : 0);
    if (ret) {
        error_setg_errno(errp, errno,
                         "IOMMU_HWPT_GET_DIRTY_BITMAP (iova: 0x%"HWADDR_PRIx
                         " size: 0x"RAM_ADDR_FMT") failed", iova, size);
        return false;
    }

    return true;
}

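/*
 * Query the host IOMMU hardware information for device @devid.  On success
 * *@type reports the format of the vendor data written to @data/@len and
 * *@caps reports the generic capability flags.
 */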
bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
                                     uint32_t *type, void *data, uint32_t len,
                                     uint64_t *caps, Error **errp)
{
    struct iommu_hw_info info = {
        .size = sizeof(info),
        .dev_id = devid,
        .data_len = len,
        .data_uptr = (uintptr_t)data,
    };

    if (ioctl(be->fd, IOMMU_GET_HW_INFO, &info)) {
        error_setg_errno(errp, errno, "Failed to get hardware info");
        return false;
    }

    g_assert(type);
    *type = info.out_data_type;
    g_assert(caps);
    *caps = info.out_capabilities;

    return true;
}

static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp)
{
    HostIOMMUDeviceCaps *caps = &hiod->caps;

    switch (cap) {
    case HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE:
        return caps->type;
    case HOST_IOMMU_DEVICE_CAP_AW_BITS:
        return vfio_device_get_aw_bits(hiod->agent);
    default:
        error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
        return -EINVAL;
    }
}

static void hiod_iommufd_class_init(ObjectClass *oc, void *data)
{
    HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);

    hioc->get_cap = hiod_iommufd_get_cap;
}

static const TypeInfo types[] = {
    {
        .name = TYPE_IOMMUFD_BACKEND,
        .parent = TYPE_OBJECT,
        .instance_size = sizeof(IOMMUFDBackend),
        .instance_init = iommufd_backend_init,
        .instance_finalize = iommufd_backend_finalize,
        .class_size = sizeof(IOMMUFDBackendClass),
        .class_init = iommufd_backend_class_init,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_USER_CREATABLE },
            { }
        }
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD,
        .parent = TYPE_HOST_IOMMU_DEVICE,
        .class_init = hiod_iommufd_class_init,
        .abstract = true,
    }
};

DEFINE_TYPES(types)