xref: /openbmc/qemu/backends/iommufd.c (revision 47cd2f1a)
1 /*
2  * iommufd container backend
3  *
4  * Copyright (C) 2023 Intel Corporation.
5  * Copyright Red Hat, Inc. 2023
6  *
7  * Authors: Yi Liu <yi.l.liu@intel.com>
8  *          Eric Auger <eric.auger@redhat.com>
9  *
10  * SPDX-License-Identifier: GPL-2.0-or-later
11  */
12 
13 #include "qemu/osdep.h"
14 #include "sysemu/iommufd.h"
15 #include "qapi/error.h"
16 #include "qemu/module.h"
17 #include "qom/object_interfaces.h"
18 #include "qemu/error-report.h"
19 #include "monitor/monitor.h"
20 #include "trace.h"
21 #include <sys/ioctl.h>
22 #include <linux/iommufd.h>
23 
24 static void iommufd_backend_init(Object *obj)
25 {
26     IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
27 
28     be->fd = -1;
29     be->users = 0;
30     be->owned = true;
31 }
32 
33 static void iommufd_backend_finalize(Object *obj)
34 {
35     IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
36 
37     if (be->owned) {
38         close(be->fd);
39         be->fd = -1;
40     }
41 }
42 
43 static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
44 {
45     ERRP_GUARD();
46     IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
47     int fd = -1;
48 
49     fd = monitor_fd_param(monitor_cur(), str, errp);
50     if (fd == -1) {
51         error_prepend(errp, "Could not parse remote object fd %s:", str);
52         return;
53     }
54     be->fd = fd;
55     be->owned = false;
56     trace_iommu_backend_set_fd(be->fd);
57 }
58 
59 static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
60 {
61     IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);
62 
63     return !be->users;
64 }
65 
66 static void iommufd_backend_class_init(ObjectClass *oc, void *data)
67 {
68     UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
69 
70     ucc->can_be_deleted = iommufd_backend_can_be_deleted;
71 
72     object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
73 }
74 
75 bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
76 {
77     int fd;
78 
79     if (be->owned && !be->users) {
80         fd = qemu_open("/dev/iommu", O_RDWR, errp);
81         if (fd < 0) {
82             return false;
83         }
84         be->fd = fd;
85     }
86     be->users++;
87 
88     trace_iommufd_backend_connect(be->fd, be->owned, be->users);
89     return true;
90 }
91 
92 void iommufd_backend_disconnect(IOMMUFDBackend *be)
93 {
94     if (!be->users) {
95         goto out;
96     }
97     be->users--;
98     if (!be->users && be->owned) {
99         close(be->fd);
100         be->fd = -1;
101     }
102 out:
103     trace_iommufd_backend_disconnect(be->fd, be->users);
104 }
105 
106 bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
107                                 Error **errp)
108 {
109     int fd = be->fd;
110     struct iommu_ioas_alloc alloc_data  = {
111         .size = sizeof(alloc_data),
112         .flags = 0,
113     };
114 
115     if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
116         error_setg_errno(errp, errno, "Failed to allocate ioas");
117         return false;
118     }
119 
120     *ioas_id = alloc_data.out_ioas_id;
121     trace_iommufd_backend_alloc_ioas(fd, *ioas_id);
122 
123     return true;
124 }
125 
126 void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
127 {
128     int ret, fd = be->fd;
129     struct iommu_destroy des = {
130         .size = sizeof(des),
131         .id = id,
132     };
133 
134     ret = ioctl(fd, IOMMU_DESTROY, &des);
135     trace_iommufd_backend_free_id(fd, id, ret);
136     if (ret) {
137         error_report("Failed to free id: %u %m", id);
138     }
139 }
140 
141 int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
142                             ram_addr_t size, void *vaddr, bool readonly)
143 {
144     int ret, fd = be->fd;
145     struct iommu_ioas_map map = {
146         .size = sizeof(map),
147         .flags = IOMMU_IOAS_MAP_READABLE |
148                  IOMMU_IOAS_MAP_FIXED_IOVA,
149         .ioas_id = ioas_id,
150         .__reserved = 0,
151         .user_va = (uintptr_t)vaddr,
152         .iova = iova,
153         .length = size,
154     };
155 
156     if (!readonly) {
157         map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
158     }
159 
160     ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
161     trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
162                                   vaddr, readonly, ret);
163     if (ret) {
164         ret = -errno;
165 
166         /* TODO: Not support mapping hardware PCI BAR region for now. */
167         if (errno == EFAULT) {
168             warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
169         } else {
170             error_report("IOMMU_IOAS_MAP failed: %m");
171         }
172     }
173     return ret;
174 }
175 
176 int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
177                               hwaddr iova, ram_addr_t size)
178 {
179     int ret, fd = be->fd;
180     struct iommu_ioas_unmap unmap = {
181         .size = sizeof(unmap),
182         .ioas_id = ioas_id,
183         .iova = iova,
184         .length = size,
185     };
186 
187     ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
188     /*
189      * IOMMUFD takes mapping as some kind of object, unmapping
190      * nonexistent mapping is treated as deleting a nonexistent
191      * object and return ENOENT. This is different from legacy
192      * backend which allows it. vIOMMU may trigger a lot of
193      * redundant unmapping, to avoid flush the log, treat them
194      * as succeess for IOMMUFD just like legacy backend.
195      */
196     if (ret && errno == ENOENT) {
197         trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
198         ret = 0;
199     } else {
200         trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
201     }
202 
203     if (ret) {
204         ret = -errno;
205         error_report("IOMMU_IOAS_UNMAP failed: %m");
206     }
207     return ret;
208 }
209 
210 bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
211                                      uint32_t *type, void *data, uint32_t len,
212                                      Error **errp)
213 {
214     struct iommu_hw_info info = {
215         .size = sizeof(info),
216         .dev_id = devid,
217         .data_len = len,
218         .data_uptr = (uintptr_t)data,
219     };
220 
221     if (ioctl(be->fd, IOMMU_GET_HW_INFO, &info)) {
222         error_setg_errno(errp, errno, "Failed to get hardware info");
223         return false;
224     }
225 
226     g_assert(type);
227     *type = info.out_data_type;
228 
229     return true;
230 }
231 
232 static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp)
233 {
234     HostIOMMUDeviceCaps *caps = &hiod->caps;
235 
236     switch (cap) {
237     case HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE:
238         return caps->type;
239     case HOST_IOMMU_DEVICE_CAP_AW_BITS:
240         return caps->aw_bits;
241     default:
242         error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
243         return -EINVAL;
244     }
245 }
246 
247 static void hiod_iommufd_class_init(ObjectClass *oc, void *data)
248 {
249     HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);
250 
251     hioc->get_cap = hiod_iommufd_get_cap;
252 };
253 
/*
 * QOM type registrations:
 *  - TYPE_IOMMUFD_BACKEND: concrete, user-creatable backend object
 *    ("-object iommufd,...") holding the /dev/iommu connection state.
 *  - TYPE_HOST_IOMMU_DEVICE_IOMMUFD: abstract base for iommufd-backed
 *    host IOMMU devices; only installs the get_cap hook.
 */
static const TypeInfo types[] = {
    {
        .name = TYPE_IOMMUFD_BACKEND,
        .parent = TYPE_OBJECT,
        .instance_size = sizeof(IOMMUFDBackend),
        .instance_init = iommufd_backend_init,
        .instance_finalize = iommufd_backend_finalize,
        .class_size = sizeof(IOMMUFDBackendClass),
        .class_init = iommufd_backend_class_init,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_USER_CREATABLE },    /* enables object_add/object_del */
            { }
        }
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD,
        .parent = TYPE_HOST_IOMMU_DEVICE,
        .class_init = hiod_iommufd_class_init,
        .abstract = true,
    }
};

DEFINE_TYPES(types)
276