xref: /openbmc/qemu/backends/iommufd.c (revision 7d87775f)
/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "sysemu/iommufd.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "hw/vfio/vfio-common.h"
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static void iommufd_backend_init(Object *obj)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);

    be->fd = -1;
    be->users = 0;
    be->owned = true;
}

static void iommufd_backend_finalize(Object *obj)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);

    if (be->owned) {
        close(be->fd);
        be->fd = -1;
    }
}

static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
{
    ERRP_GUARD();
    IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
    int fd = -1;

    fd = monitor_fd_param(monitor_cur(), str, errp);
    if (fd == -1) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    be->fd = fd;
    be->owned = false;
    trace_iommu_backend_set_fd(be->fd);
}
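
/*
 * Usage sketch (illustrative, not part of this file): the "fd" property
 * lets management software hand QEMU an already-open /dev/iommu
 * descriptor instead of letting the backend open one itself. The fd
 * number and object id below are made up:
 *
 *     qemu-system-x86_64 \
 *         -object iommufd,id=iommufd0,fd=23 \
 *         -device vfio-pci,host=0000:01:00.0,iommufd=iommufd0
 *
 * Because be->owned is cleared here, neither finalize nor disconnect
 * will close a descriptor the backend did not open.
 */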

static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
{
    IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);

    return !be->users;
}

static void iommufd_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->can_be_deleted = iommufd_backend_can_be_deleted;

    object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
}

bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
{
    int fd;

    if (be->owned && !be->users) {
        fd = qemu_open("/dev/iommu", O_RDWR, errp);
        if (fd < 0) {
            return false;
        }
        be->fd = fd;
    }
    be->users++;

    trace_iommufd_backend_connect(be->fd, be->owned, be->users);
    return true;
}

void iommufd_backend_disconnect(IOMMUFDBackend *be)
{
    if (!be->users) {
        goto out;
    }
    be->users--;
    if (!be->users && be->owned) {
        close(be->fd);
        be->fd = -1;
    }
out:
    trace_iommufd_backend_disconnect(be->fd, be->users);
}
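
/*
 * A minimal sketch of the connect/disconnect pairing (hypothetical
 * caller, not from this file): each consumer bumps the reference count;
 * /dev/iommu is opened only for the first user and closed again after
 * the last one, provided the backend owns the fd.
 *
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_connect(be, &err)) {
 *         error_report_err(err);      // could not open /dev/iommu
 *         return;
 *     }
 *     // ... issue iommufd ioctls through be->fd ...
 *     iommufd_backend_disconnect(be); // fd closed once users reaches 0
 */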

bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
                                Error **errp)
{
    int fd = be->fd;
    struct iommu_ioas_alloc alloc_data = {
        .size = sizeof(alloc_data),
        .flags = 0,
    };

    if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
        error_setg_errno(errp, errno, "Failed to allocate ioas");
        return false;
    }

    *ioas_id = alloc_data.out_ioas_id;
    trace_iommufd_backend_alloc_ioas(fd, *ioas_id);

    return true;
}
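
/*
 * Sketch of a typical allocate/use/free cycle (hypothetical caller):
 * the returned IOAS id is what the map/unmap helpers below operate on,
 * and it is released with iommufd_backend_free_id() once no device
 * is attached to it anymore.
 *
 *     uint32_t ioas_id;
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_alloc_ioas(be, &ioas_id, &err)) {
 *         error_report_err(err);
 *         return;
 *     }
 *     // ... attach devices, map and unmap DMA ranges ...
 *     iommufd_backend_free_id(be, ioas_id);
 */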

void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
{
    int ret, fd = be->fd;
    struct iommu_destroy des = {
        .size = sizeof(des),
        .id = id,
    };

    ret = ioctl(fd, IOMMU_DESTROY, &des);
    trace_iommufd_backend_free_id(fd, id, ret);
    if (ret) {
        error_report("Failed to free id: %u %m", id);
    }
}

int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    int ret, fd = be->fd;
    struct iommu_ioas_map map = {
        .size = sizeof(map),
        .flags = IOMMU_IOAS_MAP_READABLE |
                 IOMMU_IOAS_MAP_FIXED_IOVA,
        .ioas_id = ioas_id,
        .__reserved = 0,
        .user_va = (uintptr_t)vaddr,
        .iova = iova,
        .length = size,
    };

    if (!readonly) {
        map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
    }

    ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
    trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
                                  vaddr, readonly, ret);
    if (ret) {
        ret = -errno;

        /* TODO: mapping hardware PCI BAR regions is not supported for now. */
        if (errno == EFAULT) {
            warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
        } else {
            error_report("IOMMU_IOAS_MAP failed: %m");
        }
    }
    return ret;
}
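
/*
 * Mapping sketch (hypothetical values): pin `size` bytes of host memory
 * at `vaddr` and make them reachable by the device at a fixed guest
 * IOVA. The addresses below are placeholders; real callers derive them
 * from a MemoryRegionSection.
 *
 *     int ret = iommufd_backend_map_dma(be, ioas_id,
 *                                       0x100000,   // guest IOVA
 *                                       0x200000,   // length in bytes
 *                                       host_ptr,   // HVA backing the range
 *                                       false);     // writable mapping
 *     if (ret) {
 *         // ret is a negative errno value
 *     }
 */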

int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
                              hwaddr iova, ram_addr_t size)
{
    int ret, fd = be->fd;
    struct iommu_ioas_unmap unmap = {
        .size = sizeof(unmap),
        .ioas_id = ioas_id,
        .iova = iova,
        .length = size,
    };

    ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
    /*
     * IOMMUFD treats each mapping as an object; unmapping a nonexistent
     * mapping is like destroying a nonexistent object and fails with
     * ENOENT, whereas the legacy backend allows it. A vIOMMU may trigger
     * many redundant unmaps, so to avoid flooding the log, treat ENOENT
     * as success for IOMMUFD just like the legacy backend does.
     */
    if (ret && errno == ENOENT) {
        trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
        ret = 0;
    } else {
        trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
    }

    if (ret) {
        ret = -errno;
        error_report("IOMMU_IOAS_UNMAP failed: %m");
    }
    return ret;
}
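
/*
 * Unmap sketch (hypothetical values, mirroring the map example above):
 *
 *     int ret = iommufd_backend_unmap_dma(be, ioas_id, 0x100000, 0x200000);
 *
 * A second identical call would take the ENOENT path above and still
 * return 0, matching the legacy VFIO container behaviour.
 */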

bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id,
                                uint32_t pt_id, uint32_t flags,
                                uint32_t data_type, uint32_t data_len,
                                void *data_ptr, uint32_t *out_hwpt,
                                Error **errp)
{
    int ret, fd = be->fd;
    struct iommu_hwpt_alloc alloc_hwpt = {
        .size = sizeof(struct iommu_hwpt_alloc),
        .flags = flags,
        .dev_id = dev_id,
        .pt_id = pt_id,
        .data_type = data_type,
        .data_len = data_len,
        .data_uptr = (uintptr_t)data_ptr,
    };

    ret = ioctl(fd, IOMMU_HWPT_ALLOC, &alloc_hwpt);
    trace_iommufd_backend_alloc_hwpt(fd, dev_id, pt_id, flags, data_type,
                                     data_len, (uintptr_t)data_ptr,
                                     alloc_hwpt.out_hwpt_id, ret);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to allocate hwpt");
        return false;
    }

    *out_hwpt = alloc_hwpt.out_hwpt_id;
    return true;
}
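
/*
 * Allocation sketch (hypothetical, simplified): a plain hwpt over an
 * IOAS needs no vendor data, so data_type is IOMMU_HWPT_DATA_NONE and
 * the data pointer is NULL; vendor-specific nested page tables would
 * pass a driver structure and its length instead.
 *
 *     uint32_t hwpt_id;
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_alloc_hwpt(be, dev_id, ioas_id,
 *                                     0, IOMMU_HWPT_DATA_NONE,
 *                                     0, NULL, &hwpt_id, &err)) {
 *         error_report_err(err);
 *     }
 */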

bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be,
                                        uint32_t hwpt_id, bool start,
                                        Error **errp)
{
    int ret;
    struct iommu_hwpt_set_dirty_tracking set_dirty = {
            .size = sizeof(set_dirty),
            .hwpt_id = hwpt_id,
            .flags = start ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
    };

    ret = ioctl(be->fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set_dirty);
    trace_iommufd_backend_set_dirty(be->fd, hwpt_id, start, ret ? errno : 0);
    if (ret) {
        error_setg_errno(errp, errno,
                         "IOMMU_HWPT_SET_DIRTY_TRACKING(hwpt_id %u) failed",
                         hwpt_id);
        return false;
    }

    return true;
}
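
/*
 * Sketch of toggling dirty tracking around a migration phase
 * (hypothetical caller; error handling for the stop path elided):
 *
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_set_dirty_tracking(be, hwpt_id, true, &err)) {
 *         error_report_err(err);  // could not start tracking
 *         return;
 *     }
 *     // ... migration copies memory while polling the dirty bitmap ...
 *     iommufd_backend_set_dirty_tracking(be, hwpt_id, false, &err);
 */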

bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
                                      uint32_t hwpt_id,
                                      uint64_t iova, ram_addr_t size,
                                      uint64_t page_size, uint64_t *data,
                                      Error **errp)
{
    int ret;
    struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap = {
        .size = sizeof(get_dirty_bitmap),
        .hwpt_id = hwpt_id,
        .iova = iova,
        .length = size,
        .page_size = page_size,
        .data = (uintptr_t)data,
    };

    ret = ioctl(be->fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get_dirty_bitmap);
    trace_iommufd_backend_get_dirty_bitmap(be->fd, hwpt_id, iova, size,
                                           page_size, ret ? errno : 0);
    if (ret) {
        error_setg_errno(errp, errno,
                         "IOMMU_HWPT_GET_DIRTY_BITMAP (iova: 0x%"HWADDR_PRIx
                         " size: 0x"RAM_ADDR_FMT") failed", iova, size);
        return false;
    }

    return true;
}
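
/*
 * Bitmap sizing sketch (hypothetical values): the kernel reports one bit
 * per page, packed into uint64_t words, so a 1 GiB range at 4 KiB pages
 * needs (1 GiB / 4 KiB) / 8 = 32 KiB of bitmap storage.
 *
 *     uint64_t page_size = 4096;
 *     uint64_t pages = size / page_size;
 *     g_autofree uint64_t *bitmap = g_malloc0(ROUND_UP(pages, 64) / 8);
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_get_dirty_bitmap(be, hwpt_id, iova, size,
 *                                           page_size, bitmap, &err)) {
 *         error_report_err(err);
 *     }
 */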

bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
                                     uint32_t *type, void *data, uint32_t len,
                                     uint64_t *caps, Error **errp)
{
    struct iommu_hw_info info = {
        .size = sizeof(info),
        .dev_id = devid,
        .data_len = len,
        .data_uptr = (uintptr_t)data,
    };

    if (ioctl(be->fd, IOMMU_GET_HW_INFO, &info)) {
        error_setg_errno(errp, errno, "Failed to get hardware info");
        return false;
    }

    g_assert(type);
    *type = info.out_data_type;
    g_assert(caps);
    *caps = info.out_capabilities;

    return true;
}
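
/*
 * Query sketch (hypothetical caller): pass a buffer large enough for
 * the vendor structure you expect; the kernel reports the actual data
 * type so the buffer can be interpreted safely, e.g. as Intel VT-d
 * register data.
 *
 *     struct iommu_hw_info_vtd vtd;
 *     uint32_t type;
 *     uint64_t hw_caps;
 *     Error *err = NULL;
 *
 *     if (!iommufd_backend_get_device_info(be, devid, &type,
 *                                          &vtd, sizeof(vtd),
 *                                          &hw_caps, &err)) {
 *         error_report_err(err);
 *     } else if (type == IOMMU_HW_INFO_TYPE_INTEL_VTD) {
 *         // vtd.cap_reg / vtd.ecap_reg are now valid
 *     }
 */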

static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp)
{
    HostIOMMUDeviceCaps *caps = &hiod->caps;

    switch (cap) {
    case HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE:
        return caps->type;
    case HOST_IOMMU_DEVICE_CAP_AW_BITS:
        return vfio_device_get_aw_bits(hiod->agent);
    default:
        error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
        return -EINVAL;
    }
}
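
/*
 * Capability query sketch (hypothetical caller): consumers reach this
 * helper through the class vtable rather than calling it directly.
 *
 *     HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);
 *     int aw_bits = hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, &err);
 *     if (aw_bits < 0) {
 *         // unsupported capability, err is set
 *     }
 */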

static void hiod_iommufd_class_init(ObjectClass *oc, void *data)
{
    HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);

    hioc->get_cap = hiod_iommufd_get_cap;
}

static const TypeInfo types[] = {
    {
        .name = TYPE_IOMMUFD_BACKEND,
        .parent = TYPE_OBJECT,
        .instance_size = sizeof(IOMMUFDBackend),
        .instance_init = iommufd_backend_init,
        .instance_finalize = iommufd_backend_finalize,
        .class_size = sizeof(IOMMUFDBackendClass),
        .class_init = iommufd_backend_class_init,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_USER_CREATABLE },
            { }
        }
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD,
        .parent = TYPE_HOST_IOMMU_DEVICE,
        .class_init = hiod_iommufd_class_init,
        .abstract = true,
    }
};

DEFINE_TYPES(types)