xref: /openbmc/qemu/hw/vfio/device.c (revision 374d766da75ab89749f59813f7ae55d913c37b58)
/*
 * VFIO device
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-device.h"
#include "hw/vfio/pci.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "monitor/monitor.h"
#include "vfio-helpers.h"

VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);

/*
 * We want to differentiate hot reset of multiple in-use devices vs
 * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
 * handle the case of doing hot resets when there is only a single
 * device per bus. "In use" here refers to how many VFIODevices are
 * affected. A hot reset that affects multiple devices, but only a
 * single in-use device, means that we can call it from our bus
 * ->reset() callback since the extent is effectively a single
 * device. This allows us to make use of it in the hotplug path. When
 * there are multiple in-use devices, we can only trigger the hot
 * reset during a system reset and thus from our reset handler. We
 * separate _one vs _multi here so that we don't overlap and do a
 * double reset on the system reset path where both our reset handler
 * and ->reset() callback are used. Calling _one() will only do a hot
 * reset for the single in-use device case; calling _multi() will do
 * nothing if a _one() would have been sufficient.
 */
void vfio_device_reset_handler(void *opaque)
{
    VFIODevice *vbasedev;

    trace_vfio_device_reset_handler();
    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized && vbasedev->needs_reset) {
            vbasedev->ops->vfio_hot_reset_multi(vbasedev);
        }
    }
}

/*
 * Common VFIO interrupt disable
 */
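/*
 * A count of 0 with DATA_NONE/ACTION_TRIGGER removes all triggers for the
 * given index, disabling the interrupt entirely; the mask/unmask helpers
 * below instead act on a single sub-index.
 */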
void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

void vfio_device_irq_mask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

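/*
 * Wire an eventfd to an interrupt sub-index via VFIO_DEVICE_SET_IRQS.
 * A valid fd sets up signaling for the given action; fd == -1 tears it
 * down again. Returns true on success, false with *errp set otherwise.
 *
 * A minimal usage sketch (illustrative only; the event notifier and the
 * INTx index are assumptions, not taken from this file):
 *
 *     int fd = event_notifier_get_fd(&vdev->intx.interrupt);
 *
 *     if (!vfio_device_irq_set_signaling(&vdev->vbasedev,
 *                                        VFIO_PCI_INTX_IRQ_INDEX, 0,
 *                                        VFIO_IRQ_SET_ACTION_TRIGGER,
 *                                        fd, errp)) {
 *         return false;
 *     }
 */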
bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
                                   int action, int fd, Error **errp)
{
    ERRP_GUARD();
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int argsz;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) {
        return true;
    }

    error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return false;
}

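/*
 * Query the properties of one interrupt index. A small sketch of the
 * intended call pattern (the MSI-X index is an assumption used for
 * illustration only):
 *
 *     struct vfio_irq_info irq_info;
 *
 *     if (!vfio_device_get_irq_info(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
 *                                   &irq_info)) {
 *         ... irq_info.count holds the number of available vectors ...
 *     }
 */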
int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
                             struct vfio_irq_info *info)
{
    memset(info, 0, sizeof(*info));

    info->argsz = sizeof(*info);
    info->index = index;

    return vbasedev->io_ops->get_irq_info(vbasedev, info);
}

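/*
 * Look up (and cache) the region info for one region index. The kernel
 * reports the required argsz; if it is larger than what was passed in,
 * the buffer is grown and the query retried so that capability chains
 * are captured as well. The returned pointer is owned by the cache and
 * stays valid until vfio_device_unprepare(); callers must not free it.
 */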
int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
                                struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);
    int fd = -1;
    int ret;

    /* check cache */
    if (vbasedev->reginfo[index] != NULL) {
        *info = vbasedev->reginfo[index];
        return 0;
    }

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    ret = vbasedev->io_ops->get_region_info(vbasedev, *info, &fd);
    if (ret != 0) {
        g_free(*info);
        *info = NULL;
        return ret;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        if (fd != -1) {
            close(fd);
            fd = -1;
        }

        goto retry;
    }

    /* fill cache */
    vbasedev->reginfo[index] = *info;
    if (vbasedev->region_fds != NULL) {
        vbasedev->region_fds[index] = fd;
    }

    return 0;
}

int vfio_device_get_region_fd(VFIODevice *vbasedev, int index)
{
    return vbasedev->region_fds ?
           vbasedev->region_fds[index] :
           vbasedev->fd;
}

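/*
 * Scan all regions for one whose VFIO_REGION_INFO_CAP_TYPE capability
 * matches the requested type/subtype pair. For example, a sketch of how
 * a caller might look up a vendor-specific PCI region (the constants are
 * assumptions for illustration, not used in this file):
 *
 *     struct vfio_region_info *info = NULL;
 *
 *     if (!vfio_device_get_region_info_type(vbasedev,
 *                                           VFIO_REGION_TYPE_PCI_VENDOR_TYPE | 0x1234,
 *                                           0x1, &info)) {
 *         ... info describes the matching region and is cache-owned ...
 *     }
 */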
int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
                                     uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_device_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_device_get_region_info_type(vbasedev->name, i,
                                               cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_device_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
    }

    return ret;
}

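/*
 * Derive vbasedev->name. With a sysfs path the basename of the path is
 * used (unless the caller already supplied a name, e.g. for VFIO platform
 * devices); with fd passing, which requires the iommufd backend, a
 * synthetic "VFIO_FD<n>" name is generated so that log and trace output
 * stays usable.
 */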
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
{
    ERRP_GUARD();
    struct stat st;

    if (vbasedev->fd < 0) {
        if (stat(vbasedev->sysfsdev, &st) < 0) {
            error_setg_errno(errp, errno, "no such host device");
            error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
            return false;
        }
        /* User may specify a name, e.g.: VFIO platform device */
        if (!vbasedev->name) {
            vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
        }
    } else {
        if (!vbasedev->iommufd) {
            error_setg(errp, "Use FD passing only with iommufd backend");
            return false;
        }
        /*
         * Give a name with fd so any function printing out vbasedev->name
         * will not break.
         */
        if (!vbasedev->name) {
            vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
        }
    }

    return true;
}

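/*
 * Resolve an fd argument through the monitor (e.g. one previously passed
 * with "getfd") and store it in vbasedev->fd. On failure *errp is set and
 * vbasedev->fd is left untouched.
 */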
void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
{
    ERRP_GUARD();
    int fd = monitor_fd_param(monitor_cur(), str, errp);

    if (fd < 0) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    vbasedev->fd = fd;
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl;

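/*
 * Common initialization of the VFIODevice embedded in a device-specific
 * structure. Only backend-independent fields are set here; the default
 * io_ops point at the traditional ioctl() implementations at the bottom
 * of this file and can presumably be overridden by alternative transports.
 */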
void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->type = type;
    vbasedev->ops = ops;
    vbasedev->io_ops = &vfio_device_io_ops_ioctl;
    vbasedev->dev = dev;
    vbasedev->fd = -1;
    vbasedev->use_region_fds = false;

    vbasedev->ram_block_discard_allowed = ram_discard;
}

int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list. For old kernels that support VFIO but
     * do not support querying IOVA ranges, iova_ranges is NULL; in that
     * case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64) is returned.
     */
    GList *l = g_list_last(vdev->bcontainer->iova_ranges);

    if (l) {
        Range *range = l->data;
        return range_get_last_bit(range) + 1;
    }

    return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
}

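/*
 * A device is a mediated device (mdev) if its sysfs "subsystem" link
 * resolves to /sys/bus/mdev. This is used below to skip creating a
 * HostIOMMUDevice for mdevs, which are not backed by a physical IOMMU
 * device of their own.
 */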
bool vfio_device_is_mdev(VFIODevice *vbasedev)
{
    g_autofree char *subsys = NULL;
    g_autofree char *tmp = NULL;

    if (!vbasedev->sysfsdev) {
        return false;
    }

    tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
    subsys = realpath(tmp, NULL);
    return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
}

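/*
 * Create and realize the HostIOMMUDevice object that exposes host IOMMU
 * capabilities for this device. Skipped for mdevs. On failure the object
 * is dropped and *errp is set.
 */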
bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
                                         const char *typename, Error **errp)
{
    HostIOMMUDevice *hiod;

    if (vbasedev->mdev) {
        return true;
    }

    hiod = HOST_IOMMU_DEVICE(object_new(typename));

    if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
        object_unref(hiod);
        return false;
    }

    vbasedev->hiod = hiod;
    return true;
}

VFIODevice *vfio_get_vfio_device(Object *obj)
{
    if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
        return &VFIO_PCI_BASE(obj)->vbasedev;
    } else {
        return NULL;
    }
}

bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
                                      VFIODevice *vbasedev, AddressSpace *as,
                                      Error **errp)
{
    const VFIOIOMMUClass *ops =
        VFIO_IOMMU_CLASS(object_class_by_name(iommu_type));

    assert(ops);

    return ops->attach_device(name, vbasedev, as, errp);
}

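/*
 * Attach the device to an address space, picking the IOMMU backend from
 * the device configuration: the iommufd backend when an iommufd object
 * was supplied, the legacy VFIO container otherwise.
 */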
bool vfio_device_attach(char *name, VFIODevice *vbasedev,
                        AddressSpace *as, Error **errp)
{
    const char *iommu_type = vbasedev->iommufd ?
                             TYPE_VFIO_IOMMU_IOMMUFD :
                             TYPE_VFIO_IOMMU_LEGACY;

    return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev,
                                            as, errp);
}

void vfio_device_detach(VFIODevice *vbasedev)
{
    if (!vbasedev->bcontainer) {
        return;
    }
    VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev);
}

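/*
 * Cache the basic properties reported in struct vfio_device_info, link
 * the device into its container and into the global device list, and
 * allocate the per-region info (and, when requested, region fd) caches
 * used by vfio_device_get_region_info() above.
 */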
void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
                         struct vfio_device_info *info)
{
    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;
    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);

    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    vbasedev->reginfo = g_new0(struct vfio_region_info *,
                               vbasedev->num_regions);
    if (vbasedev->use_region_fds) {
        vbasedev->region_fds = g_new0(int, vbasedev->num_regions);
    }
}

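/*
 * Undo vfio_device_prepare(): release the cached region info and any
 * region fds, and unlink the device from the container and global lists.
 */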
void vfio_device_unprepare(VFIODevice *vbasedev)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        g_free(vbasedev->reginfo[i]);
        if (vbasedev->region_fds != NULL && vbasedev->region_fds[i] != -1) {
            close(vbasedev->region_fds[i]);
        }
    }

    g_clear_pointer(&vbasedev->reginfo, g_free);
    g_clear_pointer(&vbasedev->region_fds, g_free);

    QLIST_REMOVE(vbasedev, container_next);
    QLIST_REMOVE(vbasedev, global_next);
    vbasedev->bcontainer = NULL;
}

/*
 * Traditional ioctl() based io
 */

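/*
 * The VFIODeviceIOOps implementations below back the default
 * vfio_device_io_ops_ioctl table: each callback is a thin wrapper that
 * issues the corresponding ioctl() (or pread()/pwrite()) on the device fd
 * and converts failure into a negative errno value. Alternative transports
 * can presumably substitute their own ops table instead.
 */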
static int vfio_device_io_device_feature(VFIODevice *vbasedev,
                                         struct vfio_device_feature *feature)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_region_info(VFIODevice *vbasedev,
                                          struct vfio_region_info *info,
                                          int *fd)
{
    int ret;

    *fd = -1;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_irq_info(VFIODevice *vbasedev,
                                       struct vfio_irq_info *info)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_set_irqs(VFIODevice *vbasedev,
                                   struct vfio_irq_set *irqs)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs);

    return ret < 0 ? -errno : ret;
}

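/*
 * Region reads and writes resolve the region's file offset from the cached
 * region info and then access the device fd directly with pread()/pwrite().
 * On success they return the number of bytes transferred; short transfers
 * are passed through to the caller. The "post" flag on writes is accepted
 * but has no effect for this ioctl/file backend.
 */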
static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
                                      off_t off, uint32_t size, void *data)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pread(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
                                       off_t off, uint32_t size, void *data,
                                       bool post)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pwrite(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl = {
    .device_feature = vfio_device_io_device_feature,
    .get_region_info = vfio_device_io_get_region_info,
    .get_irq_info = vfio_device_io_get_irq_info,
    .set_irqs = vfio_device_io_set_irqs,
    .region_read = vfio_device_io_region_read,
    .region_write = vfio_device_io_region_write,
};
577