/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/platform-bus.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate and initialize the IRQ struct,
 * and add it to the device's list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from the VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    int32_t fd = event_notifier_get_fd(intp->interrupt);
    Error *err = NULL;
    int ret;

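    /* handler may be NULL when the eventfd is consumed by an irqfd */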
    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);

    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
    }

    return ret;
}

/*
 * Functions only used when eventfds are handled on the user side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = the MMIO region is mmapped (no KVM trap);
 * enabled = false ~ slow path = the MMIO region is trapped and the region
 * callbacks are called; the slow path makes it possible to trap the device
 * IRQ status register reset
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * when no IRQ is active anymore
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
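    /* no IRQ is active anymore: restore the fast path */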
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
 * @intp: the VFIOINTp handle of the IRQ to inject
 *
 * The function is called upon completion of a previous IRQ, from
 * vfio_platform_eoi, while the intp_mutex is held.
 * In that situation, the slow path is already set and the mmap
 * timer has already been programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}

/**
 * vfio_intp_interrupt - The user-side eventfd handler
 * @intp: the VFIOINTp handle of the triggered IRQ
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed onto
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
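        /* consume the eventfd; the vIRQ is injected from vfio_platform_eoi */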
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* set the slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level-sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ, if any.
 * The EOI function is called on the first access to any MMIO region
 * after an IRQ was triggered, trapped since the slow path was set.
 * It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
        abort();
    }
}

/*
 * Functions used for irqfd
 */

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    int32_t fd = event_notifier_get_fd(intp->unmask);
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, NULL, NULL, NULL);
    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
    }
    return ret;
}

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace-handled eventfds
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

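    /*
     * Route the trigger eventfd (and the resample eventfd, if any) through
     * the in-kernel irqchip so that IRQ injection bypasses QEMU.
     */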
    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                   intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - Allocate and populate MMIO region
 * and IRQ structs according to the information returned by the driver
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

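    /* set up a VFIORegion for each MMIO region exposed by the host driver */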
    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

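    /* query each IRQ and allocate the matching VFIOINTp struct */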
    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, errno, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}

/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence used to discover the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

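    /* resolve the iommu_group symlink to retrieve the IOMMU group number */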
    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error object
 *
 * Initializes the device, its memory regions and IRQ structures.
 * IRQ injection is started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

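    /*
     * If no compatible string was already set (e.g. by a derived device
     * type), read the host device's of_node "compatible" property and
     * count its NUL-separated entries.
     */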
    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

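    /* mmap the regions when possible and expose them on the sysbus */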
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
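    /* invoked by sysbus_connect_irq() once the IRQ is wired to the machine */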
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)