/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/irq.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate and initialize an IRQ struct and add it
 * to the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from the VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_new0(EventNotifier, 1);
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_new0(EventNotifier, 1);
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    int32_t fd = event_notifier_get_fd(intp->interrupt);
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);

    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
    }

    return ret;
}

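/*
 * For reference, a minimal sketch of the raw VFIO_DEVICE_SET_IRQS ioctl
 * that vfio_set_irq_signaling() issues for the TRIGGER action above,
 * assuming a single sub-index (start = 0, count = 1). Purely illustrative,
 * not used by this driver:
 *
 *     struct vfio_irq_set *irq_set;
 *     int argsz = sizeof(*irq_set) + sizeof(int32_t);
 *
 *     irq_set = g_malloc0(argsz);
 *     irq_set->argsz = argsz;
 *     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *                      VFIO_IRQ_SET_ACTION_TRIGGER;
 *     irq_set->index = intp->pin;
 *     irq_set->start = 0;
 *     irq_set->count = 1;
 *     *(int32_t *)&irq_set->data = fd;
 *     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *     g_free(irq_set);
 */
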
/*
 * Functions only used when eventfds are handled on the user side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = the MMIO region is mmapped (no KVM TRAP);
 * enabled = false ~ slow path = the MMIO region is trapped and the region
 * callbacks are called; the slow path allows the device IRQ status register
 * reset to be trapped
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check the active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
}

/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the VFIOINTp handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation the slow path is already set and the
 * mmap timer is already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}

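/*
 * Overview of the user-side injection state machine implemented by
 * vfio_intp_interrupt() and vfio_platform_eoi() below (descriptive
 * summary only, derived from the code):
 *
 *   INACTIVE -- eventfd fires, no other IRQ in flight --> ACTIVE
 *       (slow path enabled, virtual IRQ asserted, mmap timer armed)
 *   INACTIVE -- eventfd fires, another IRQ in flight ---> PENDING
 *       (queued on pending_intp_queue, eventfd cleared)
 *   ACTIVE   -- guest MMIO access trapped, eoi called --> INACTIVE
 *       (virtual IRQ deasserted, physical IRQ unmasked if automasked,
 *        first PENDING IRQ, if any, promoted to ACTIVE)
 */
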
/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the VFIOINTp handle
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed in
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        event_notifier_test_and_clear(intp->interrupt);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets the slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when
     * no IRQ is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  vdev->mmap_timeout);
    }
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered; the access is trapped since the slow path
 * was set. It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmask the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
        abort();
    }
}

/*
 * Functions used for irqfd
 */

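/*
 * With irqfd, the data path is (descriptive sketch; the eventfds are the
 * same intp->interrupt/intp->unmask notifiers used on the user-side path):
 *
 *   device IRQ fires    -> VFIO driver signals the trigger eventfd
 *                       -> KVM irqfd injects the virtual IRQ directly,
 *                          without returning to QEMU
 *   guest EOIs the vIRQ -> KVM signals the resample eventfd
 *                       -> VFIO driver unmasks the (auto-masked)
 *                          physical IRQ
 */
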
/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    int32_t fd = event_notifier_get_fd(intp->unmask);
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, NULL, NULL, NULL);
    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
    }
    return ret;
}

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace handled eventfds
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - allocate and populate MMIO region
 * and IRQ structs according to driver returned information
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}

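/*
 * For reference, vfio_region_setup() above ultimately queries each region's
 * geometry with VFIO_DEVICE_GET_REGION_INFO, roughly as follows (illustrative
 * sketch, error handling and capability chain parsing omitted):
 *
 *     struct vfio_region_info info = { .argsz = sizeof(info), .index = i };
 *
 *     ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *     // info.size and info.offset locate the MMIO window within the
 *     // device fd; VFIO_REGION_INFO_FLAG_MMAP indicates whether the
 *     // fast (mmap) path is available for this region at all.
 */
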
/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implements the VFIO command sequence that discovers the assigned
 * device resources: group extraction, device fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    struct stat st;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    ret = vfio_attach_device(vbasedev->name, vbasedev,
                             &address_space_memory, errp);
    if (ret) {
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_detach_device(vbasedev);
    }

    return ret;
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initializes the device, its memory regions and IRQ structures;
 * IRQs are started separately
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};

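/*
 * Example usage from the command line, on a machine that accepts
 * dynamically instantiated sysbus devices such as the arm "virt"
 * machine (the host device name below is hypothetical):
 *
 *   -machine virt -device vfio-platform,host=fff51000.ethernet
 *
 * or, equivalently, pointing directly at the sysfs node:
 *
 *   -device vfio-platform,sysfsdev=/sys/bus/platform/devices/fff51000.ethernet
 */
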
static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    device_class_set_props(dc, vfio_platform_dev_properties);
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)