/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/platform-bus.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate and initialize an IRQ struct and add it
 * to the device IRQ list
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from the VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}
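
/*
 * Note on the eventfd pair allocated above: the "interrupt" eventfd is
 * signaled by the VFIO driver when the physical IRQ fires, whereas the
 * optional "unmask" eventfd (only present for automasked, i.e. level
 * sensitive, IRQs) is signaled to ask the driver to unmask the physical
 * line once the guest has completed the IRQ.
 */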

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd.
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->interrupt);
    qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set trigger eventfd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    }
    g_free(irq_set);
    return ret;
}

/*
 * Functions only used when eventfds are handled on the user side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP);
 * enabled = false ~ slow path = MMIO region is trapped and region callbacks
 * are called; the slow path makes it possible to trap the device IRQ status
 * register reset.
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the IRQ struct handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation, the slow path is already set and the mmap timer
 * has already been programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                             event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}
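
/*
 * Summary of the IRQ life cycle with user-side eventfd handling
 * (i.e. without irqfd):
 *
 *   VFIO_IRQ_INACTIVE -> VFIO_IRQ_ACTIVE: the trigger eventfd fires and no
 *       other IRQ is active or pending; the vIRQ is asserted, the MMIO fast
 *       path is disabled and the mmap timer is armed (when a timeout is set).
 *   VFIO_IRQ_INACTIVE -> VFIO_IRQ_PENDING: another IRQ is already active or
 *       pending; the new one is queued on pending_intp_queue.
 *   VFIO_IRQ_ACTIVE -> VFIO_IRQ_INACTIVE: the guest accesses an MMIO region
 *       (assumed to reset the IRQ status register); vfio_platform_eoi
 *       deasserts the vIRQ, unmasks the physical IRQ and injects the first
 *       pending IRQ, if any.
 */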

/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the IRQ struct handle
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed in
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level-sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ, if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered; the access is trapped because the slow
 * path is set. It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    int ret;
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt);
    if (ret) {
        error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                     intp->pin);
        abort();
    }
}

/*
 * Functions used for irqfd
 */

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered.
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->unmask);
    qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set resample eventfd: %m");
    }
    return ret;
}
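
/*
 * With irqfd, the trigger eventfd is handed to the in-kernel irqchip via
 * kvm_irqchip_add_irqfd_notifier(), so IRQ injection does not exit to
 * QEMU. For automasked (level sensitive) IRQs, the unmask eventfd is
 * registered both as a KVM resamplefd and as the VFIO unmask eventfd:
 * when the guest completes the IRQ, KVM signals it and the VFIO driver
 * unmasks the physical line.
 */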

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace-handled eventfds.
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                 intp->pin);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - allocate and populate the MMIO region
 * and IRQ structs according to the information returned by the
 * VFIO driver
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}

/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};
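
/*
 * Example of the sysfs layout walked below (hypothetical host device
 * name fff51000.ethernet):
 *   /sys/bus/platform/devices/fff51000.ethernet/iommu_group
 *       -> [...]/kernel/iommu_groups/8
 * The basename of the symlink target ("8") is the VFIO group id passed
 * to vfio_get_group().
 */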

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implements the VFIO command sequence that allows discovering the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized.
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}
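
/*
 * Typical instantiation from the command line, with a hypothetical host
 * device name (the machine must accept dynamic sysbus devices, e.g. virt):
 *   -device vfio-platform,host=fff51000.ethernet
 * When vdev->compat has not been set (e.g. by a device-specific subclass),
 * realize derives it from the host device tree node: of_node/compatible
 * holds a NUL-separated list of compatible strings.
 */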

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)