/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/platform-bus.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate, initialize the IRQ struct pointer
 * and add it into the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from VFIO driver
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_report("vfio: Error: trigger event_notifier_init failed");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_report("vfio: Error: resamplefd event_notifier_init failed");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->interrupt);
    qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set trigger eventfd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    }
    return ret;
}
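
/*
 * For reference, the buffer handed to VFIO_DEVICE_SET_IRQS above is a
 * struct vfio_irq_set header (argsz/flags/index/start/count, see
 * <linux/vfio.h>) immediately followed by its variable-length data.
 * With VFIO_IRQ_SET_DATA_EVENTFD and count == 1 the data is a single
 * int32_t eventfd, hence argsz = sizeof(*irq_set) + sizeof(int32_t):
 *
 *   +--------+--------+--------+--------+--------+---------+
 *   | argsz  | flags  | index  | start  | count  | eventfd |
 *   | __u32  | __u32  | __u32  | __u32  | __u32  | int32_t |
 *   +--------+--------+--------+--------+--------+---------+
 *
 * vfio_set_resample_eventfd below builds the same layout with
 * VFIO_IRQ_SET_ACTION_UNMASK instead of ACTION_TRIGGER.
 */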
/*
 * Functions only used when eventfds are handled on user-side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP);
 * enabled = false ~ slow path = MMIO region is trapped and region callbacks
 * are called; the slow path makes it possible to trap the device IRQ
 * status register reset
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    trace_vfio_platform_mmap_set_enabled(enabled);

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        VFIORegion *region = vdev->regions[i];

        memory_region_set_enabled(&region->mmap_mem, enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the pending VFIOINTp handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In this situation, the slow path is already set and
 * the mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}
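
/*
 * Rough sketch of the per-IRQ state machine implemented below (only
 * relevant for user-side eventfd handling; with irqfd QEMU does not
 * track these states):
 *
 * - VFIO_IRQ_INACTIVE -> VFIO_IRQ_ACTIVE: the eventfd fires while no
 *   other IRQ is active or pending (vfio_intp_interrupt); the vIRQ is
 *   injected and the slow path is engaged.
 * - VFIO_IRQ_INACTIVE -> VFIO_IRQ_PENDING: the eventfd fires while
 *   another IRQ is in flight; the IRQ is queued on pending_intp_queue.
 * - VFIO_IRQ_ACTIVE -> VFIO_IRQ_INACTIVE: vfio_platform_eoi runs on the
 *   first trapped MMIO access, assumed to be the status register reset.
 * - VFIO_IRQ_PENDING -> VFIO_IRQ_ACTIVE: vfio_platform_eoi dequeues the
 *   head of the pending queue (vfio_intp_inject_pending_lockheld).
 */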
/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the VFIOINTp handle, passed as the opaque pointer of the
 * fd handler
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed in
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when
     * no IRQ is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}
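
/*
 * Note on tuning (an assumption drawn from the code above, not from any
 * external documentation): the "mmap-timeout-ms" property (default
 * 1100 ms, see vfio_platform_dev_properties below) controls how long
 * after an injection vfio_intp_mmap_enable tries to restore the fast
 * path. With mmap-timeout-ms=0 the timer is never armed, so once an
 * IRQ has been injected the device stays on the trapped (slow) path.
 */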
/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ, if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered, an access trapped since the slow path
 * was set. It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    int ret;
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt);
    if (ret) {
        error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                     intp->pin);
        abort();
    }
}

/*
 * Functions used for irqfd
 */

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->unmask);
    qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set resample eventfd: %m");
    }
    return ret;
}
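
/*
 * With irqfd the whole level-sensitive cycle bypasses QEMU (sketch,
 * based on the generic KVM resamplefd contract rather than on anything
 * specific to this file):
 *
 *   VFIO host driver --- trigger eventfd ---> KVM irqfd --> vIRQ asserted
 *   guest EOIs the vIRQ --> KVM de-asserts and signals the resamplefd
 *   resamplefd (intp->unmask) --> VFIO driver unmasks the physical IRQ
 *
 * vfio_set_trigger_eventfd(intp, NULL) below therefore installs no
 * user-side handler: QEMU only wires the file descriptors together.
 */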
/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace-handled
 * eventfds
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                 intp->pin);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - allocate and populate MMIO region
 * and IRQ structs according to driver returned information
 * @vbasedev: the VFIO device handle
 */
static int vfio_populate_device(VFIODevice *vbasedev)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_report("vfio: Um, this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
        VFIORegion *ptr;

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ptr = vdev->regions[i];
        reg_info.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto reg_error;
        }
        ptr->flags = reg_info.flags;
        ptr->size = reg_info.size;
        ptr->fd_offset = reg_info.offset;
        ptr->nr = i;
        ptr->vbasedev = vbasedev;

        trace_vfio_platform_populate_regions(ptr->nr,
                                             (unsigned long)ptr->flags,
                                             (unsigned long)ptr->size,
                                             ptr->vbasedev->fd,
                                             (unsigned long)ptr->fd_offset);
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_report("vfio: error getting device %s irq info",
                         vbasedev->name);
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq);
            if (!intp) {
                error_report("vfio: Error installing IRQ %d", i);
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}
/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 *
 * Implement the VFIO command sequence that allows the discovery of
 * the assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static int vfio_base_device_init(VFIODevice *vbasedev)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* name must be set prior to the call */
    if (!vbasedev->name || strchr(vbasedev->name, '/')) {
        return -EINVAL;
    }

    /* Check that the host device exists */
    g_snprintf(path, sizeof(path), "/sys/bus/platform/devices/%s/",
               vbasedev->name);

    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    g_strlcat(path, "iommu_group", sizeof(path));
    len = readlink(path, iommu_group_path, sizeof(iommu_group_path));
    if (len < 0 || len >= sizeof(iommu_group_path)) {
        error_report("vfio: error: no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    g_snprintf(path, sizeof(path), "%s", vbasedev->name);

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, path, vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev);
    if (ret) {
        error_report("vfio: failed to populate device %s", path);
        vfio_put_group(group);
    }

    return ret;
}
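
/*
 * Illustration of the sysfs walk above, with a hypothetical host device
 * name (the exact symlink target depends on the host kernel layout):
 *
 *   host=fff51000.ethernet
 *   stat("/sys/bus/platform/devices/fff51000.ethernet/")
 *   readlink(".../fff51000.ethernet/iommu_group")
 *       -> "../../../kernel/iommu_groups/2"   => groupid = 2
 *   vfio_get_group(2, ...)   => opens /dev/vfio/2 (the device must first
 *                               be bound to the vfio-platform host driver)
 *   vfio_get_device(group, "fff51000.ethernet", ...)
 */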
/**
 * vfio_map_region - initialize the 2 memory regions for a given
 * MMIO region index
 * @vdev: the VFIO platform device handle
 * @nr: the index of the region
 *
 * Init the top memory region and the mmapped memory region beneath.
 * VFIOPlatformDevice is used since VFIODevice is not a QOM Object
 * and cannot be passed to memory region functions.
 */
static void vfio_map_region(VFIOPlatformDevice *vdev, int nr)
{
    VFIORegion *region = vdev->regions[nr];
    uint64_t size = region->size;
    char name[64];

    if (!size) {
        return;
    }

    g_snprintf(name, sizeof(name), "VFIO %s region %d",
               vdev->vbasedev.name, nr);

    /* A "slow" read/write mapping underlies all regions */
    memory_region_init_io(&region->mem, OBJECT(vdev), &vfio_region_ops,
                          region, name, size);

    g_strlcat(name, " mmap", sizeof(name));

    if (vfio_mmap_region(OBJECT(vdev), region, &region->mem,
                         &region->mmap_mem, &region->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures;
 * IRQ injection is started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->ops = &vfio_platform_ops;

    trace_vfio_platform_realize(vbasedev->name, vdev->compat);

    ret = vfio_base_device_init(vbasedev);
    if (ret) {
        error_setg(errp, "vfio: vfio_base_device_init failed for %s",
                   vbasedev->name);
        return;
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        vfio_map_region(vdev, i);
        sysbus_init_mmio(sbdev, &vdev->regions[i]->mem);
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = TYPE_VFIO_PLATFORM,
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
    .abstract = true,
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)
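
/*
 * Usage sketch (illustrative only; TYPE_VFIO_PLATFORM is abstract, so a
 * concrete subclass such as vfio-calxeda-xgmac is what actually gets
 * instantiated, and the host device name below is hypothetical):
 *
 *   # on the host, bind the device to the vfio-platform driver, then:
 *   qemu-system-arm ... \
 *       -device vfio-calxeda-xgmac,host=fff51000.ethernet
 *
 * Properties defined above: "host" selects the platform device by its
 * sysfs name, "mmap-timeout-ms" tunes the fast-path restore delay,
 * "x-no-mmap" forces the trapped path and "x-irqfd" can disable KVM
 * irqfd acceleration.
 */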