/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "hw/vfio/vfio-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "qapi/error.h"
#include "sysemu/iommufd.h"
#include "hw/qdev-core.h"
#include "sysemu/reset.h"
#include "qemu/cutils.h"
#include "qemu/chardev_open.h"
#include "pci.h"

static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOIOMMUFDContainer *container =
        container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);

    return iommufd_backend_map_dma(container->be,
                                   container->ioas_id,
                                   iova, size, vaddr, readonly);
}

static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
                              hwaddr iova, ram_addr_t size,
                              IOMMUTLBEntry *iotlb)
{
    const VFIOIOMMUFDContainer *container =
        container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);

    /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
    return iommufd_backend_unmap_dma(container->be,
                                     container->ioas_id, iova, size);
}

static int iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
{
    return vfio_kvm_device_add_fd(vbasedev->fd, errp);
}

static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) {
        error_report_err(err);
    }
}

static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
{
    IOMMUFDBackend *iommufd = vbasedev->iommufd;
    struct vfio_device_bind_iommufd bind = {
        .argsz = sizeof(bind),
        .flags = 0,
    };
    int ret;

    ret = iommufd_backend_connect(iommufd, errp);
    if (ret) {
        return ret;
    }

    /*
     * Add the device fd to kvm-vfio so it is prepared for device
     * tracking in KVM. In particular, some emulated devices require
     * the KVM information to be available at device open time.
     */
    ret = iommufd_cdev_kvm_device_add(vbasedev, errp);
    if (ret) {
        goto err_kvm_device_add;
    }

    /* Bind device to iommufd */
    bind.iommufd = iommufd->fd;
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind);
    if (ret) {
        error_setg_errno(errp, errno,
                         "failed to bind device fd=%d to iommufd=%d",
                         vbasedev->fd, bind.iommufd);
        goto err_bind;
    }

    vbasedev->devid = bind.out_devid;
    trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name,
                                        vbasedev->fd, vbasedev->devid);
    return ret;

err_bind:
    iommufd_cdev_kvm_device_del(vbasedev);
err_kvm_device_add:
    iommufd_backend_disconnect(iommufd);
    return ret;
}
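
/*
 * Illustrative sketch (not part of the build): assuming "dev_fd" is a
 * cdev fd obtained from /dev/vfio/devices/vfioX and "iommufd_fd" is an
 * fd for /dev/iommu, the bind handshake above reduces to:
 *
 *     struct vfio_device_bind_iommufd bind = {
 *         .argsz = sizeof(bind),
 *         .iommufd = iommufd_fd,
 *     };
 *     if (!ioctl(dev_fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
 *         uint32_t devid = bind.out_devid;   // kernel-assigned device ID
 *     }
 *
 * The saved out_devid is what iommufd_cdev_pci_hot_reset() later
 * matches against vfio_pci_dependent_device.devid.
 */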

static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev)
{
    /* Unbind is automatically conducted when device fd is closed */
    iommufd_cdev_kvm_device_del(vbasedev);
    iommufd_backend_disconnect(vbasedev->iommufd);
}

static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp)
{
    ERRP_GUARD(); /* required: *errp is dereferenced below */
    long int ret = -ENOTTY;
    char *path, *vfio_dev_path = NULL, *vfio_path = NULL;
    DIR *dir = NULL;
    struct dirent *dent;
    gchar *contents;
    gsize length;
    int major, minor;
    dev_t vfio_devt;

    path = g_strdup_printf("%s/vfio-dev", sysfs_path);
    dir = opendir(path);
    if (!dir) {
        error_setg_errno(errp, errno, "couldn't open directory %s", path);
        goto out_free_path;
    }

    while ((dent = readdir(dir))) {
        if (!strncmp(dent->d_name, "vfio", 4)) {
            vfio_dev_path = g_strdup_printf("%s/%s/dev", path, dent->d_name);
            break;
        }
    }

    if (!vfio_dev_path) {
        error_setg(errp, "failed to find vfio-dev/vfioX/dev");
        goto out_close_dir;
    }

    if (!g_file_get_contents(vfio_dev_path, &contents, &length, NULL)) {
        error_setg(errp, "failed to load \"%s\"", vfio_dev_path);
        goto out_free_dev_path;
    }

    if (sscanf(contents, "%d:%d", &major, &minor) != 2) {
        error_setg(errp, "failed to get major:minor for \"%s\"", vfio_dev_path);
        g_free(contents); /* don't leak the file contents on this error path */
        goto out_free_dev_path;
    }
    g_free(contents);
    vfio_devt = makedev(major, minor);

    vfio_path = g_strdup_printf("/dev/vfio/devices/%s", dent->d_name);
    ret = open_cdev(vfio_path, vfio_devt);
    if (ret < 0) {
        error_setg(errp, "Failed to open %s", vfio_path);
    }

    trace_iommufd_cdev_getfd(vfio_path, ret);
    g_free(vfio_path);

out_free_dev_path:
    g_free(vfio_dev_path);
out_close_dir:
    closedir(dir);
out_free_path:
    if (*errp) {
        error_prepend(errp, VFIO_MSG_PREFIX, path);
    }
    g_free(path);

    return ret;
}
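
/*
 * Example of the sysfs indirection resolved by iommufd_cdev_getfd(),
 * for a hypothetical PCI device 0000:01:00.0 (a sketch, not output
 * from a real system):
 *
 *     /sys/bus/pci/devices/0000:01:00.0/vfio-dev/vfio0/dev
 *         contains "511:0"           (major:minor, values made up)
 *     /dev/vfio/devices/vfio0
 *         is the character device carrying that major:minor
 *
 * open_cdev() checks that the /dev node really has the major:minor
 * advertised in sysfs before returning the open fd, so a stale or
 * spoofed /dev entry is rejected.
 */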

static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
                                         Error **errp)
{
    int ret, iommufd = vbasedev->iommufd->fd;
    struct vfio_device_attach_iommufd_pt attach_data = {
        .argsz = sizeof(attach_data),
        .flags = 0,
        .pt_id = id,
    };

    /* Attach device to an IOAS or hwpt within iommufd */
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data);
    if (ret) {
        error_setg_errno(errp, errno,
                         "[iommufd=%d] error attaching %s (%d) to id=%d",
                         iommufd, vbasedev->name, vbasedev->fd, id);
    } else {
        trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
                                            vbasedev->fd, id);
    }
    return ret;
}

static int iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
{
    int ret, iommufd = vbasedev->iommufd->fd;
    struct vfio_device_detach_iommufd_pt detach_data = {
        .argsz = sizeof(detach_data),
        .flags = 0,
    };

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data);
    if (ret) {
        error_setg_errno(errp, errno, "detach %s failed", vbasedev->name);
    } else {
        trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name);
    }
    return ret;
}

static int iommufd_cdev_attach_container(VFIODevice *vbasedev,
                                         VFIOIOMMUFDContainer *container,
                                         Error **errp)
{
    return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
}

static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
                                          VFIOIOMMUFDContainer *container)
{
    Error *err = NULL;

    if (iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
        error_report_err(err);
    }
}

static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;

    if (!QLIST_EMPTY(&bcontainer->device_list)) {
        return;
    }
    memory_listener_unregister(&bcontainer->listener);
    vfio_container_destroy(bcontainer);
    iommufd_backend_free_id(container->be, container->ioas_id);
    g_free(container);
}

static int iommufd_cdev_ram_block_discard_disable(bool state)
{
    /*
     * We support coordinated discarding of RAM via the RamDiscardManager.
     */
    return ram_block_uncoordinated_discard_disable(state);
}

static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
                                            uint32_t ioas_id, Error **errp)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;
    struct iommu_ioas_iova_ranges *info;
    struct iommu_iova_range *iova_ranges;
    int ret, sz, fd = container->be->fd;

    info = g_malloc0(sizeof(*info));
    info->size = sizeof(*info);
    info->ioas_id = ioas_id;

    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
    if (ret && errno != EMSGSIZE) {
        goto error;
    }

    sz = info->num_iovas * sizeof(struct iommu_iova_range);
    info = g_realloc(info, sizeof(*info) + sz);
    info->allowed_iovas = (uintptr_t)(info + 1);

    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
    if (ret) {
        goto error;
    }

    iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;

    for (int i = 0; i < info->num_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }
    bcontainer->pgsizes = info->out_iova_alignment;

    g_free(info);
    return 0;

error:
    ret = -errno; /* capture errno before g_free() can clobber it */
    g_free(info);
    error_setg_errno(errp, -ret, "Cannot get IOVA ranges");
    return ret;
}
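
/*
 * A minimal sketch of the two-pass IOMMU_IOAS_IOVA_RANGES pattern used
 * above, assuming "fd" is an iommufd and "ioas_id" a valid IOAS id:
 *
 *     struct iommu_ioas_iova_ranges probe = {
 *         .size = sizeof(probe),
 *         .ioas_id = ioas_id,
 *     };
 *     // First pass: num_iovas/allowed_iovas are zero, so the kernel
 *     // fills probe.num_iovas and fails with EMSGSIZE whenever at
 *     // least one range exists (ret == 0 with num_iovas == 0 is also
 *     // possible and falls through the loop above naturally).
 *     ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &probe);
 *
 * The second pass supplies room for num_iovas struct iommu_iova_range
 * entries via allowed_iovas; the code above places that array directly
 * behind the reallocated header.
 */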

static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
                               AddressSpace *as, Error **errp)
{
    VFIOContainerBase *bcontainer;
    VFIOIOMMUFDContainer *container;
    VFIOAddressSpace *space;
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, devfd;
    uint32_t ioas_id;
    Error *err = NULL;
    const VFIOIOMMUClass *iommufd_vioc =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));

    if (vbasedev->fd < 0) {
        devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp);
        if (devfd < 0) {
            return devfd;
        }
        vbasedev->fd = devfd;
    } else {
        devfd = vbasedev->fd;
    }

    ret = iommufd_cdev_connect_and_bind(vbasedev, errp);
    if (ret) {
        goto err_connect_bind;
    }

    space = vfio_get_address_space(as);

    /* try to attach to an existing container in this space */
    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
        if (bcontainer->ops != iommufd_vioc ||
            vbasedev->iommufd != container->be) {
            continue;
        }
        if (iommufd_cdev_attach_container(vbasedev, container, &err)) {
            const char *msg = error_get_pretty(err);

            trace_iommufd_cdev_fail_attach_existing_container(msg);
            error_free(err);
            err = NULL;
        } else {
            ret = iommufd_cdev_ram_block_discard_disable(true);
            if (ret) {
                error_setg(errp,
                           "Cannot disable uncoordinated discarding of RAM (%d)",
                           ret);
                goto err_discard_disable;
            }
            goto found_container;
        }
    }

    /* Need to allocate a new dedicated container */
    ret = iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp);
    if (ret < 0) {
        goto err_alloc_ioas;
    }

    trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id);

    container = g_malloc0(sizeof(*container));
    container->be = vbasedev->iommufd;
    container->ioas_id = ioas_id;

    bcontainer = &container->bcontainer;
    vfio_container_init(bcontainer, space, iommufd_vioc);
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);

    ret = iommufd_cdev_attach_container(vbasedev, container, errp);
    if (ret) {
        goto err_attach_container;
    }

    ret = iommufd_cdev_ram_block_discard_disable(true);
    if (ret) {
        goto err_discard_disable;
    }

    ret = iommufd_cdev_get_info_iova_range(container, ioas_id, &err);
    if (ret) {
        error_append_hint(&err,
                          "Falling back to default 64-bit IOVA range and "
                          "4K page size\n");
        warn_report_err(err);
        err = NULL;
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        ret = -1;
        error_propagate_prepend(errp, bcontainer->error,
                                "memory listener initialization failed: ");
        goto err_listener_register;
    }

    bcontainer->initialized = true;

found_container:
    ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        goto err_listener_register;
    }

    /*
     * TODO: examine RAM_BLOCK_DISCARD handling; should the discard
     * incompatibility check be done at the group level as well?
     */
    if (vbasedev->ram_block_discard_allowed) {
        iommufd_cdev_ram_block_discard_disable(false);
    }

    vbasedev->group = 0;
    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;
    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs,
                                   vbasedev->num_regions, vbasedev->flags);
    return 0;

err_listener_register:
    iommufd_cdev_ram_block_discard_disable(false);
err_discard_disable:
    iommufd_cdev_detach_container(vbasedev, container);
err_attach_container:
    iommufd_cdev_container_destroy(container);
err_alloc_ioas:
    vfio_put_address_space(space);
    iommufd_cdev_unbind_and_disconnect(vbasedev);
err_connect_bind:
    close(vbasedev->fd);
    return ret;
}
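
/*
 * Attach flow summary (illustrative): for two vfio-pci devices using
 * the same iommufd backend object in the same AddressSpace, the second
 * iommufd_cdev_attach() call takes the "found_container" path and
 * attaches to the already-allocated IOAS instead of creating a new
 * one, so both devices share one set of DMA mappings:
 *
 *     device A: alloc IOAS -> attach -> register memory listener
 *     device B: attach to A's IOAS (no new IOAS, no new listener)
 *
 * If VFIO_DEVICE_ATTACH_IOMMUFD_PT fails against an existing container
 * (e.g. an incompatible IOMMU domain), the loop falls through and a
 * dedicated container is allocated instead.
 */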

static void iommufd_cdev_detach(VFIODevice *vbasedev)
{
    VFIOContainerBase *bcontainer = vbasedev->bcontainer;
    VFIOAddressSpace *space = bcontainer->space;
    VFIOIOMMUFDContainer *container = container_of(bcontainer,
                                                   VFIOIOMMUFDContainer,
                                                   bcontainer);

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;

    if (!vbasedev->ram_block_discard_allowed) {
        iommufd_cdev_ram_block_discard_disable(false);
    }

    iommufd_cdev_detach_container(vbasedev, container);
    iommufd_cdev_container_destroy(container);
    vfio_put_address_space(space);

    iommufd_cdev_unbind_and_disconnect(vbasedev);
    close(vbasedev->fd);
}

static VFIODevice *iommufd_cdev_pci_find_by_devid(__u32 devid)
{
    VFIODevice *vbasedev_iter;
    const VFIOIOMMUClass *iommufd_vioc =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));

    QLIST_FOREACH(vbasedev_iter, &vfio_device_list, global_next) {
        if (vbasedev_iter->bcontainer->ops != iommufd_vioc) {
            continue;
        }
        if (devid == vbasedev_iter->devid) {
            return vbasedev_iter;
        }
    }
    return NULL;
}

static VFIOPCIDevice *
iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev,
                                    VFIODevice *reset_dev)
{
    VFIODevice *vbasedev_tmp;

    if (dep_dev->devid == reset_dev->devid ||
        dep_dev->devid == VFIO_PCI_DEVID_OWNED) {
        return NULL;
    }

    vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid);
    if (!vbasedev_tmp || !vbasedev_tmp->dev->realized ||
        vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    return container_of(vbasedev_tmp, VFIOPCIDevice, vbasedev);
}
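
/*
 * How the code above interprets vfio_pci_dependent_device.devid (a
 * reading of the checks in this file, not a restatement of the full
 * vfio.h UAPI):
 *
 *     == reset_dev->devid          the device being reset itself,
 *     == VFIO_PCI_DEVID_OWNED      owned; skipped, no pre/post-reset
 *                                  handling needed here,
 *     == VFIO_PCI_DEVID_NOT_OWNED  not owned; blocks the hot reset,
 *     anything else                an iommufd devid resolvable via
 *                                  iommufd_cdev_pci_find_by_devid().
 */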
"one" : "multi"); 519 520 if (!single) { 521 vfio_pci_pre_reset(vdev); 522 } 523 vdev->vbasedev.needs_reset = false; 524 525 ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); 526 527 if (ret) { 528 goto out_single; 529 } 530 531 assert(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID); 532 533 devices = &info->devices[0]; 534 535 if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) { 536 if (!vdev->has_pm_reset) { 537 for (i = 0; i < info->count; i++) { 538 if (devices[i].devid == VFIO_PCI_DEVID_NOT_OWNED) { 539 error_report("vfio: Cannot reset device %s, " 540 "depends on device %04x:%02x:%02x.%x " 541 "which is not owned.", 542 vdev->vbasedev.name, devices[i].segment, 543 devices[i].bus, PCI_SLOT(devices[i].devfn), 544 PCI_FUNC(devices[i].devfn)); 545 } 546 } 547 } 548 ret = -EPERM; 549 goto out_single; 550 } 551 552 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); 553 554 for (i = 0; i < info->count; i++) { 555 VFIOPCIDevice *tmp; 556 557 trace_iommufd_cdev_pci_hot_reset_dep_devices(devices[i].segment, 558 devices[i].bus, 559 PCI_SLOT(devices[i].devfn), 560 PCI_FUNC(devices[i].devfn), 561 devices[i].devid); 562 563 /* 564 * If a VFIO cdev device is resettable, all the dependent devices 565 * are either bound to same iommufd or within same iommu_groups as 566 * one of the iommufd bound devices. 567 */ 568 assert(devices[i].devid != VFIO_PCI_DEVID_NOT_OWNED); 569 570 tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); 571 if (!tmp) { 572 continue; 573 } 574 575 if (single) { 576 ret = -EINVAL; 577 goto out_single; 578 } 579 vfio_pci_pre_reset(tmp); 580 tmp->vbasedev.needs_reset = false; 581 multi = true; 582 } 583 584 if (!single && !multi) { 585 ret = -EINVAL; 586 goto out_single; 587 } 588 589 /* Use zero length array for hot reset with iommufd backend */ 590 reset = g_malloc0(sizeof(*reset)); 591 reset->argsz = sizeof(*reset); 592 593 /* Bus reset! */ 594 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); 595 g_free(reset); 596 if (ret) { 597 ret = -errno; 598 } 599 600 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, 601 ret ? strerror(errno) : "Success"); 602 603 /* Re-enable INTx on affected devices */ 604 for (i = 0; i < info->count; i++) { 605 VFIOPCIDevice *tmp; 606 607 tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); 608 if (!tmp) { 609 continue; 610 } 611 vfio_pci_post_reset(tmp); 612 } 613 out_single: 614 if (!single) { 615 vfio_pci_post_reset(vdev); 616 } 617 g_free(info); 618 619 return ret; 620 } 621 622 static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data) 623 { 624 VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); 625 626 vioc->dma_map = iommufd_cdev_map; 627 vioc->dma_unmap = iommufd_cdev_unmap; 628 vioc->attach_device = iommufd_cdev_attach; 629 vioc->detach_device = iommufd_cdev_detach; 630 vioc->pci_hot_reset = iommufd_cdev_pci_hot_reset; 631 }; 632 633 static const TypeInfo types[] = { 634 { 635 .name = TYPE_VFIO_IOMMU_IOMMUFD, 636 .parent = TYPE_VFIO_IOMMU, 637 .class_init = vfio_iommu_iommufd_class_init, 638 }, 639 }; 640 641 DEFINE_TYPES(types) 642