/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-device.h"
#include "system/address-spaces.h"
#include "system/memory.h"
#include "system/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "system/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "pci.h"
#include "hw/vfio/vfio-container.h"
#include "vfio-helpers.h"
#include "vfio-cpr.h"
#include "vfio-listener.h"

#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio"

typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
static VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);

static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

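/*
 * Unmap a DMA range and retrieve the dirty bitmap for it within the same
 * VFIO_IOMMU_UNMAP_DMA call (VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP), so that
 * writes by the device cannot be lost between unmapping and querying.
 * Pages reported dirty are forwarded to the memory API through
 * cpu_physical_memory_set_dirty_lebitmap().
 */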
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in the bitmap
     * to be of qemu_real_host_page_size() granularity, so set
     * bitmap->pgsize accordingly.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                                               iotlb->translated_addr,
                                               vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;
    Error *local_err = NULL;

    if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
        if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space. Test for the error
         * condition and re-try the unmap excluding the last page. The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used. This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1. A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size,
                                                iotlb->translated_addr,
                                                &local_err);
        if (ret) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

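/*
 * Map [iova, iova + size) to the host virtual address @vaddr through
 * VFIO_IOMMU_MAP_DMA. Read access is always requested; write access is
 * added unless @readonly.
 */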
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping; if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    return -errno;
}

static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start, Error **errp)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x",
                         dirty.flags);
    }

    return ret;
}

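/*
 * Read the dirty bitmap for [iova, iova + size) into @vbmap using
 * VFIO_IOMMU_DIRTY_PAGES with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP.
 * The caller is expected to have sized @vbmap for the range at
 * qemu_real_host_page_size() granularity.
 */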
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                          VFIOBitmap *vbmap, hwaddr iova,
                                          hwaddr size, Error **errp)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in the bitmap
     * to be of qemu_real_host_page_size() granularity, so set the bitmap's
     * pgsize accordingly.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_setg_errno(errp, errno,
                         "Failed to get dirty bitmap for iova: 0x%"PRIx64
                         " size: 0x%"PRIx64, (uint64_t)range->iova,
                         (uint64_t)range->size);
    }

    g_free(dbitmap);

    return ret;
}

static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}

static void vfio_group_add_kvm_device(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_group_del_kvm_device(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(int container_fd,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container_fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

/*
 * vfio_get_iommu_class_name - get the VFIOIOMMUClass name associated with a
 * given iommu_type
 */
static const char *vfio_get_iommu_class_name(int iommu_type)
{
    switch (iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        return TYPE_VFIO_IOMMU_LEGACY;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        return TYPE_VFIO_IOMMU_SPAPR;
    default:
        g_assert_not_reached();
    }
}

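/*
 * Attach the group to the container and select the IOMMU model. The kernel
 * refuses VFIO_SET_IOMMU on a container without any group attached, so the
 * VFIO_GROUP_SET_CONTAINER call must come first.
 */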
static bool vfio_set_iommu(int container_fd, int group_fd,
                           int *iommu_type, Error **errp)
{
    if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd)) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return false;
    }

    while (ioctl(container_fd, VFIO_SET_IOMMU, *iommu_type)) {
        if (*iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container. So in case it fails with v2, try v1 as a fallback.
             */
            *iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return false;
    }

    return true;
}

static VFIOContainer *vfio_create_container(int fd, VFIOGroup *group,
                                            Error **errp)
{
    int iommu_type;
    const char *vioc_name;
    VFIOContainer *container;

    iommu_type = vfio_get_iommu_type(fd, errp);
    if (iommu_type < 0) {
        return NULL;
    }

    if (!vfio_set_iommu(fd, group->fd, &iommu_type, errp)) {
        return NULL;
    }

    vioc_name = vfio_get_iommu_class_name(iommu_type);

    container = VFIO_IOMMU_LEGACY(object_new(vioc_name));
    container->fd = fd;
    container->iommu_type = iommu_type;
    return container;
}

static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

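/*
 * Parse the VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION capability, if present, to
 * learn whether the IOMMU supports dirty page tracking and with which
 * limits. Dirty tracking is only usable here if the host supports it at
 * qemu_real_host_page_size() granularity.
 */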
static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects dirty bitmaps at
     * qemu_real_host_page_size() granularity, so dirty tracking is only
     * enabled when the IOMMU supports that page size.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

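/*
 * VFIOIOMMUClass::setup handler: query VFIO_IOMMU_GET_INFO to seed the base
 * container with supported page sizes, the maximum number of DMA mappings,
 * the usable IOVA ranges, and dirty tracking capabilities. Reasonable
 * defaults are used when the kernel does not report a value.
 */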
static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return false;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return true;
}

static bool vfio_container_attach_discard_disable(VFIOContainer *container,
                                                  VFIOGroup *group,
                                                  Error **errp)
{
    int ret;

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned. We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared. This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for
     * instance if the device is an mdev device where it is known that the
     * host vendor driver will never pin pages outside of the working set of
     * the guest driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace. If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established. Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the
     * RamDiscardManager with some IOMMU types.
     * vfio_ram_block_discard_disable() handles the details once we know
     * which type of IOMMU we are using.
     */

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
            error_report("vfio: error disconnecting group %d from"
                         " container", group->groupid);
        }
    }
    return !ret;
}

static bool vfio_container_group_add(VFIOContainer *container, VFIOGroup *group,
                                     Error **errp)
{
    if (!vfio_container_attach_discard_disable(container, group, errp)) {
        return false;
    }
    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);
    vfio_group_add_kvm_device(group);
    return true;
}

static void vfio_container_group_del(VFIOContainer *container, VFIOGroup *group)
{
    QLIST_REMOVE(group, container_next);
    group->container = NULL;
    vfio_group_del_kvm_device(group);
    vfio_ram_block_discard_disable(container, false);
}

static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as,
                                   Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd = -1;
    VFIOAddressSpace *space;
    VFIOIOMMUClass *vioc = NULL;
    bool new_container = false;
    bool group_was_added = false;

    space = vfio_address_space_get(as);

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            return vfio_container_group_add(container, group, errp);
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp);
    if (fd < 0) {
        goto fail;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        goto fail;
    }

    container = vfio_create_container(fd, group, errp);
    if (!container) {
        goto fail;
    }
    new_container = true;
    bcontainer = &container->bcontainer;

    if (!vfio_cpr_register_container(bcontainer, errp)) {
        goto fail;
    }

    vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
    assert(vioc->setup);

    if (!vioc->setup(bcontainer, errp)) {
        goto fail;
    }

    vfio_address_space_insert(space, bcontainer);

    if (!vfio_container_group_add(container, group, errp)) {
        goto fail;
    }
    group_was_added = true;

    if (!vfio_listener_register(bcontainer, errp)) {
        goto fail;
    }

    bcontainer->initialized = true;

    return true;

fail:
    /*
     * bcontainer is NULL when we fail before creating a new container, so
     * only unregister the listener once a container actually exists.
     */
    if (new_container) {
        vfio_listener_unregister(bcontainer);
    }

    if (group_was_added) {
        vfio_container_group_del(container, group);
    }
    if (vioc && vioc->release) {
        vioc->release(bcontainer);
    }
    if (new_container) {
        vfio_cpr_unregister_container(bcontainer);
        object_unref(container);
    }
    if (fd >= 0) {
        close(fd);
    }
    vfio_address_space_put(space);

    return false;
}

static void vfio_container_disconnect(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unsetting the container,
     * since unsetting may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_unregister(bcontainer);
        if (vioc->release) {
            vioc->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        trace_vfio_container_disconnect(container->fd);
        vfio_cpr_unregister_container(bcontainer);
        close(container->fd);
        object_unref(container);

        vfio_address_space_put(space);
    }
}

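/*
 * Look up or create the VFIOGroup for @groupid. A group may only be used
 * in a single address space, so reuse requires a matching @as. New groups
 * are opened via /dev/vfio/<groupid>, checked for viability, and connected
 * to a container in @as before being added to the global group list.
 */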
static VFIOGroup *vfio_group_get(int groupid, AddressSpace *as, Error **errp)
{
    ERRP_GUARD();
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR, errp);
    if (group->fd < 0) {
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (!vfio_container_connect(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

static void vfio_group_put(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_group_del_kvm_device(group);
    vfio_container_disconnect(group);
    QLIST_REMOVE(group, next);
    trace_vfio_group_put(group->fd);
    close(group->fd);
    g_free(group);
}

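/*
 * Obtain a device fd for @name from @group via VFIO_GROUP_GET_DEVICE_FD and
 * fill in @vbasedev from the kernel's vfio_device_info (regions, IRQs,
 * flags), reconciling the group's RAM discard setting along the way.
 */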
static bool vfio_device_get(VFIOGroup *group, const char *name,
                            VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to "
                          "vfio-<bus> or pci-stub and not already in use\n",
                          group->groupid);
        return false;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return false;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding. Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return false;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_device_get(name, info->flags, info->num_regions,
                          info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return true;
}

static void vfio_device_put(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_device_put(vbasedev->fd);
    close(vbasedev->fd);
}

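/*
 * Derive the IOMMU group ID for @vbasedev by resolving the
 * <sysfsdev>/iommu_group symlink and parsing the basename of its target,
 * e.g. /sys/kernel/iommu_groups/26 -> 26.
 */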
static int vfio_device_get_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX];
    g_autofree char *group_name = NULL;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = g_path_get_basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}

/*
 * vfio_device_attach: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                      AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_get_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;

    if (groupid < 0) {
        return false;
    }

    trace_vfio_device_attach(vbasedev->name, groupid);

    group = vfio_group_get(groupid, as, errp);
    if (!group) {
        return false;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            goto group_put_exit;
        }
    }
    if (!vfio_device_get(group, name, vbasedev, errp)) {
        goto group_put_exit;
    }

    if (!vfio_device_hiod_create_and_realize(vbasedev,
                                             TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO,
                                             errp)) {
        goto device_put_exit;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return true;

device_put_exit:
    vfio_device_put(vbasedev);
group_put_exit:
    vfio_group_put(group);
    return false;
}

static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_device_detach(vbasedev->name, group->groupid);
    object_unref(vbasedev->hiod);
    vfio_device_put(vbasedev);
    vfio_group_put(group);
}

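/*
 * Perform a PCI bus reset that may affect devices beyond @vbasedev. The
 * kernel reports all dependent devices via
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO; the reset is only attempted when
 * every affected group is owned by us, and ownership is proven to the
 * kernel by passing the group fds in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
 */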
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);

    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

static void vfio_iommu_legacy_class_init(ObjectClass *klass, const void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->setup = vfio_legacy_setup;
    vioc->dma_map = vfio_legacy_dma_map;
    vioc->dma_unmap = vfio_legacy_dma_unmap;
    vioc->attach_device = vfio_legacy_attach_device;
    vioc->detach_device = vfio_legacy_detach_device;
    vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking;
    vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap;
    vioc->pci_hot_reset = vfio_legacy_pci_hot_reset;
}

static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
                                     Error **errp)
{
    VFIODevice *vdev = opaque;

    hiod->name = g_strdup(vdev->name);
    hiod->agent = opaque;

    return true;
}

static int hiod_legacy_vfio_get_cap(HostIOMMUDevice *hiod, int cap,
                                    Error **errp)
{
    switch (cap) {
    case HOST_IOMMU_DEVICE_CAP_AW_BITS:
        return vfio_device_get_aw_bits(hiod->agent);
    default:
        error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
        return -EINVAL;
    }
}

static GList *
hiod_legacy_vfio_get_iova_ranges(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_iova_ranges(vdev->bcontainer);
}

static uint64_t
hiod_legacy_vfio_get_page_size_mask(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_page_size_mask(vdev->bcontainer);
}

static void vfio_iommu_legacy_instance_init(Object *obj)
{
    VFIOContainer *container = VFIO_IOMMU_LEGACY(obj);

    QLIST_INIT(&container->group_list);
}

static void hiod_legacy_vfio_class_init(ObjectClass *oc, const void *data)
{
    HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);

    hioc->realize = hiod_legacy_vfio_realize;
    hioc->get_cap = hiod_legacy_vfio_get_cap;
    hioc->get_iova_ranges = hiod_legacy_vfio_get_iova_ranges;
    hioc->get_page_size_mask = hiod_legacy_vfio_get_page_size_mask;
}

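/*
 * QOM registration of the legacy VFIO IOMMU backend and the matching
 * HostIOMMUDevice type for devices attached through it.
 */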
static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_LEGACY,
        .parent = TYPE_VFIO_IOMMU,
        .instance_init = vfio_iommu_legacy_instance_init,
        .instance_size = sizeof(VFIOContainer),
        .class_init = vfio_iommu_legacy_class_init,
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO,
        .parent = TYPE_HOST_IOMMU_DEVICE,
        .class_init = hiod_legacy_vfio_class_init,
    }
};

DEFINE_TYPES(types)