1 /* 2 * generic functions used by VFIO devices 3 * 4 * Copyright Red Hat, Inc. 2012 5 * 6 * Authors: 7 * Alex Williamson <alex.williamson@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Based on qemu-kvm device-assignment: 13 * Adapted for KVM by Qumranet. 14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) 15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) 16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) 17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) 18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) 19 */ 20 21 #include "qemu/osdep.h" 22 #include <sys/ioctl.h> 23 #include <linux/vfio.h> 24 25 #include "hw/vfio/vfio-common.h" 26 #include "exec/address-spaces.h" 27 #include "exec/memory.h" 28 #include "exec/ram_addr.h" 29 #include "hw/hw.h" 30 #include "qemu/error-report.h" 31 #include "qemu/range.h" 32 #include "sysemu/reset.h" 33 #include "trace.h" 34 #include "qapi/error.h" 35 #include "pci.h" 36 37 VFIOGroupList vfio_group_list = 38 QLIST_HEAD_INITIALIZER(vfio_group_list); 39 40 static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) 41 { 42 switch (container->iommu_type) { 43 case VFIO_TYPE1v2_IOMMU: 44 case VFIO_TYPE1_IOMMU: 45 /* 46 * We support coordinated discarding of RAM via the RamDiscardManager. 47 */ 48 return ram_block_uncoordinated_discard_disable(state); 49 default: 50 /* 51 * VFIO_SPAPR_TCE_IOMMU most probably works just fine with 52 * RamDiscardManager, however, it is completely untested. 53 * 54 * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does 55 * completely the opposite of managing mapping/pinning dynamically as 56 * required by RamDiscardManager. We would have to special-case sections 57 * with a RamDiscardManager. 58 */ 59 return ram_block_discard_disable(state); 60 } 61 } 62 63 static int vfio_dma_unmap_bitmap(const VFIOContainer *container, 64 hwaddr iova, ram_addr_t size, 65 IOMMUTLBEntry *iotlb) 66 { 67 const VFIOContainerBase *bcontainer = &container->bcontainer; 68 struct vfio_iommu_type1_dma_unmap *unmap; 69 struct vfio_bitmap *bitmap; 70 VFIOBitmap vbmap; 71 int ret; 72 73 ret = vfio_bitmap_alloc(&vbmap, size); 74 if (ret) { 75 return ret; 76 } 77 78 unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); 79 80 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); 81 unmap->iova = iova; 82 unmap->size = size; 83 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; 84 bitmap = (struct vfio_bitmap *)&unmap->data; 85 86 /* 87 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 88 * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize 89 * to qemu_real_host_page_size. 90 */ 91 bitmap->pgsize = qemu_real_host_page_size(); 92 bitmap->size = vbmap.size; 93 bitmap->data = (__u64 *)vbmap.bitmap; 94 95 if (vbmap.size > bcontainer->max_dirty_bitmap_size) { 96 error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); 97 ret = -E2BIG; 98 goto unmap_exit; 99 } 100 101 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); 102 if (!ret) { 103 cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, 104 iotlb->translated_addr, vbmap.pages); 105 } else { 106 error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); 107 } 108 109 unmap_exit: 110 g_free(unmap); 111 g_free(vbmap.bitmap); 112 113 return ret; 114 } 115 116 /* 117 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 118 */ 119 static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, 120 hwaddr iova, ram_addr_t size, 121 IOMMUTLBEntry *iotlb) 122 { 123 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 124 bcontainer); 125 struct vfio_iommu_type1_dma_unmap unmap = { 126 .argsz = sizeof(unmap), 127 .flags = 0, 128 .iova = iova, 129 .size = size, 130 }; 131 bool need_dirty_sync = false; 132 int ret; 133 Error *local_err = NULL; 134 135 if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) { 136 if (!vfio_devices_all_device_dirty_tracking(bcontainer) && 137 bcontainer->dirty_pages_supported) { 138 return vfio_dma_unmap_bitmap(container, iova, size, iotlb); 139 } 140 141 need_dirty_sync = true; 142 } 143 144 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { 145 /* 146 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c 147 * v4.15) where an overflow in its wrap-around check prevents us from 148 * unmapping the last page of the address space. Test for the error 149 * condition and re-try the unmap excluding the last page. The 150 * expectation is that we've never mapped the last page anyway and this 151 * unmap request comes via vIOMMU support which also makes it unlikely 152 * that this page is used. This bug was introduced well after type1 v2 153 * support was introduced, so we shouldn't need to test for v1. A fix 154 * is queued for kernel v5.0 so this workaround can be removed once 155 * affected kernels are sufficiently deprecated. 156 */ 157 if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && 158 container->iommu_type == VFIO_TYPE1v2_IOMMU) { 159 trace_vfio_legacy_dma_unmap_overflow_workaround(); 160 unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); 161 continue; 162 } 163 error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); 164 return -errno; 165 } 166 167 if (need_dirty_sync) { 168 ret = vfio_get_dirty_bitmap(bcontainer, iova, size, 169 iotlb->translated_addr, &local_err); 170 if (ret) { 171 error_report_err(local_err); 172 return ret; 173 } 174 } 175 176 return 0; 177 } 178 179 static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, 180 ram_addr_t size, void *vaddr, bool readonly) 181 { 182 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 183 bcontainer); 184 struct vfio_iommu_type1_dma_map map = { 185 .argsz = sizeof(map), 186 .flags = VFIO_DMA_MAP_FLAG_READ, 187 .vaddr = (__u64)(uintptr_t)vaddr, 188 .iova = iova, 189 .size = size, 190 }; 191 192 if (!readonly) { 193 map.flags |= VFIO_DMA_MAP_FLAG_WRITE; 194 } 195 196 /* 197 * Try the mapping, if it fails with EBUSY, unmap the region and try 198 * again. This shouldn't be necessary, but we sometimes see it in 199 * the VGA ROM space. 200 */ 201 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || 202 (errno == EBUSY && 203 vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 && 204 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { 205 return 0; 206 } 207 208 error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); 209 return -errno; 210 } 211 212 static int 213 vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, 214 bool start, Error **errp) 215 { 216 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 217 bcontainer); 218 int ret; 219 struct vfio_iommu_type1_dirty_bitmap dirty = { 220 .argsz = sizeof(dirty), 221 }; 222 223 if (start) { 224 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; 225 } else { 226 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; 227 } 228 229 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); 230 if (ret) { 231 ret = -errno; 232 error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x", 233 dirty.flags); 234 } 235 236 return ret; 237 } 238 239 static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, 240 VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) 241 { 242 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 243 bcontainer); 244 struct vfio_iommu_type1_dirty_bitmap *dbitmap; 245 struct vfio_iommu_type1_dirty_bitmap_get *range; 246 int ret; 247 248 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); 249 250 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); 251 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; 252 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; 253 range->iova = iova; 254 range->size = size; 255 256 /* 257 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 258 * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize 259 * to qemu_real_host_page_size. 260 */ 261 range->bitmap.pgsize = qemu_real_host_page_size(); 262 range->bitmap.size = vbmap->size; 263 range->bitmap.data = (__u64 *)vbmap->bitmap; 264 265 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); 266 if (ret) { 267 ret = -errno; 268 error_setg_errno(errp, errno, 269 "Failed to get dirty bitmap for iova: 0x%"PRIx64 270 " size: 0x%"PRIx64, (uint64_t)range->iova, 271 (uint64_t)range->size); 272 } 273 274 g_free(dbitmap); 275 276 return ret; 277 } 278 279 static struct vfio_info_cap_header * 280 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 281 { 282 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 283 return NULL; 284 } 285 286 return vfio_get_cap((void *)info, info->cap_offset, id); 287 } 288 289 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, 290 unsigned int *avail) 291 { 292 struct vfio_info_cap_header *hdr; 293 struct vfio_iommu_type1_info_dma_avail *cap; 294 295 /* If the capability cannot be found, assume no DMA limiting */ 296 hdr = vfio_get_iommu_type1_info_cap(info, 297 VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); 298 if (!hdr) { 299 return false; 300 } 301 302 if (avail != NULL) { 303 cap = (void *) hdr; 304 *avail = cap->avail; 305 } 306 307 return true; 308 } 309 310 static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, 311 VFIOContainerBase *bcontainer) 312 { 313 struct vfio_info_cap_header *hdr; 314 struct vfio_iommu_type1_info_cap_iova_range *cap; 315 316 hdr = vfio_get_iommu_type1_info_cap(info, 317 VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE); 318 if (!hdr) { 319 return false; 320 } 321 322 cap = (void *)hdr; 323 324 for (int i = 0; i < cap->nr_iovas; i++) { 325 Range *range = g_new(Range, 1); 326 327 range_set_bounds(range, cap->iova_ranges[i].start, 328 cap->iova_ranges[i].end); 329 bcontainer->iova_ranges = 330 range_list_insert(bcontainer->iova_ranges, range); 331 } 332 333 return true; 334 } 335 336 static void vfio_kvm_device_add_group(VFIOGroup *group) 337 { 338 Error *err = NULL; 339 340 if (vfio_kvm_device_add_fd(group->fd, &err)) { 341 error_reportf_err(err, "group ID %d: ", group->groupid); 342 } 343 } 344 345 static void vfio_kvm_device_del_group(VFIOGroup *group) 346 { 347 Error *err = NULL; 348 349 if (vfio_kvm_device_del_fd(group->fd, &err)) { 350 error_reportf_err(err, "group ID %d: ", group->groupid); 351 } 352 } 353 354 /* 355 * vfio_get_iommu_type - selects the richest iommu_type (v2 first) 356 */ 357 static int vfio_get_iommu_type(VFIOContainer *container, 358 Error **errp) 359 { 360 int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, 361 VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; 362 int i; 363 364 for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { 365 if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { 366 return iommu_types[i]; 367 } 368 } 369 error_setg(errp, "No available IOMMU models"); 370 return -EINVAL; 371 } 372 373 /* 374 * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type 375 */ 376 static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp) 377 { 378 ObjectClass *klass = NULL; 379 380 switch (iommu_type) { 381 case VFIO_TYPE1v2_IOMMU: 382 case VFIO_TYPE1_IOMMU: 383 klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY); 384 break; 385 case VFIO_SPAPR_TCE_v2_IOMMU: 386 case VFIO_SPAPR_TCE_IOMMU: 387 klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR); 388 break; 389 default: 390 g_assert_not_reached(); 391 }; 392 393 return VFIO_IOMMU_CLASS(klass); 394 } 395 396 static int vfio_set_iommu(VFIOContainer *container, int group_fd, 397 VFIOAddressSpace *space, Error **errp) 398 { 399 int iommu_type, ret; 400 const VFIOIOMMUClass *vioc; 401 402 iommu_type = vfio_get_iommu_type(container, errp); 403 if (iommu_type < 0) { 404 return iommu_type; 405 } 406 407 ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); 408 if (ret) { 409 error_setg_errno(errp, errno, "Failed to set group container"); 410 return -errno; 411 } 412 413 while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { 414 if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 415 /* 416 * On sPAPR, despite the IOMMU subdriver always advertises v1 and 417 * v2, the running platform may not support v2 and there is no 418 * way to guess it until an IOMMU group gets added to the container. 419 * So in case it fails with v2, try v1 as a fallback. 420 */ 421 iommu_type = VFIO_SPAPR_TCE_IOMMU; 422 continue; 423 } 424 error_setg_errno(errp, errno, "Failed to set iommu for container"); 425 return -errno; 426 } 427 428 container->iommu_type = iommu_type; 429 430 vioc = vfio_get_iommu_class(iommu_type, errp); 431 if (!vioc) { 432 error_setg(errp, "No available IOMMU models"); 433 return -EINVAL; 434 } 435 436 vfio_container_init(&container->bcontainer, space, vioc); 437 return 0; 438 } 439 440 static int vfio_get_iommu_info(VFIOContainer *container, 441 struct vfio_iommu_type1_info **info) 442 { 443 444 size_t argsz = sizeof(struct vfio_iommu_type1_info); 445 446 *info = g_new0(struct vfio_iommu_type1_info, 1); 447 again: 448 (*info)->argsz = argsz; 449 450 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { 451 g_free(*info); 452 *info = NULL; 453 return -errno; 454 } 455 456 if (((*info)->argsz > argsz)) { 457 argsz = (*info)->argsz; 458 *info = g_realloc(*info, argsz); 459 goto again; 460 } 461 462 return 0; 463 } 464 465 static struct vfio_info_cap_header * 466 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 467 { 468 struct vfio_info_cap_header *hdr; 469 void *ptr = info; 470 471 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 472 return NULL; 473 } 474 475 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 476 if (hdr->id == id) { 477 return hdr; 478 } 479 } 480 481 return NULL; 482 } 483 484 static void vfio_get_iommu_info_migration(VFIOContainer *container, 485 struct vfio_iommu_type1_info *info) 486 { 487 struct vfio_info_cap_header *hdr; 488 struct vfio_iommu_type1_info_cap_migration *cap_mig; 489 VFIOContainerBase *bcontainer = &container->bcontainer; 490 491 hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); 492 if (!hdr) { 493 return; 494 } 495 496 cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, 497 header); 498 499 /* 500 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 501 * qemu_real_host_page_size to mark those dirty. 502 */ 503 if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { 504 bcontainer->dirty_pages_supported = true; 505 bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; 506 bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; 507 } 508 } 509 510 static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) 511 { 512 VFIOContainer *container = container_of(bcontainer, VFIOContainer, 513 bcontainer); 514 g_autofree struct vfio_iommu_type1_info *info = NULL; 515 int ret; 516 517 ret = vfio_get_iommu_info(container, &info); 518 if (ret) { 519 error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); 520 return ret; 521 } 522 523 if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { 524 bcontainer->pgsizes = info->iova_pgsizes; 525 } else { 526 bcontainer->pgsizes = qemu_real_host_page_size(); 527 } 528 529 if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { 530 bcontainer->dma_max_mappings = 65535; 531 } 532 533 vfio_get_info_iova_range(info, bcontainer); 534 535 vfio_get_iommu_info_migration(container, info); 536 return 0; 537 } 538 539 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, 540 Error **errp) 541 { 542 VFIOContainer *container; 543 VFIOContainerBase *bcontainer; 544 int ret, fd; 545 VFIOAddressSpace *space; 546 547 space = vfio_get_address_space(as); 548 549 /* 550 * VFIO is currently incompatible with discarding of RAM insofar as the 551 * madvise to purge (zap) the page from QEMU's address space does not 552 * interact with the memory API and therefore leaves stale virtual to 553 * physical mappings in the IOMMU if the page was previously pinned. We 554 * therefore set discarding broken for each group added to a container, 555 * whether the container is used individually or shared. This provides 556 * us with options to allow devices within a group to opt-in and allow 557 * discarding, so long as it is done consistently for a group (for instance 558 * if the device is an mdev device where it is known that the host vendor 559 * driver will never pin pages outside of the working set of the guest 560 * driver, which would thus not be discarding candidates). 561 * 562 * The first opportunity to induce pinning occurs here where we attempt to 563 * attach the group to existing containers within the AddressSpace. If any 564 * pages are already zapped from the virtual address space, such as from 565 * previous discards, new pinning will cause valid mappings to be 566 * re-established. Likewise, when the overall MemoryListener for a new 567 * container is registered, a replay of mappings within the AddressSpace 568 * will occur, re-establishing any previously zapped pages as well. 569 * 570 * Especially virtio-balloon is currently only prevented from discarding 571 * new memory, it will not yet set ram_block_discard_set_required() and 572 * therefore, neither stops us here or deals with the sudden memory 573 * consumption of inflated memory. 574 * 575 * We do support discarding of memory coordinated via the RamDiscardManager 576 * with some IOMMU types. vfio_ram_block_discard_disable() handles the 577 * details once we know which type of IOMMU we are using. 578 */ 579 580 QLIST_FOREACH(bcontainer, &space->containers, next) { 581 container = container_of(bcontainer, VFIOContainer, bcontainer); 582 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { 583 ret = vfio_ram_block_discard_disable(container, true); 584 if (ret) { 585 error_setg_errno(errp, -ret, 586 "Cannot set discarding of RAM broken"); 587 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, 588 &container->fd)) { 589 error_report("vfio: error disconnecting group %d from" 590 " container", group->groupid); 591 } 592 return ret; 593 } 594 group->container = container; 595 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 596 vfio_kvm_device_add_group(group); 597 return 0; 598 } 599 } 600 601 fd = qemu_open_old("/dev/vfio/vfio", O_RDWR); 602 if (fd < 0) { 603 error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio"); 604 ret = -errno; 605 goto put_space_exit; 606 } 607 608 ret = ioctl(fd, VFIO_GET_API_VERSION); 609 if (ret != VFIO_API_VERSION) { 610 error_setg(errp, "supported vfio version: %d, " 611 "reported version: %d", VFIO_API_VERSION, ret); 612 ret = -EINVAL; 613 goto close_fd_exit; 614 } 615 616 container = g_malloc0(sizeof(*container)); 617 container->fd = fd; 618 bcontainer = &container->bcontainer; 619 620 ret = vfio_set_iommu(container, group->fd, space, errp); 621 if (ret) { 622 goto free_container_exit; 623 } 624 625 ret = vfio_cpr_register_container(bcontainer, errp); 626 if (ret) { 627 goto free_container_exit; 628 } 629 630 ret = vfio_ram_block_discard_disable(container, true); 631 if (ret) { 632 error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); 633 goto unregister_container_exit; 634 } 635 636 assert(bcontainer->ops->setup); 637 638 ret = bcontainer->ops->setup(bcontainer, errp); 639 if (ret) { 640 goto enable_discards_exit; 641 } 642 643 vfio_kvm_device_add_group(group); 644 645 QLIST_INIT(&container->group_list); 646 QLIST_INSERT_HEAD(&space->containers, bcontainer, next); 647 648 group->container = container; 649 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 650 651 bcontainer->listener = vfio_memory_listener; 652 memory_listener_register(&bcontainer->listener, bcontainer->space->as); 653 654 if (bcontainer->error) { 655 ret = -1; 656 error_propagate_prepend(errp, bcontainer->error, 657 "memory listener initialization failed: "); 658 goto listener_release_exit; 659 } 660 661 bcontainer->initialized = true; 662 663 return 0; 664 listener_release_exit: 665 QLIST_REMOVE(group, container_next); 666 QLIST_REMOVE(bcontainer, next); 667 vfio_kvm_device_del_group(group); 668 memory_listener_unregister(&bcontainer->listener); 669 if (bcontainer->ops->release) { 670 bcontainer->ops->release(bcontainer); 671 } 672 673 enable_discards_exit: 674 vfio_ram_block_discard_disable(container, false); 675 676 unregister_container_exit: 677 vfio_cpr_unregister_container(bcontainer); 678 679 free_container_exit: 680 g_free(container); 681 682 close_fd_exit: 683 close(fd); 684 685 put_space_exit: 686 vfio_put_address_space(space); 687 688 return ret; 689 } 690 691 static void vfio_disconnect_container(VFIOGroup *group) 692 { 693 VFIOContainer *container = group->container; 694 VFIOContainerBase *bcontainer = &container->bcontainer; 695 696 QLIST_REMOVE(group, container_next); 697 group->container = NULL; 698 699 /* 700 * Explicitly release the listener first before unset container, 701 * since unset may destroy the backend container if it's the last 702 * group. 703 */ 704 if (QLIST_EMPTY(&container->group_list)) { 705 memory_listener_unregister(&bcontainer->listener); 706 if (bcontainer->ops->release) { 707 bcontainer->ops->release(bcontainer); 708 } 709 } 710 711 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { 712 error_report("vfio: error disconnecting group %d from container", 713 group->groupid); 714 } 715 716 if (QLIST_EMPTY(&container->group_list)) { 717 VFIOAddressSpace *space = bcontainer->space; 718 719 vfio_container_destroy(bcontainer); 720 721 trace_vfio_disconnect_container(container->fd); 722 vfio_cpr_unregister_container(bcontainer); 723 close(container->fd); 724 g_free(container); 725 726 vfio_put_address_space(space); 727 } 728 } 729 730 static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) 731 { 732 ERRP_GUARD(); 733 VFIOGroup *group; 734 char path[32]; 735 struct vfio_group_status status = { .argsz = sizeof(status) }; 736 737 QLIST_FOREACH(group, &vfio_group_list, next) { 738 if (group->groupid == groupid) { 739 /* Found it. Now is it already in the right context? */ 740 if (group->container->bcontainer.space->as == as) { 741 return group; 742 } else { 743 error_setg(errp, "group %d used in multiple address spaces", 744 group->groupid); 745 return NULL; 746 } 747 } 748 } 749 750 group = g_malloc0(sizeof(*group)); 751 752 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); 753 group->fd = qemu_open_old(path, O_RDWR); 754 if (group->fd < 0) { 755 error_setg_errno(errp, errno, "failed to open %s", path); 756 goto free_group_exit; 757 } 758 759 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { 760 error_setg_errno(errp, errno, "failed to get group %d status", groupid); 761 goto close_fd_exit; 762 } 763 764 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { 765 error_setg(errp, "group %d is not viable", groupid); 766 error_append_hint(errp, 767 "Please ensure all devices within the iommu_group " 768 "are bound to their vfio bus driver.\n"); 769 goto close_fd_exit; 770 } 771 772 group->groupid = groupid; 773 QLIST_INIT(&group->device_list); 774 775 if (vfio_connect_container(group, as, errp)) { 776 error_prepend(errp, "failed to setup container for group %d: ", 777 groupid); 778 goto close_fd_exit; 779 } 780 781 QLIST_INSERT_HEAD(&vfio_group_list, group, next); 782 783 return group; 784 785 close_fd_exit: 786 close(group->fd); 787 788 free_group_exit: 789 g_free(group); 790 791 return NULL; 792 } 793 794 static void vfio_put_group(VFIOGroup *group) 795 { 796 if (!group || !QLIST_EMPTY(&group->device_list)) { 797 return; 798 } 799 800 if (!group->ram_block_discard_allowed) { 801 vfio_ram_block_discard_disable(group->container, false); 802 } 803 vfio_kvm_device_del_group(group); 804 vfio_disconnect_container(group); 805 QLIST_REMOVE(group, next); 806 trace_vfio_put_group(group->fd); 807 close(group->fd); 808 g_free(group); 809 } 810 811 static int vfio_get_device(VFIOGroup *group, const char *name, 812 VFIODevice *vbasedev, Error **errp) 813 { 814 g_autofree struct vfio_device_info *info = NULL; 815 int fd; 816 817 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); 818 if (fd < 0) { 819 error_setg_errno(errp, errno, "error getting device from group %d", 820 group->groupid); 821 error_append_hint(errp, 822 "Verify all devices in group %d are bound to vfio-<bus> " 823 "or pci-stub and not already in use\n", group->groupid); 824 return fd; 825 } 826 827 info = vfio_get_device_info(fd); 828 if (!info) { 829 error_setg_errno(errp, errno, "error getting device info"); 830 close(fd); 831 return -1; 832 } 833 834 /* 835 * Set discarding of RAM as not broken for this group if the driver knows 836 * the device operates compatibly with discarding. Setting must be 837 * consistent per group, but since compatibility is really only possible 838 * with mdev currently, we expect singleton groups. 839 */ 840 if (vbasedev->ram_block_discard_allowed != 841 group->ram_block_discard_allowed) { 842 if (!QLIST_EMPTY(&group->device_list)) { 843 error_setg(errp, "Inconsistent setting of support for discarding " 844 "RAM (e.g., balloon) within group"); 845 close(fd); 846 return -1; 847 } 848 849 if (!group->ram_block_discard_allowed) { 850 group->ram_block_discard_allowed = true; 851 vfio_ram_block_discard_disable(group->container, false); 852 } 853 } 854 855 vbasedev->fd = fd; 856 vbasedev->group = group; 857 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); 858 859 vbasedev->num_irqs = info->num_irqs; 860 vbasedev->num_regions = info->num_regions; 861 vbasedev->flags = info->flags; 862 863 trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs); 864 865 vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); 866 867 return 0; 868 } 869 870 static void vfio_put_base_device(VFIODevice *vbasedev) 871 { 872 if (!vbasedev->group) { 873 return; 874 } 875 QLIST_REMOVE(vbasedev, next); 876 vbasedev->group = NULL; 877 trace_vfio_put_base_device(vbasedev->fd); 878 close(vbasedev->fd); 879 } 880 881 static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) 882 { 883 char *tmp, group_path[PATH_MAX]; 884 g_autofree char *group_name = NULL; 885 int ret, groupid; 886 ssize_t len; 887 888 tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); 889 len = readlink(tmp, group_path, sizeof(group_path)); 890 g_free(tmp); 891 892 if (len <= 0 || len >= sizeof(group_path)) { 893 ret = len < 0 ? -errno : -ENAMETOOLONG; 894 error_setg_errno(errp, -ret, "no iommu_group found"); 895 return ret; 896 } 897 898 group_path[len] = 0; 899 900 group_name = g_path_get_basename(group_path); 901 if (sscanf(group_name, "%d", &groupid) != 1) { 902 error_setg_errno(errp, errno, "failed to read %s", group_path); 903 return -errno; 904 } 905 return groupid; 906 } 907 908 /* 909 * vfio_attach_device: attach a device to a security context 910 * @name and @vbasedev->name are likely to be different depending 911 * on the type of the device, hence the need for passing @name 912 */ 913 static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, 914 AddressSpace *as, Error **errp) 915 { 916 int groupid = vfio_device_groupid(vbasedev, errp); 917 VFIODevice *vbasedev_iter; 918 VFIOGroup *group; 919 VFIOContainerBase *bcontainer; 920 int ret; 921 922 if (groupid < 0) { 923 return groupid; 924 } 925 926 trace_vfio_attach_device(vbasedev->name, groupid); 927 928 group = vfio_get_group(groupid, as, errp); 929 if (!group) { 930 return -ENOENT; 931 } 932 933 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 934 if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { 935 error_setg(errp, "device is already attached"); 936 vfio_put_group(group); 937 return -EBUSY; 938 } 939 } 940 ret = vfio_get_device(group, name, vbasedev, errp); 941 if (ret) { 942 vfio_put_group(group); 943 return ret; 944 } 945 946 bcontainer = &group->container->bcontainer; 947 vbasedev->bcontainer = bcontainer; 948 QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); 949 QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); 950 951 return ret; 952 } 953 954 static void vfio_legacy_detach_device(VFIODevice *vbasedev) 955 { 956 VFIOGroup *group = vbasedev->group; 957 958 QLIST_REMOVE(vbasedev, global_next); 959 QLIST_REMOVE(vbasedev, container_next); 960 vbasedev->bcontainer = NULL; 961 trace_vfio_detach_device(vbasedev->name, group->groupid); 962 vfio_put_base_device(vbasedev); 963 vfio_put_group(group); 964 } 965 966 static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) 967 { 968 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); 969 VFIOGroup *group; 970 struct vfio_pci_hot_reset_info *info = NULL; 971 struct vfio_pci_dependent_device *devices; 972 struct vfio_pci_hot_reset *reset; 973 int32_t *fds; 974 int ret, i, count; 975 bool multi = false; 976 977 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); 978 979 if (!single) { 980 vfio_pci_pre_reset(vdev); 981 } 982 vdev->vbasedev.needs_reset = false; 983 984 ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); 985 986 if (ret) { 987 goto out_single; 988 } 989 devices = &info->devices[0]; 990 991 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); 992 993 /* Verify that we have all the groups required */ 994 for (i = 0; i < info->count; i++) { 995 PCIHostDeviceAddress host; 996 VFIOPCIDevice *tmp; 997 VFIODevice *vbasedev_iter; 998 999 host.domain = devices[i].segment; 1000 host.bus = devices[i].bus; 1001 host.slot = PCI_SLOT(devices[i].devfn); 1002 host.function = PCI_FUNC(devices[i].devfn); 1003 1004 trace_vfio_pci_hot_reset_dep_devices(host.domain, 1005 host.bus, host.slot, host.function, devices[i].group_id); 1006 1007 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { 1008 continue; 1009 } 1010 1011 QLIST_FOREACH(group, &vfio_group_list, next) { 1012 if (group->groupid == devices[i].group_id) { 1013 break; 1014 } 1015 } 1016 1017 if (!group) { 1018 if (!vdev->has_pm_reset) { 1019 error_report("vfio: Cannot reset device %s, " 1020 "depends on group %d which is not owned.", 1021 vdev->vbasedev.name, devices[i].group_id); 1022 } 1023 ret = -EPERM; 1024 goto out; 1025 } 1026 1027 /* Prep dependent devices for reset and clear our marker. */ 1028 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 1029 if (!vbasedev_iter->dev->realized || 1030 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { 1031 continue; 1032 } 1033 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); 1034 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { 1035 if (single) { 1036 ret = -EINVAL; 1037 goto out_single; 1038 } 1039 vfio_pci_pre_reset(tmp); 1040 tmp->vbasedev.needs_reset = false; 1041 multi = true; 1042 break; 1043 } 1044 } 1045 } 1046 1047 if (!single && !multi) { 1048 ret = -EINVAL; 1049 goto out_single; 1050 } 1051 1052 /* Determine how many group fds need to be passed */ 1053 count = 0; 1054 QLIST_FOREACH(group, &vfio_group_list, next) { 1055 for (i = 0; i < info->count; i++) { 1056 if (group->groupid == devices[i].group_id) { 1057 count++; 1058 break; 1059 } 1060 } 1061 } 1062 1063 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); 1064 reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); 1065 fds = &reset->group_fds[0]; 1066 1067 /* Fill in group fds */ 1068 QLIST_FOREACH(group, &vfio_group_list, next) { 1069 for (i = 0; i < info->count; i++) { 1070 if (group->groupid == devices[i].group_id) { 1071 fds[reset->count++] = group->fd; 1072 break; 1073 } 1074 } 1075 } 1076 1077 /* Bus reset! */ 1078 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); 1079 g_free(reset); 1080 if (ret) { 1081 ret = -errno; 1082 } 1083 1084 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, 1085 ret ? strerror(errno) : "Success"); 1086 1087 out: 1088 /* Re-enable INTx on affected devices */ 1089 for (i = 0; i < info->count; i++) { 1090 PCIHostDeviceAddress host; 1091 VFIOPCIDevice *tmp; 1092 VFIODevice *vbasedev_iter; 1093 1094 host.domain = devices[i].segment; 1095 host.bus = devices[i].bus; 1096 host.slot = PCI_SLOT(devices[i].devfn); 1097 host.function = PCI_FUNC(devices[i].devfn); 1098 1099 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { 1100 continue; 1101 } 1102 1103 QLIST_FOREACH(group, &vfio_group_list, next) { 1104 if (group->groupid == devices[i].group_id) { 1105 break; 1106 } 1107 } 1108 1109 if (!group) { 1110 break; 1111 } 1112 1113 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 1114 if (!vbasedev_iter->dev->realized || 1115 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { 1116 continue; 1117 } 1118 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); 1119 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { 1120 vfio_pci_post_reset(tmp); 1121 break; 1122 } 1123 } 1124 } 1125 out_single: 1126 if (!single) { 1127 vfio_pci_post_reset(vdev); 1128 } 1129 g_free(info); 1130 1131 return ret; 1132 } 1133 1134 static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data) 1135 { 1136 VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); 1137 1138 vioc->setup = vfio_legacy_setup; 1139 vioc->dma_map = vfio_legacy_dma_map; 1140 vioc->dma_unmap = vfio_legacy_dma_unmap; 1141 vioc->attach_device = vfio_legacy_attach_device; 1142 vioc->detach_device = vfio_legacy_detach_device; 1143 vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; 1144 vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; 1145 vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; 1146 }; 1147 1148 static const TypeInfo types[] = { 1149 { 1150 .name = TYPE_VFIO_IOMMU_LEGACY, 1151 .parent = TYPE_VFIO_IOMMU, 1152 .class_init = vfio_iommu_legacy_class_init, 1153 }, 1154 }; 1155 1156 DEFINE_TYPES(types) 1157