/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF)
                && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
    int ret;

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */

    bitmap->pgsize = qemu_real_host_page_size;
    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                   BITS_PER_BYTE;

    if (bitmap->size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
                     (uint64_t)bitmap->size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    bitmap->data = g_try_malloc0(bitmap->size);
    if (!bitmap->data) {
        ret = -ENOMEM;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
                                               iotlb->translated_addr, pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

    g_free(bitmap->data);
unmap_exit:
    g_free(unmap);
    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (iotlb && container->dirty_pages_supported &&
        vfio_devices_all_running_and_saving(container)) {
        return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
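/*
 * Resolve an IOMMU TLB entry down to host RAM: on success, fill in whichever
 * of @vaddr (host pointer), @ram_addr (QEMU ram_addr_t) and @read_only the
 * caller asked for; return false if the translation does not land in RAM or
 * is misaligned with respect to the IOMMU page size.
 */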
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing"
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
                                                     container->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
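            /* Unmap the lower half here; the second half follows below. */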
            llsize = int128_rshift(llsize, 1);
            ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%m)",
                             container, iova, int128_get64(llsize), ret);
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
{
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }
}

static void vfio_listener_log_global_start(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, true);
}

static void vfio_listener_log_global_stop(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, false);
}

static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
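    /*
     * One bit per host page, rounded up to a 64-bit multiple: e.g. a 1 GiB
     * range with 4 KiB host pages needs 262144 bits, i.e. 32 KiB of bitmap.
     */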
    range->bitmap.pgsize = qemu_real_host_page_size;

    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                           ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        int ret;

        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
    rcu_read_unlock();
}

static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu, &gdn.n);
                break;
            }
        }
        return 0;
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
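    /*
     * Dirty bitmaps are only accurate while every device in the container
     * has dirty page tracking enabled, so skip the sync otherwise.
     */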
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_dirty_tracking(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_global_start = vfio_listener_log_global_start,
    .log_global_stop = vfio_listener_log_global_stop,
    .log_sync = vfio_listener_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr == NULL) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

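    /* Trim the array down to the areas that actually have a size. */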
    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

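/*
 * Register @group with the per-VM KVM VFIO pseudo device, creating that
 * device on first use (see the comment above vfio_kvm_device_fd).
 */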
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{

    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if (((*info)->argsz > argsz)) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        ret = vfio_get_iommu_info(container, &info);

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        if (!ret) {
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * A newly created container comes with a default DMA window.
             * To keep region_add/del simple, remove it now and let the
             * iommu_listener callbacks create/remove windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    ram_block_discard_disable(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container, since
     * unsetting may destroy the backend container if this is the last group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        ram_block_discard_disable(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            ram_block_discard_disable(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        /* The kernel needs a larger buffer; grow it and retry the ioctl. */
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
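/*
 * Return true if region @region of @vbasedev advertises a capability of
 * type @cap_type in its region info, false otherwise.
 */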
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}