1 /* 2 * generic functions used by VFIO devices 3 * 4 * Copyright Red Hat, Inc. 2012 5 * 6 * Authors: 7 * Alex Williamson <alex.williamson@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Based on qemu-kvm device-assignment: 13 * Adapted for KVM by Qumranet. 14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) 15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) 16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) 17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) 18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) 19 */ 20 21 #include "qemu/osdep.h" 22 #include <sys/ioctl.h> 23 #ifdef CONFIG_KVM 24 #include <linux/kvm.h> 25 #endif 26 #include <linux/vfio.h> 27 28 #include "hw/vfio/vfio-common.h" 29 #include "hw/vfio/vfio.h" 30 #include "exec/address-spaces.h" 31 #include "exec/memory.h" 32 #include "exec/ram_addr.h" 33 #include "hw/hw.h" 34 #include "qemu/error-report.h" 35 #include "qemu/main-loop.h" 36 #include "qemu/range.h" 37 #include "sysemu/kvm.h" 38 #include "sysemu/reset.h" 39 #include "sysemu/runstate.h" 40 #include "trace.h" 41 #include "qapi/error.h" 42 #include "migration/migration.h" 43 #include "sysemu/tpm.h" 44 45 VFIOGroupList vfio_group_list = 46 QLIST_HEAD_INITIALIZER(vfio_group_list); 47 static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = 48 QLIST_HEAD_INITIALIZER(vfio_address_spaces); 49 50 #ifdef CONFIG_KVM 51 /* 52 * We have a single VFIO pseudo device per KVM VM. Once created it lives 53 * for the life of the VM. Closing the file descriptor only drops our 54 * reference to it and the device's reference to kvm. Therefore once 55 * initialized, this file descriptor is only released on QEMU exit and 56 * we'll re-use it should another vfio device be attached before then. 57 */ 58 static int vfio_kvm_device_fd = -1; 59 #endif 60 61 /* 62 * Common VFIO interrupt disable 63 */ 64 void vfio_disable_irqindex(VFIODevice *vbasedev, int index) 65 { 66 struct vfio_irq_set irq_set = { 67 .argsz = sizeof(irq_set), 68 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, 69 .index = index, 70 .start = 0, 71 .count = 0, 72 }; 73 74 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 75 } 76 77 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index) 78 { 79 struct vfio_irq_set irq_set = { 80 .argsz = sizeof(irq_set), 81 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, 82 .index = index, 83 .start = 0, 84 .count = 1, 85 }; 86 87 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 88 } 89 90 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index) 91 { 92 struct vfio_irq_set irq_set = { 93 .argsz = sizeof(irq_set), 94 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, 95 .index = index, 96 .start = 0, 97 .count = 1, 98 }; 99 100 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 101 } 102 103 static inline const char *action_to_str(int action) 104 { 105 switch (action) { 106 case VFIO_IRQ_SET_ACTION_MASK: 107 return "MASK"; 108 case VFIO_IRQ_SET_ACTION_UNMASK: 109 return "UNMASK"; 110 case VFIO_IRQ_SET_ACTION_TRIGGER: 111 return "TRIGGER"; 112 default: 113 return "UNKNOWN ACTION"; 114 } 115 } 116 117 static const char *index_to_str(VFIODevice *vbasedev, int index) 118 { 119 if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { 120 return NULL; 121 } 122 123 switch (index) { 124 case VFIO_PCI_INTX_IRQ_INDEX: 125 return "INTX"; 126 case VFIO_PCI_MSI_IRQ_INDEX: 127 return "MSI"; 128 case VFIO_PCI_MSIX_IRQ_INDEX: 129 return "MSIX"; 130 case VFIO_PCI_ERR_IRQ_INDEX: 131 return "ERR"; 132 case VFIO_PCI_REQ_IRQ_INDEX: 133 return "REQ"; 134 default: 135 return NULL; 136 } 137 } 138 139 static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) 140 { 141 switch (container->iommu_type) { 142 case VFIO_TYPE1v2_IOMMU: 143 case VFIO_TYPE1_IOMMU: 144 /* 145 * We support coordinated discarding of RAM via the RamDiscardManager. 146 */ 147 return ram_block_uncoordinated_discard_disable(state); 148 default: 149 /* 150 * VFIO_SPAPR_TCE_IOMMU most probably works just fine with 151 * RamDiscardManager, however, it is completely untested. 152 * 153 * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does 154 * completely the opposite of managing mapping/pinning dynamically as 155 * required by RamDiscardManager. We would have to special-case sections 156 * with a RamDiscardManager. 157 */ 158 return ram_block_discard_disable(state); 159 } 160 } 161 162 int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, 163 int action, int fd, Error **errp) 164 { 165 struct vfio_irq_set *irq_set; 166 int argsz, ret = 0; 167 const char *name; 168 int32_t *pfd; 169 170 argsz = sizeof(*irq_set) + sizeof(*pfd); 171 172 irq_set = g_malloc0(argsz); 173 irq_set->argsz = argsz; 174 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; 175 irq_set->index = index; 176 irq_set->start = subindex; 177 irq_set->count = 1; 178 pfd = (int32_t *)&irq_set->data; 179 *pfd = fd; 180 181 if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { 182 ret = -errno; 183 } 184 g_free(irq_set); 185 186 if (!ret) { 187 return 0; 188 } 189 190 error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure"); 191 192 name = index_to_str(vbasedev, index); 193 if (name) { 194 error_prepend(errp, "%s-%d: ", name, subindex); 195 } else { 196 error_prepend(errp, "index %d-%d: ", index, subindex); 197 } 198 error_prepend(errp, 199 "Failed to %s %s eventfd signaling for interrupt ", 200 fd < 0 ? "tear down" : "set up", action_to_str(action)); 201 return ret; 202 } 203 204 /* 205 * IO Port/MMIO - Beware of the endians, VFIO is always little endian 206 */ 207 void vfio_region_write(void *opaque, hwaddr addr, 208 uint64_t data, unsigned size) 209 { 210 VFIORegion *region = opaque; 211 VFIODevice *vbasedev = region->vbasedev; 212 union { 213 uint8_t byte; 214 uint16_t word; 215 uint32_t dword; 216 uint64_t qword; 217 } buf; 218 219 switch (size) { 220 case 1: 221 buf.byte = data; 222 break; 223 case 2: 224 buf.word = cpu_to_le16(data); 225 break; 226 case 4: 227 buf.dword = cpu_to_le32(data); 228 break; 229 case 8: 230 buf.qword = cpu_to_le64(data); 231 break; 232 default: 233 hw_error("vfio: unsupported write size, %u bytes", size); 234 break; 235 } 236 237 if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { 238 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64 239 ",%d) failed: %m", 240 __func__, vbasedev->name, region->nr, 241 addr, data, size); 242 } 243 244 trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); 245 246 /* 247 * A read or write to a BAR always signals an INTx EOI. This will 248 * do nothing if not pending (including not in INTx mode). We assume 249 * that a BAR access is in response to an interrupt and that BAR 250 * accesses will service the interrupt. Unfortunately, we don't know 251 * which access will service the interrupt, so we're potentially 252 * getting quite a few host interrupts per guest interrupt. 253 */ 254 vbasedev->ops->vfio_eoi(vbasedev); 255 } 256 257 uint64_t vfio_region_read(void *opaque, 258 hwaddr addr, unsigned size) 259 { 260 VFIORegion *region = opaque; 261 VFIODevice *vbasedev = region->vbasedev; 262 union { 263 uint8_t byte; 264 uint16_t word; 265 uint32_t dword; 266 uint64_t qword; 267 } buf; 268 uint64_t data = 0; 269 270 if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { 271 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m", 272 __func__, vbasedev->name, region->nr, 273 addr, size); 274 return (uint64_t)-1; 275 } 276 switch (size) { 277 case 1: 278 data = buf.byte; 279 break; 280 case 2: 281 data = le16_to_cpu(buf.word); 282 break; 283 case 4: 284 data = le32_to_cpu(buf.dword); 285 break; 286 case 8: 287 data = le64_to_cpu(buf.qword); 288 break; 289 default: 290 hw_error("vfio: unsupported read size, %u bytes", size); 291 break; 292 } 293 294 trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); 295 296 /* Same as write above */ 297 vbasedev->ops->vfio_eoi(vbasedev); 298 299 return data; 300 } 301 302 const MemoryRegionOps vfio_region_ops = { 303 .read = vfio_region_read, 304 .write = vfio_region_write, 305 .endianness = DEVICE_LITTLE_ENDIAN, 306 .valid = { 307 .min_access_size = 1, 308 .max_access_size = 8, 309 }, 310 .impl = { 311 .min_access_size = 1, 312 .max_access_size = 8, 313 }, 314 }; 315 316 /* 317 * Device state interfaces 318 */ 319 320 bool vfio_mig_active(void) 321 { 322 VFIOGroup *group; 323 VFIODevice *vbasedev; 324 325 if (QLIST_EMPTY(&vfio_group_list)) { 326 return false; 327 } 328 329 QLIST_FOREACH(group, &vfio_group_list, next) { 330 QLIST_FOREACH(vbasedev, &group->device_list, next) { 331 if (vbasedev->migration_blocker) { 332 return false; 333 } 334 } 335 } 336 return true; 337 } 338 339 static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) 340 { 341 VFIOGroup *group; 342 VFIODevice *vbasedev; 343 MigrationState *ms = migrate_get_current(); 344 345 if (!migration_is_setup_or_active(ms->state)) { 346 return false; 347 } 348 349 QLIST_FOREACH(group, &container->group_list, container_next) { 350 QLIST_FOREACH(vbasedev, &group->device_list, next) { 351 VFIOMigration *migration = vbasedev->migration; 352 353 if (!migration) { 354 return false; 355 } 356 357 if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) 358 && (migration->device_state & VFIO_DEVICE_STATE_V1_RUNNING)) { 359 return false; 360 } 361 } 362 } 363 return true; 364 } 365 366 static bool vfio_devices_all_running_and_saving(VFIOContainer *container) 367 { 368 VFIOGroup *group; 369 VFIODevice *vbasedev; 370 MigrationState *ms = migrate_get_current(); 371 372 if (!migration_is_setup_or_active(ms->state)) { 373 return false; 374 } 375 376 QLIST_FOREACH(group, &container->group_list, container_next) { 377 QLIST_FOREACH(vbasedev, &group->device_list, next) { 378 VFIOMigration *migration = vbasedev->migration; 379 380 if (!migration) { 381 return false; 382 } 383 384 if ((migration->device_state & VFIO_DEVICE_STATE_V1_SAVING) && 385 (migration->device_state & VFIO_DEVICE_STATE_V1_RUNNING)) { 386 continue; 387 } else { 388 return false; 389 } 390 } 391 } 392 return true; 393 } 394 395 static int vfio_dma_unmap_bitmap(VFIOContainer *container, 396 hwaddr iova, ram_addr_t size, 397 IOMMUTLBEntry *iotlb) 398 { 399 struct vfio_iommu_type1_dma_unmap *unmap; 400 struct vfio_bitmap *bitmap; 401 uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size(); 402 int ret; 403 404 unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); 405 406 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); 407 unmap->iova = iova; 408 unmap->size = size; 409 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; 410 bitmap = (struct vfio_bitmap *)&unmap->data; 411 412 /* 413 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 414 * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize 415 * to qemu_real_host_page_size. 416 */ 417 418 bitmap->pgsize = qemu_real_host_page_size(); 419 bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / 420 BITS_PER_BYTE; 421 422 if (bitmap->size > container->max_dirty_bitmap_size) { 423 error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, 424 (uint64_t)bitmap->size); 425 ret = -E2BIG; 426 goto unmap_exit; 427 } 428 429 bitmap->data = g_try_malloc0(bitmap->size); 430 if (!bitmap->data) { 431 ret = -ENOMEM; 432 goto unmap_exit; 433 } 434 435 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); 436 if (!ret) { 437 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data, 438 iotlb->translated_addr, pages); 439 } else { 440 error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); 441 } 442 443 g_free(bitmap->data); 444 unmap_exit: 445 g_free(unmap); 446 return ret; 447 } 448 449 /* 450 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 451 */ 452 static int vfio_dma_unmap(VFIOContainer *container, 453 hwaddr iova, ram_addr_t size, 454 IOMMUTLBEntry *iotlb) 455 { 456 struct vfio_iommu_type1_dma_unmap unmap = { 457 .argsz = sizeof(unmap), 458 .flags = 0, 459 .iova = iova, 460 .size = size, 461 }; 462 463 if (iotlb && container->dirty_pages_supported && 464 vfio_devices_all_running_and_saving(container)) { 465 return vfio_dma_unmap_bitmap(container, iova, size, iotlb); 466 } 467 468 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { 469 /* 470 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c 471 * v4.15) where an overflow in its wrap-around check prevents us from 472 * unmapping the last page of the address space. Test for the error 473 * condition and re-try the unmap excluding the last page. The 474 * expectation is that we've never mapped the last page anyway and this 475 * unmap request comes via vIOMMU support which also makes it unlikely 476 * that this page is used. This bug was introduced well after type1 v2 477 * support was introduced, so we shouldn't need to test for v1. A fix 478 * is queued for kernel v5.0 so this workaround can be removed once 479 * affected kernels are sufficiently deprecated. 480 */ 481 if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && 482 container->iommu_type == VFIO_TYPE1v2_IOMMU) { 483 trace_vfio_dma_unmap_overflow_workaround(); 484 unmap.size -= 1ULL << ctz64(container->pgsizes); 485 continue; 486 } 487 error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); 488 return -errno; 489 } 490 491 return 0; 492 } 493 494 static int vfio_dma_map(VFIOContainer *container, hwaddr iova, 495 ram_addr_t size, void *vaddr, bool readonly) 496 { 497 struct vfio_iommu_type1_dma_map map = { 498 .argsz = sizeof(map), 499 .flags = VFIO_DMA_MAP_FLAG_READ, 500 .vaddr = (__u64)(uintptr_t)vaddr, 501 .iova = iova, 502 .size = size, 503 }; 504 505 if (!readonly) { 506 map.flags |= VFIO_DMA_MAP_FLAG_WRITE; 507 } 508 509 /* 510 * Try the mapping, if it fails with EBUSY, unmap the region and try 511 * again. This shouldn't be necessary, but we sometimes see it in 512 * the VGA ROM space. 513 */ 514 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || 515 (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 && 516 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { 517 return 0; 518 } 519 520 error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); 521 return -errno; 522 } 523 524 static void vfio_host_win_add(VFIOContainer *container, 525 hwaddr min_iova, hwaddr max_iova, 526 uint64_t iova_pgsizes) 527 { 528 VFIOHostDMAWindow *hostwin; 529 530 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 531 if (ranges_overlap(hostwin->min_iova, 532 hostwin->max_iova - hostwin->min_iova + 1, 533 min_iova, 534 max_iova - min_iova + 1)) { 535 hw_error("%s: Overlapped IOMMU are not enabled", __func__); 536 } 537 } 538 539 hostwin = g_malloc0(sizeof(*hostwin)); 540 541 hostwin->min_iova = min_iova; 542 hostwin->max_iova = max_iova; 543 hostwin->iova_pgsizes = iova_pgsizes; 544 QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); 545 } 546 547 static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova, 548 hwaddr max_iova) 549 { 550 VFIOHostDMAWindow *hostwin; 551 552 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 553 if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { 554 QLIST_REMOVE(hostwin, hostwin_next); 555 g_free(hostwin); 556 return 0; 557 } 558 } 559 560 return -1; 561 } 562 563 static bool vfio_listener_skipped_section(MemoryRegionSection *section) 564 { 565 return (!memory_region_is_ram(section->mr) && 566 !memory_region_is_iommu(section->mr)) || 567 memory_region_is_protected(section->mr) || 568 /* 569 * Sizing an enabled 64-bit BAR can cause spurious mappings to 570 * addresses in the upper part of the 64-bit address space. These 571 * are never accessed by the CPU and beyond the address width of 572 * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. 573 */ 574 section->offset_within_address_space & (1ULL << 63); 575 } 576 577 /* Called with rcu_read_lock held. */ 578 static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, 579 ram_addr_t *ram_addr, bool *read_only) 580 { 581 MemoryRegion *mr; 582 hwaddr xlat; 583 hwaddr len = iotlb->addr_mask + 1; 584 bool writable = iotlb->perm & IOMMU_WO; 585 586 /* 587 * The IOMMU TLB entry we have just covers translation through 588 * this IOMMU to its immediate target. We need to translate 589 * it the rest of the way through to memory. 590 */ 591 mr = address_space_translate(&address_space_memory, 592 iotlb->translated_addr, 593 &xlat, &len, writable, 594 MEMTXATTRS_UNSPECIFIED); 595 if (!memory_region_is_ram(mr)) { 596 error_report("iommu map to non memory area %"HWADDR_PRIx"", 597 xlat); 598 return false; 599 } else if (memory_region_has_ram_discard_manager(mr)) { 600 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); 601 MemoryRegionSection tmp = { 602 .mr = mr, 603 .offset_within_region = xlat, 604 .size = int128_make64(len), 605 }; 606 607 /* 608 * Malicious VMs can map memory into the IOMMU, which is expected 609 * to remain discarded. vfio will pin all pages, populating memory. 610 * Disallow that. vmstate priorities make sure any RamDiscardManager 611 * were already restored before IOMMUs are restored. 612 */ 613 if (!ram_discard_manager_is_populated(rdm, &tmp)) { 614 error_report("iommu map to discarded memory (e.g., unplugged via" 615 " virtio-mem): %"HWADDR_PRIx"", 616 iotlb->translated_addr); 617 return false; 618 } 619 620 /* 621 * Malicious VMs might trigger discarding of IOMMU-mapped memory. The 622 * pages will remain pinned inside vfio until unmapped, resulting in a 623 * higher memory consumption than expected. If memory would get 624 * populated again later, there would be an inconsistency between pages 625 * pinned by vfio and pages seen by QEMU. This is the case until 626 * unmapped from the IOMMU (e.g., during device reset). 627 * 628 * With malicious guests, we really only care about pinning more memory 629 * than expected. RLIMIT_MEMLOCK set for the user/process can never be 630 * exceeded and can be used to mitigate this problem. 631 */ 632 warn_report_once("Using vfio with vIOMMUs and coordinated discarding of" 633 " RAM (e.g., virtio-mem) works, however, malicious" 634 " guests can trigger pinning of more memory than" 635 " intended via an IOMMU. It's possible to mitigate " 636 " by setting/adjusting RLIMIT_MEMLOCK."); 637 } 638 639 /* 640 * Translation truncates length to the IOMMU page size, 641 * check that it did not truncate too much. 642 */ 643 if (len & iotlb->addr_mask) { 644 error_report("iommu has granularity incompatible with target AS"); 645 return false; 646 } 647 648 if (vaddr) { 649 *vaddr = memory_region_get_ram_ptr(mr) + xlat; 650 } 651 652 if (ram_addr) { 653 *ram_addr = memory_region_get_ram_addr(mr) + xlat; 654 } 655 656 if (read_only) { 657 *read_only = !writable || mr->readonly; 658 } 659 660 return true; 661 } 662 663 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 664 { 665 VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); 666 VFIOContainer *container = giommu->container; 667 hwaddr iova = iotlb->iova + giommu->iommu_offset; 668 void *vaddr; 669 int ret; 670 671 trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP", 672 iova, iova + iotlb->addr_mask); 673 674 if (iotlb->target_as != &address_space_memory) { 675 error_report("Wrong target AS \"%s\", only system memory is allowed", 676 iotlb->target_as->name ? iotlb->target_as->name : "none"); 677 return; 678 } 679 680 rcu_read_lock(); 681 682 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { 683 bool read_only; 684 685 if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) { 686 goto out; 687 } 688 /* 689 * vaddr is only valid until rcu_read_unlock(). But after 690 * vfio_dma_map has set up the mapping the pages will be 691 * pinned by the kernel. This makes sure that the RAM backend 692 * of vaddr will always be there, even if the memory object is 693 * destroyed and its backing memory munmap-ed. 694 */ 695 ret = vfio_dma_map(container, iova, 696 iotlb->addr_mask + 1, vaddr, 697 read_only); 698 if (ret) { 699 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " 700 "0x%"HWADDR_PRIx", %p) = %d (%m)", 701 container, iova, 702 iotlb->addr_mask + 1, vaddr, ret); 703 } 704 } else { 705 ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); 706 if (ret) { 707 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " 708 "0x%"HWADDR_PRIx") = %d (%m)", 709 container, iova, 710 iotlb->addr_mask + 1, ret); 711 } 712 } 713 out: 714 rcu_read_unlock(); 715 } 716 717 static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, 718 MemoryRegionSection *section) 719 { 720 VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, 721 listener); 722 const hwaddr size = int128_get64(section->size); 723 const hwaddr iova = section->offset_within_address_space; 724 int ret; 725 726 /* Unmap with a single call. */ 727 ret = vfio_dma_unmap(vrdl->container, iova, size , NULL); 728 if (ret) { 729 error_report("%s: vfio_dma_unmap() failed: %s", __func__, 730 strerror(-ret)); 731 } 732 } 733 734 static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, 735 MemoryRegionSection *section) 736 { 737 VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, 738 listener); 739 const hwaddr end = section->offset_within_region + 740 int128_get64(section->size); 741 hwaddr start, next, iova; 742 void *vaddr; 743 int ret; 744 745 /* 746 * Map in (aligned within memory region) minimum granularity, so we can 747 * unmap in minimum granularity later. 748 */ 749 for (start = section->offset_within_region; start < end; start = next) { 750 next = ROUND_UP(start + 1, vrdl->granularity); 751 next = MIN(next, end); 752 753 iova = start - section->offset_within_region + 754 section->offset_within_address_space; 755 vaddr = memory_region_get_ram_ptr(section->mr) + start; 756 757 ret = vfio_dma_map(vrdl->container, iova, next - start, 758 vaddr, section->readonly); 759 if (ret) { 760 /* Rollback */ 761 vfio_ram_discard_notify_discard(rdl, section); 762 return ret; 763 } 764 } 765 return 0; 766 } 767 768 static void vfio_register_ram_discard_listener(VFIOContainer *container, 769 MemoryRegionSection *section) 770 { 771 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); 772 VFIORamDiscardListener *vrdl; 773 774 /* Ignore some corner cases not relevant in practice. */ 775 g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE)); 776 g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space, 777 TARGET_PAGE_SIZE)); 778 g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE)); 779 780 vrdl = g_new0(VFIORamDiscardListener, 1); 781 vrdl->container = container; 782 vrdl->mr = section->mr; 783 vrdl->offset_within_address_space = section->offset_within_address_space; 784 vrdl->size = int128_get64(section->size); 785 vrdl->granularity = ram_discard_manager_get_min_granularity(rdm, 786 section->mr); 787 788 g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); 789 g_assert(container->pgsizes && 790 vrdl->granularity >= 1ULL << ctz64(container->pgsizes)); 791 792 ram_discard_listener_init(&vrdl->listener, 793 vfio_ram_discard_notify_populate, 794 vfio_ram_discard_notify_discard, true); 795 ram_discard_manager_register_listener(rdm, &vrdl->listener, section); 796 QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next); 797 798 /* 799 * Sanity-check if we have a theoretically problematic setup where we could 800 * exceed the maximum number of possible DMA mappings over time. We assume 801 * that each mapped section in the same address space as a RamDiscardManager 802 * section consumes exactly one DMA mapping, with the exception of 803 * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections 804 * in the same address space as RamDiscardManager sections. 805 * 806 * We assume that each section in the address space consumes one memslot. 807 * We take the number of KVM memory slots as a best guess for the maximum 808 * number of sections in the address space we could have over time, 809 * also consuming DMA mappings. 810 */ 811 if (container->dma_max_mappings) { 812 unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; 813 814 #ifdef CONFIG_KVM 815 if (kvm_enabled()) { 816 max_memslots = kvm_get_max_memslots(); 817 } 818 #endif 819 820 QLIST_FOREACH(vrdl, &container->vrdl_list, next) { 821 hwaddr start, end; 822 823 start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, 824 vrdl->granularity); 825 end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size, 826 vrdl->granularity); 827 vrdl_mappings += (end - start) / vrdl->granularity; 828 vrdl_count++; 829 } 830 831 if (vrdl_mappings + max_memslots - vrdl_count > 832 container->dma_max_mappings) { 833 warn_report("%s: possibly running out of DMA mappings. E.g., try" 834 " increasing the 'block-size' of virtio-mem devies." 835 " Maximum possible DMA mappings: %d, Maximum possible" 836 " memslots: %d", __func__, container->dma_max_mappings, 837 max_memslots); 838 } 839 } 840 } 841 842 static void vfio_unregister_ram_discard_listener(VFIOContainer *container, 843 MemoryRegionSection *section) 844 { 845 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); 846 VFIORamDiscardListener *vrdl = NULL; 847 848 QLIST_FOREACH(vrdl, &container->vrdl_list, next) { 849 if (vrdl->mr == section->mr && 850 vrdl->offset_within_address_space == 851 section->offset_within_address_space) { 852 break; 853 } 854 } 855 856 if (!vrdl) { 857 hw_error("vfio: Trying to unregister missing RAM discard listener"); 858 } 859 860 ram_discard_manager_unregister_listener(rdm, &vrdl->listener); 861 QLIST_REMOVE(vrdl, next); 862 g_free(vrdl); 863 } 864 865 static bool vfio_known_safe_misalignment(MemoryRegionSection *section) 866 { 867 MemoryRegion *mr = section->mr; 868 869 if (!TPM_IS_CRB(mr->owner)) { 870 return false; 871 } 872 873 /* this is a known safe misaligned region, just trace for debug purpose */ 874 trace_vfio_known_safe_misalignment(memory_region_name(mr), 875 section->offset_within_address_space, 876 section->offset_within_region, 877 qemu_real_host_page_size()); 878 return true; 879 } 880 881 static void vfio_listener_region_add(MemoryListener *listener, 882 MemoryRegionSection *section) 883 { 884 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 885 hwaddr iova, end; 886 Int128 llend, llsize; 887 void *vaddr; 888 int ret; 889 VFIOHostDMAWindow *hostwin; 890 bool hostwin_found; 891 Error *err = NULL; 892 893 if (vfio_listener_skipped_section(section)) { 894 trace_vfio_listener_region_add_skip( 895 section->offset_within_address_space, 896 section->offset_within_address_space + 897 int128_get64(int128_sub(section->size, int128_one()))); 898 return; 899 } 900 901 if (unlikely((section->offset_within_address_space & 902 ~qemu_real_host_page_mask()) != 903 (section->offset_within_region & ~qemu_real_host_page_mask()))) { 904 if (!vfio_known_safe_misalignment(section)) { 905 error_report("%s received unaligned region %s iova=0x%"PRIx64 906 " offset_within_region=0x%"PRIx64 907 " qemu_real_host_page_size=0x%"PRIxPTR, 908 __func__, memory_region_name(section->mr), 909 section->offset_within_address_space, 910 section->offset_within_region, 911 qemu_real_host_page_size()); 912 } 913 return; 914 } 915 916 iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); 917 llend = int128_make64(section->offset_within_address_space); 918 llend = int128_add(llend, section->size); 919 llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); 920 921 if (int128_ge(int128_make64(iova), llend)) { 922 if (memory_region_is_ram_device(section->mr)) { 923 trace_vfio_listener_region_add_no_dma_map( 924 memory_region_name(section->mr), 925 section->offset_within_address_space, 926 int128_getlo(section->size), 927 qemu_real_host_page_size()); 928 } 929 return; 930 } 931 end = int128_get64(int128_sub(llend, int128_one())); 932 933 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 934 hwaddr pgsize = 0; 935 936 /* For now intersections are not allowed, we may relax this later */ 937 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 938 if (ranges_overlap(hostwin->min_iova, 939 hostwin->max_iova - hostwin->min_iova + 1, 940 section->offset_within_address_space, 941 int128_get64(section->size))) { 942 error_setg(&err, 943 "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing" 944 "host DMA window [0x%"PRIx64",0x%"PRIx64"]", 945 section->offset_within_address_space, 946 section->offset_within_address_space + 947 int128_get64(section->size) - 1, 948 hostwin->min_iova, hostwin->max_iova); 949 goto fail; 950 } 951 } 952 953 ret = vfio_spapr_create_window(container, section, &pgsize); 954 if (ret) { 955 error_setg_errno(&err, -ret, "Failed to create SPAPR window"); 956 goto fail; 957 } 958 959 vfio_host_win_add(container, section->offset_within_address_space, 960 section->offset_within_address_space + 961 int128_get64(section->size) - 1, pgsize); 962 #ifdef CONFIG_KVM 963 if (kvm_enabled()) { 964 VFIOGroup *group; 965 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); 966 struct kvm_vfio_spapr_tce param; 967 struct kvm_device_attr attr = { 968 .group = KVM_DEV_VFIO_GROUP, 969 .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE, 970 .addr = (uint64_t)(unsigned long)¶m, 971 }; 972 973 if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD, 974 ¶m.tablefd)) { 975 QLIST_FOREACH(group, &container->group_list, container_next) { 976 param.groupfd = group->fd; 977 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 978 error_report("vfio: failed to setup fd %d " 979 "for a group with fd %d: %s", 980 param.tablefd, param.groupfd, 981 strerror(errno)); 982 return; 983 } 984 trace_vfio_spapr_group_attach(param.groupfd, param.tablefd); 985 } 986 } 987 } 988 #endif 989 } 990 991 hostwin_found = false; 992 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 993 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { 994 hostwin_found = true; 995 break; 996 } 997 } 998 999 if (!hostwin_found) { 1000 error_setg(&err, "Container %p can't map guest IOVA region" 1001 " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); 1002 goto fail; 1003 } 1004 1005 memory_region_ref(section->mr); 1006 1007 if (memory_region_is_iommu(section->mr)) { 1008 VFIOGuestIOMMU *giommu; 1009 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); 1010 int iommu_idx; 1011 1012 trace_vfio_listener_region_add_iommu(iova, end); 1013 /* 1014 * FIXME: For VFIO iommu types which have KVM acceleration to 1015 * avoid bouncing all map/unmaps through qemu this way, this 1016 * would be the right place to wire that up (tell the KVM 1017 * device emulation the VFIO iommu handles to use). 1018 */ 1019 giommu = g_malloc0(sizeof(*giommu)); 1020 giommu->iommu_mr = iommu_mr; 1021 giommu->iommu_offset = section->offset_within_address_space - 1022 section->offset_within_region; 1023 giommu->container = container; 1024 llend = int128_add(int128_make64(section->offset_within_region), 1025 section->size); 1026 llend = int128_sub(llend, int128_one()); 1027 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, 1028 MEMTXATTRS_UNSPECIFIED); 1029 iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, 1030 IOMMU_NOTIFIER_IOTLB_EVENTS, 1031 section->offset_within_region, 1032 int128_get64(llend), 1033 iommu_idx); 1034 1035 ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr, 1036 container->pgsizes, 1037 &err); 1038 if (ret) { 1039 g_free(giommu); 1040 goto fail; 1041 } 1042 1043 ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, 1044 &err); 1045 if (ret) { 1046 g_free(giommu); 1047 goto fail; 1048 } 1049 QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); 1050 memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); 1051 1052 return; 1053 } 1054 1055 /* Here we assume that memory_region_is_ram(section->mr)==true */ 1056 1057 /* 1058 * For RAM memory regions with a RamDiscardManager, we only want to map the 1059 * actually populated parts - and update the mapping whenever we're notified 1060 * about changes. 1061 */ 1062 if (memory_region_has_ram_discard_manager(section->mr)) { 1063 vfio_register_ram_discard_listener(container, section); 1064 return; 1065 } 1066 1067 vaddr = memory_region_get_ram_ptr(section->mr) + 1068 section->offset_within_region + 1069 (iova - section->offset_within_address_space); 1070 1071 trace_vfio_listener_region_add_ram(iova, end, vaddr); 1072 1073 llsize = int128_sub(llend, int128_make64(iova)); 1074 1075 if (memory_region_is_ram_device(section->mr)) { 1076 hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; 1077 1078 if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { 1079 trace_vfio_listener_region_add_no_dma_map( 1080 memory_region_name(section->mr), 1081 section->offset_within_address_space, 1082 int128_getlo(section->size), 1083 pgmask + 1); 1084 return; 1085 } 1086 } 1087 1088 ret = vfio_dma_map(container, iova, int128_get64(llsize), 1089 vaddr, section->readonly); 1090 if (ret) { 1091 error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " 1092 "0x%"HWADDR_PRIx", %p) = %d (%m)", 1093 container, iova, int128_get64(llsize), vaddr, ret); 1094 if (memory_region_is_ram_device(section->mr)) { 1095 /* Allow unexpected mappings not to be fatal for RAM devices */ 1096 error_report_err(err); 1097 return; 1098 } 1099 goto fail; 1100 } 1101 1102 return; 1103 1104 fail: 1105 if (memory_region_is_ram_device(section->mr)) { 1106 error_report("failed to vfio_dma_map. pci p2p may not work"); 1107 return; 1108 } 1109 /* 1110 * On the initfn path, store the first error in the container so we 1111 * can gracefully fail. Runtime, there's not much we can do other 1112 * than throw a hardware error. 1113 */ 1114 if (!container->initialized) { 1115 if (!container->error) { 1116 error_propagate_prepend(&container->error, err, 1117 "Region %s: ", 1118 memory_region_name(section->mr)); 1119 } else { 1120 error_free(err); 1121 } 1122 } else { 1123 error_report_err(err); 1124 hw_error("vfio: DMA mapping failed, unable to continue"); 1125 } 1126 } 1127 1128 static void vfio_listener_region_del(MemoryListener *listener, 1129 MemoryRegionSection *section) 1130 { 1131 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 1132 hwaddr iova, end; 1133 Int128 llend, llsize; 1134 int ret; 1135 bool try_unmap = true; 1136 1137 if (vfio_listener_skipped_section(section)) { 1138 trace_vfio_listener_region_del_skip( 1139 section->offset_within_address_space, 1140 section->offset_within_address_space + 1141 int128_get64(int128_sub(section->size, int128_one()))); 1142 return; 1143 } 1144 1145 if (unlikely((section->offset_within_address_space & 1146 ~qemu_real_host_page_mask()) != 1147 (section->offset_within_region & ~qemu_real_host_page_mask()))) { 1148 if (!vfio_known_safe_misalignment(section)) { 1149 error_report("%s received unaligned region %s iova=0x%"PRIx64 1150 " offset_within_region=0x%"PRIx64 1151 " qemu_real_host_page_size=0x%"PRIxPTR, 1152 __func__, memory_region_name(section->mr), 1153 section->offset_within_address_space, 1154 section->offset_within_region, 1155 qemu_real_host_page_size()); 1156 } 1157 return; 1158 } 1159 1160 if (memory_region_is_iommu(section->mr)) { 1161 VFIOGuestIOMMU *giommu; 1162 1163 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { 1164 if (MEMORY_REGION(giommu->iommu_mr) == section->mr && 1165 giommu->n.start == section->offset_within_region) { 1166 memory_region_unregister_iommu_notifier(section->mr, 1167 &giommu->n); 1168 QLIST_REMOVE(giommu, giommu_next); 1169 g_free(giommu); 1170 break; 1171 } 1172 } 1173 1174 /* 1175 * FIXME: We assume the one big unmap below is adequate to 1176 * remove any individual page mappings in the IOMMU which 1177 * might have been copied into VFIO. This works for a page table 1178 * based IOMMU where a big unmap flattens a large range of IO-PTEs. 1179 * That may not be true for all IOMMU types. 1180 */ 1181 } 1182 1183 iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); 1184 llend = int128_make64(section->offset_within_address_space); 1185 llend = int128_add(llend, section->size); 1186 llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); 1187 1188 if (int128_ge(int128_make64(iova), llend)) { 1189 return; 1190 } 1191 end = int128_get64(int128_sub(llend, int128_one())); 1192 1193 llsize = int128_sub(llend, int128_make64(iova)); 1194 1195 trace_vfio_listener_region_del(iova, end); 1196 1197 if (memory_region_is_ram_device(section->mr)) { 1198 hwaddr pgmask; 1199 VFIOHostDMAWindow *hostwin; 1200 bool hostwin_found = false; 1201 1202 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 1203 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { 1204 hostwin_found = true; 1205 break; 1206 } 1207 } 1208 assert(hostwin_found); /* or region_add() would have failed */ 1209 1210 pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; 1211 try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); 1212 } else if (memory_region_has_ram_discard_manager(section->mr)) { 1213 vfio_unregister_ram_discard_listener(container, section); 1214 /* Unregistering will trigger an unmap. */ 1215 try_unmap = false; 1216 } 1217 1218 if (try_unmap) { 1219 if (int128_eq(llsize, int128_2_64())) { 1220 /* The unmap ioctl doesn't accept a full 64-bit span. */ 1221 llsize = int128_rshift(llsize, 1); 1222 ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); 1223 if (ret) { 1224 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " 1225 "0x%"HWADDR_PRIx") = %d (%m)", 1226 container, iova, int128_get64(llsize), ret); 1227 } 1228 iova += int128_get64(llsize); 1229 } 1230 ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); 1231 if (ret) { 1232 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " 1233 "0x%"HWADDR_PRIx") = %d (%m)", 1234 container, iova, int128_get64(llsize), ret); 1235 } 1236 } 1237 1238 memory_region_unref(section->mr); 1239 1240 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 1241 vfio_spapr_remove_window(container, 1242 section->offset_within_address_space); 1243 if (vfio_host_win_del(container, 1244 section->offset_within_address_space, 1245 section->offset_within_address_space + 1246 int128_get64(section->size) - 1) < 0) { 1247 hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx, 1248 __func__, section->offset_within_address_space); 1249 } 1250 } 1251 } 1252 1253 static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) 1254 { 1255 int ret; 1256 struct vfio_iommu_type1_dirty_bitmap dirty = { 1257 .argsz = sizeof(dirty), 1258 }; 1259 1260 if (start) { 1261 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; 1262 } else { 1263 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; 1264 } 1265 1266 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); 1267 if (ret) { 1268 error_report("Failed to set dirty tracking flag 0x%x errno: %d", 1269 dirty.flags, errno); 1270 } 1271 } 1272 1273 static void vfio_listener_log_global_start(MemoryListener *listener) 1274 { 1275 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 1276 1277 vfio_set_dirty_page_tracking(container, true); 1278 } 1279 1280 static void vfio_listener_log_global_stop(MemoryListener *listener) 1281 { 1282 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 1283 1284 vfio_set_dirty_page_tracking(container, false); 1285 } 1286 1287 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, 1288 uint64_t size, ram_addr_t ram_addr) 1289 { 1290 struct vfio_iommu_type1_dirty_bitmap *dbitmap; 1291 struct vfio_iommu_type1_dirty_bitmap_get *range; 1292 uint64_t pages; 1293 int ret; 1294 1295 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); 1296 1297 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); 1298 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; 1299 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; 1300 range->iova = iova; 1301 range->size = size; 1302 1303 /* 1304 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 1305 * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize 1306 * to qemu_real_host_page_size. 1307 */ 1308 range->bitmap.pgsize = qemu_real_host_page_size(); 1309 1310 pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size(); 1311 range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / 1312 BITS_PER_BYTE; 1313 range->bitmap.data = g_try_malloc0(range->bitmap.size); 1314 if (!range->bitmap.data) { 1315 ret = -ENOMEM; 1316 goto err_out; 1317 } 1318 1319 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); 1320 if (ret) { 1321 error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 1322 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, 1323 (uint64_t)range->size, errno); 1324 goto err_out; 1325 } 1326 1327 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data, 1328 ram_addr, pages); 1329 1330 trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, 1331 range->bitmap.size, ram_addr); 1332 err_out: 1333 g_free(range->bitmap.data); 1334 g_free(dbitmap); 1335 1336 return ret; 1337 } 1338 1339 typedef struct { 1340 IOMMUNotifier n; 1341 VFIOGuestIOMMU *giommu; 1342 } vfio_giommu_dirty_notifier; 1343 1344 static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 1345 { 1346 vfio_giommu_dirty_notifier *gdn = container_of(n, 1347 vfio_giommu_dirty_notifier, n); 1348 VFIOGuestIOMMU *giommu = gdn->giommu; 1349 VFIOContainer *container = giommu->container; 1350 hwaddr iova = iotlb->iova + giommu->iommu_offset; 1351 ram_addr_t translated_addr; 1352 1353 trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); 1354 1355 if (iotlb->target_as != &address_space_memory) { 1356 error_report("Wrong target AS \"%s\", only system memory is allowed", 1357 iotlb->target_as->name ? iotlb->target_as->name : "none"); 1358 return; 1359 } 1360 1361 rcu_read_lock(); 1362 if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { 1363 int ret; 1364 1365 ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, 1366 translated_addr); 1367 if (ret) { 1368 error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " 1369 "0x%"HWADDR_PRIx") = %d (%m)", 1370 container, iova, 1371 iotlb->addr_mask + 1, ret); 1372 } 1373 } 1374 rcu_read_unlock(); 1375 } 1376 1377 static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, 1378 void *opaque) 1379 { 1380 const hwaddr size = int128_get64(section->size); 1381 const hwaddr iova = section->offset_within_address_space; 1382 const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) + 1383 section->offset_within_region; 1384 VFIORamDiscardListener *vrdl = opaque; 1385 1386 /* 1387 * Sync the whole mapped region (spanning multiple individual mappings) 1388 * in one go. 1389 */ 1390 return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr); 1391 } 1392 1393 static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, 1394 MemoryRegionSection *section) 1395 { 1396 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); 1397 VFIORamDiscardListener *vrdl = NULL; 1398 1399 QLIST_FOREACH(vrdl, &container->vrdl_list, next) { 1400 if (vrdl->mr == section->mr && 1401 vrdl->offset_within_address_space == 1402 section->offset_within_address_space) { 1403 break; 1404 } 1405 } 1406 1407 if (!vrdl) { 1408 hw_error("vfio: Trying to sync missing RAM discard listener"); 1409 } 1410 1411 /* 1412 * We only want/can synchronize the bitmap for actually mapped parts - 1413 * which correspond to populated parts. Replay all populated parts. 1414 */ 1415 return ram_discard_manager_replay_populated(rdm, section, 1416 vfio_ram_discard_get_dirty_bitmap, 1417 &vrdl); 1418 } 1419 1420 static int vfio_sync_dirty_bitmap(VFIOContainer *container, 1421 MemoryRegionSection *section) 1422 { 1423 ram_addr_t ram_addr; 1424 1425 if (memory_region_is_iommu(section->mr)) { 1426 VFIOGuestIOMMU *giommu; 1427 1428 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { 1429 if (MEMORY_REGION(giommu->iommu_mr) == section->mr && 1430 giommu->n.start == section->offset_within_region) { 1431 Int128 llend; 1432 vfio_giommu_dirty_notifier gdn = { .giommu = giommu }; 1433 int idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr, 1434 MEMTXATTRS_UNSPECIFIED); 1435 1436 llend = int128_add(int128_make64(section->offset_within_region), 1437 section->size); 1438 llend = int128_sub(llend, int128_one()); 1439 1440 iommu_notifier_init(&gdn.n, 1441 vfio_iommu_map_dirty_notify, 1442 IOMMU_NOTIFIER_MAP, 1443 section->offset_within_region, 1444 int128_get64(llend), 1445 idx); 1446 memory_region_iommu_replay(giommu->iommu_mr, &gdn.n); 1447 break; 1448 } 1449 } 1450 return 0; 1451 } else if (memory_region_has_ram_discard_manager(section->mr)) { 1452 return vfio_sync_ram_discard_listener_dirty_bitmap(container, section); 1453 } 1454 1455 ram_addr = memory_region_get_ram_addr(section->mr) + 1456 section->offset_within_region; 1457 1458 return vfio_get_dirty_bitmap(container, 1459 REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), 1460 int128_get64(section->size), ram_addr); 1461 } 1462 1463 static void vfio_listener_log_sync(MemoryListener *listener, 1464 MemoryRegionSection *section) 1465 { 1466 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 1467 1468 if (vfio_listener_skipped_section(section) || 1469 !container->dirty_pages_supported) { 1470 return; 1471 } 1472 1473 if (vfio_devices_all_dirty_tracking(container)) { 1474 vfio_sync_dirty_bitmap(container, section); 1475 } 1476 } 1477 1478 static const MemoryListener vfio_memory_listener = { 1479 .name = "vfio", 1480 .region_add = vfio_listener_region_add, 1481 .region_del = vfio_listener_region_del, 1482 .log_global_start = vfio_listener_log_global_start, 1483 .log_global_stop = vfio_listener_log_global_stop, 1484 .log_sync = vfio_listener_log_sync, 1485 }; 1486 1487 static void vfio_listener_release(VFIOContainer *container) 1488 { 1489 memory_listener_unregister(&container->listener); 1490 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 1491 memory_listener_unregister(&container->prereg_listener); 1492 } 1493 } 1494 1495 static struct vfio_info_cap_header * 1496 vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id) 1497 { 1498 struct vfio_info_cap_header *hdr; 1499 1500 for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 1501 if (hdr->id == id) { 1502 return hdr; 1503 } 1504 } 1505 1506 return NULL; 1507 } 1508 1509 struct vfio_info_cap_header * 1510 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) 1511 { 1512 if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { 1513 return NULL; 1514 } 1515 1516 return vfio_get_cap((void *)info, info->cap_offset, id); 1517 } 1518 1519 static struct vfio_info_cap_header * 1520 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 1521 { 1522 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 1523 return NULL; 1524 } 1525 1526 return vfio_get_cap((void *)info, info->cap_offset, id); 1527 } 1528 1529 struct vfio_info_cap_header * 1530 vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id) 1531 { 1532 if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) { 1533 return NULL; 1534 } 1535 1536 return vfio_get_cap((void *)info, info->cap_offset, id); 1537 } 1538 1539 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, 1540 unsigned int *avail) 1541 { 1542 struct vfio_info_cap_header *hdr; 1543 struct vfio_iommu_type1_info_dma_avail *cap; 1544 1545 /* If the capability cannot be found, assume no DMA limiting */ 1546 hdr = vfio_get_iommu_type1_info_cap(info, 1547 VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); 1548 if (hdr == NULL) { 1549 return false; 1550 } 1551 1552 if (avail != NULL) { 1553 cap = (void *) hdr; 1554 *avail = cap->avail; 1555 } 1556 1557 return true; 1558 } 1559 1560 static int vfio_setup_region_sparse_mmaps(VFIORegion *region, 1561 struct vfio_region_info *info) 1562 { 1563 struct vfio_info_cap_header *hdr; 1564 struct vfio_region_info_cap_sparse_mmap *sparse; 1565 int i, j; 1566 1567 hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); 1568 if (!hdr) { 1569 return -ENODEV; 1570 } 1571 1572 sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); 1573 1574 trace_vfio_region_sparse_mmap_header(region->vbasedev->name, 1575 region->nr, sparse->nr_areas); 1576 1577 region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); 1578 1579 for (i = 0, j = 0; i < sparse->nr_areas; i++) { 1580 if (sparse->areas[i].size) { 1581 trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, 1582 sparse->areas[i].offset + 1583 sparse->areas[i].size - 1); 1584 region->mmaps[j].offset = sparse->areas[i].offset; 1585 region->mmaps[j].size = sparse->areas[i].size; 1586 j++; 1587 } 1588 } 1589 1590 region->nr_mmaps = j; 1591 region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); 1592 1593 return 0; 1594 } 1595 1596 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, 1597 int index, const char *name) 1598 { 1599 struct vfio_region_info *info; 1600 int ret; 1601 1602 ret = vfio_get_region_info(vbasedev, index, &info); 1603 if (ret) { 1604 return ret; 1605 } 1606 1607 region->vbasedev = vbasedev; 1608 region->flags = info->flags; 1609 region->size = info->size; 1610 region->fd_offset = info->offset; 1611 region->nr = index; 1612 1613 if (region->size) { 1614 region->mem = g_new0(MemoryRegion, 1); 1615 memory_region_init_io(region->mem, obj, &vfio_region_ops, 1616 region, name, region->size); 1617 1618 if (!vbasedev->no_mmap && 1619 region->flags & VFIO_REGION_INFO_FLAG_MMAP) { 1620 1621 ret = vfio_setup_region_sparse_mmaps(region, info); 1622 1623 if (ret) { 1624 region->nr_mmaps = 1; 1625 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); 1626 region->mmaps[0].offset = 0; 1627 region->mmaps[0].size = region->size; 1628 } 1629 } 1630 } 1631 1632 g_free(info); 1633 1634 trace_vfio_region_setup(vbasedev->name, index, name, 1635 region->flags, region->fd_offset, region->size); 1636 return 0; 1637 } 1638 1639 static void vfio_subregion_unmap(VFIORegion *region, int index) 1640 { 1641 trace_vfio_region_unmap(memory_region_name(®ion->mmaps[index].mem), 1642 region->mmaps[index].offset, 1643 region->mmaps[index].offset + 1644 region->mmaps[index].size - 1); 1645 memory_region_del_subregion(region->mem, ®ion->mmaps[index].mem); 1646 munmap(region->mmaps[index].mmap, region->mmaps[index].size); 1647 object_unparent(OBJECT(®ion->mmaps[index].mem)); 1648 region->mmaps[index].mmap = NULL; 1649 } 1650 1651 int vfio_region_mmap(VFIORegion *region) 1652 { 1653 int i, prot = 0; 1654 char *name; 1655 1656 if (!region->mem) { 1657 return 0; 1658 } 1659 1660 prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; 1661 prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0; 1662 1663 for (i = 0; i < region->nr_mmaps; i++) { 1664 region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot, 1665 MAP_SHARED, region->vbasedev->fd, 1666 region->fd_offset + 1667 region->mmaps[i].offset); 1668 if (region->mmaps[i].mmap == MAP_FAILED) { 1669 int ret = -errno; 1670 1671 trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, 1672 region->fd_offset + 1673 region->mmaps[i].offset, 1674 region->fd_offset + 1675 region->mmaps[i].offset + 1676 region->mmaps[i].size - 1, ret); 1677 1678 region->mmaps[i].mmap = NULL; 1679 1680 for (i--; i >= 0; i--) { 1681 vfio_subregion_unmap(region, i); 1682 } 1683 1684 return ret; 1685 } 1686 1687 name = g_strdup_printf("%s mmaps[%d]", 1688 memory_region_name(region->mem), i); 1689 memory_region_init_ram_device_ptr(®ion->mmaps[i].mem, 1690 memory_region_owner(region->mem), 1691 name, region->mmaps[i].size, 1692 region->mmaps[i].mmap); 1693 g_free(name); 1694 memory_region_add_subregion(region->mem, region->mmaps[i].offset, 1695 ®ion->mmaps[i].mem); 1696 1697 trace_vfio_region_mmap(memory_region_name(®ion->mmaps[i].mem), 1698 region->mmaps[i].offset, 1699 region->mmaps[i].offset + 1700 region->mmaps[i].size - 1); 1701 } 1702 1703 return 0; 1704 } 1705 1706 void vfio_region_unmap(VFIORegion *region) 1707 { 1708 int i; 1709 1710 if (!region->mem) { 1711 return; 1712 } 1713 1714 for (i = 0; i < region->nr_mmaps; i++) { 1715 if (region->mmaps[i].mmap) { 1716 vfio_subregion_unmap(region, i); 1717 } 1718 } 1719 } 1720 1721 void vfio_region_exit(VFIORegion *region) 1722 { 1723 int i; 1724 1725 if (!region->mem) { 1726 return; 1727 } 1728 1729 for (i = 0; i < region->nr_mmaps; i++) { 1730 if (region->mmaps[i].mmap) { 1731 memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); 1732 } 1733 } 1734 1735 trace_vfio_region_exit(region->vbasedev->name, region->nr); 1736 } 1737 1738 void vfio_region_finalize(VFIORegion *region) 1739 { 1740 int i; 1741 1742 if (!region->mem) { 1743 return; 1744 } 1745 1746 for (i = 0; i < region->nr_mmaps; i++) { 1747 if (region->mmaps[i].mmap) { 1748 munmap(region->mmaps[i].mmap, region->mmaps[i].size); 1749 object_unparent(OBJECT(®ion->mmaps[i].mem)); 1750 } 1751 } 1752 1753 object_unparent(OBJECT(region->mem)); 1754 1755 g_free(region->mem); 1756 g_free(region->mmaps); 1757 1758 trace_vfio_region_finalize(region->vbasedev->name, region->nr); 1759 1760 region->mem = NULL; 1761 region->mmaps = NULL; 1762 region->nr_mmaps = 0; 1763 region->size = 0; 1764 region->flags = 0; 1765 region->nr = 0; 1766 } 1767 1768 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) 1769 { 1770 int i; 1771 1772 if (!region->mem) { 1773 return; 1774 } 1775 1776 for (i = 0; i < region->nr_mmaps; i++) { 1777 if (region->mmaps[i].mmap) { 1778 memory_region_set_enabled(®ion->mmaps[i].mem, enabled); 1779 } 1780 } 1781 1782 trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), 1783 enabled); 1784 } 1785 1786 void vfio_reset_handler(void *opaque) 1787 { 1788 VFIOGroup *group; 1789 VFIODevice *vbasedev; 1790 1791 QLIST_FOREACH(group, &vfio_group_list, next) { 1792 QLIST_FOREACH(vbasedev, &group->device_list, next) { 1793 if (vbasedev->dev->realized) { 1794 vbasedev->ops->vfio_compute_needs_reset(vbasedev); 1795 } 1796 } 1797 } 1798 1799 QLIST_FOREACH(group, &vfio_group_list, next) { 1800 QLIST_FOREACH(vbasedev, &group->device_list, next) { 1801 if (vbasedev->dev->realized && vbasedev->needs_reset) { 1802 vbasedev->ops->vfio_hot_reset_multi(vbasedev); 1803 } 1804 } 1805 } 1806 } 1807 1808 static void vfio_kvm_device_add_group(VFIOGroup *group) 1809 { 1810 #ifdef CONFIG_KVM 1811 struct kvm_device_attr attr = { 1812 .group = KVM_DEV_VFIO_GROUP, 1813 .attr = KVM_DEV_VFIO_GROUP_ADD, 1814 .addr = (uint64_t)(unsigned long)&group->fd, 1815 }; 1816 1817 if (!kvm_enabled()) { 1818 return; 1819 } 1820 1821 if (vfio_kvm_device_fd < 0) { 1822 struct kvm_create_device cd = { 1823 .type = KVM_DEV_TYPE_VFIO, 1824 }; 1825 1826 if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { 1827 error_report("Failed to create KVM VFIO device: %m"); 1828 return; 1829 } 1830 1831 vfio_kvm_device_fd = cd.fd; 1832 } 1833 1834 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 1835 error_report("Failed to add group %d to KVM VFIO device: %m", 1836 group->groupid); 1837 } 1838 #endif 1839 } 1840 1841 static void vfio_kvm_device_del_group(VFIOGroup *group) 1842 { 1843 #ifdef CONFIG_KVM 1844 struct kvm_device_attr attr = { 1845 .group = KVM_DEV_VFIO_GROUP, 1846 .attr = KVM_DEV_VFIO_GROUP_DEL, 1847 .addr = (uint64_t)(unsigned long)&group->fd, 1848 }; 1849 1850 if (vfio_kvm_device_fd < 0) { 1851 return; 1852 } 1853 1854 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 1855 error_report("Failed to remove group %d from KVM VFIO device: %m", 1856 group->groupid); 1857 } 1858 #endif 1859 } 1860 1861 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) 1862 { 1863 VFIOAddressSpace *space; 1864 1865 QLIST_FOREACH(space, &vfio_address_spaces, list) { 1866 if (space->as == as) { 1867 return space; 1868 } 1869 } 1870 1871 /* No suitable VFIOAddressSpace, create a new one */ 1872 space = g_malloc0(sizeof(*space)); 1873 space->as = as; 1874 QLIST_INIT(&space->containers); 1875 1876 QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); 1877 1878 return space; 1879 } 1880 1881 static void vfio_put_address_space(VFIOAddressSpace *space) 1882 { 1883 if (QLIST_EMPTY(&space->containers)) { 1884 QLIST_REMOVE(space, list); 1885 g_free(space); 1886 } 1887 } 1888 1889 /* 1890 * vfio_get_iommu_type - selects the richest iommu_type (v2 first) 1891 */ 1892 static int vfio_get_iommu_type(VFIOContainer *container, 1893 Error **errp) 1894 { 1895 int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, 1896 VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; 1897 int i; 1898 1899 for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { 1900 if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { 1901 return iommu_types[i]; 1902 } 1903 } 1904 error_setg(errp, "No available IOMMU models"); 1905 return -EINVAL; 1906 } 1907 1908 static int vfio_init_container(VFIOContainer *container, int group_fd, 1909 Error **errp) 1910 { 1911 int iommu_type, ret; 1912 1913 iommu_type = vfio_get_iommu_type(container, errp); 1914 if (iommu_type < 0) { 1915 return iommu_type; 1916 } 1917 1918 ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); 1919 if (ret) { 1920 error_setg_errno(errp, errno, "Failed to set group container"); 1921 return -errno; 1922 } 1923 1924 while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { 1925 if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 1926 /* 1927 * On sPAPR, despite the IOMMU subdriver always advertises v1 and 1928 * v2, the running platform may not support v2 and there is no 1929 * way to guess it until an IOMMU group gets added to the container. 1930 * So in case it fails with v2, try v1 as a fallback. 1931 */ 1932 iommu_type = VFIO_SPAPR_TCE_IOMMU; 1933 continue; 1934 } 1935 error_setg_errno(errp, errno, "Failed to set iommu for container"); 1936 return -errno; 1937 } 1938 1939 container->iommu_type = iommu_type; 1940 return 0; 1941 } 1942 1943 static int vfio_get_iommu_info(VFIOContainer *container, 1944 struct vfio_iommu_type1_info **info) 1945 { 1946 1947 size_t argsz = sizeof(struct vfio_iommu_type1_info); 1948 1949 *info = g_new0(struct vfio_iommu_type1_info, 1); 1950 again: 1951 (*info)->argsz = argsz; 1952 1953 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { 1954 g_free(*info); 1955 *info = NULL; 1956 return -errno; 1957 } 1958 1959 if (((*info)->argsz > argsz)) { 1960 argsz = (*info)->argsz; 1961 *info = g_realloc(*info, argsz); 1962 goto again; 1963 } 1964 1965 return 0; 1966 } 1967 1968 static struct vfio_info_cap_header * 1969 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 1970 { 1971 struct vfio_info_cap_header *hdr; 1972 void *ptr = info; 1973 1974 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 1975 return NULL; 1976 } 1977 1978 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 1979 if (hdr->id == id) { 1980 return hdr; 1981 } 1982 } 1983 1984 return NULL; 1985 } 1986 1987 static void vfio_get_iommu_info_migration(VFIOContainer *container, 1988 struct vfio_iommu_type1_info *info) 1989 { 1990 struct vfio_info_cap_header *hdr; 1991 struct vfio_iommu_type1_info_cap_migration *cap_mig; 1992 1993 hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); 1994 if (!hdr) { 1995 return; 1996 } 1997 1998 cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, 1999 header); 2000 2001 /* 2002 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 2003 * qemu_real_host_page_size to mark those dirty. 2004 */ 2005 if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { 2006 container->dirty_pages_supported = true; 2007 container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; 2008 container->dirty_pgsizes = cap_mig->pgsize_bitmap; 2009 } 2010 } 2011 2012 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, 2013 Error **errp) 2014 { 2015 VFIOContainer *container; 2016 int ret, fd; 2017 VFIOAddressSpace *space; 2018 2019 space = vfio_get_address_space(as); 2020 2021 /* 2022 * VFIO is currently incompatible with discarding of RAM insofar as the 2023 * madvise to purge (zap) the page from QEMU's address space does not 2024 * interact with the memory API and therefore leaves stale virtual to 2025 * physical mappings in the IOMMU if the page was previously pinned. We 2026 * therefore set discarding broken for each group added to a container, 2027 * whether the container is used individually or shared. This provides 2028 * us with options to allow devices within a group to opt-in and allow 2029 * discarding, so long as it is done consistently for a group (for instance 2030 * if the device is an mdev device where it is known that the host vendor 2031 * driver will never pin pages outside of the working set of the guest 2032 * driver, which would thus not be discarding candidates). 2033 * 2034 * The first opportunity to induce pinning occurs here where we attempt to 2035 * attach the group to existing containers within the AddressSpace. If any 2036 * pages are already zapped from the virtual address space, such as from 2037 * previous discards, new pinning will cause valid mappings to be 2038 * re-established. Likewise, when the overall MemoryListener for a new 2039 * container is registered, a replay of mappings within the AddressSpace 2040 * will occur, re-establishing any previously zapped pages as well. 2041 * 2042 * Especially virtio-balloon is currently only prevented from discarding 2043 * new memory, it will not yet set ram_block_discard_set_required() and 2044 * therefore, neither stops us here or deals with the sudden memory 2045 * consumption of inflated memory. 2046 * 2047 * We do support discarding of memory coordinated via the RamDiscardManager 2048 * with some IOMMU types. vfio_ram_block_discard_disable() handles the 2049 * details once we know which type of IOMMU we are using. 2050 */ 2051 2052 QLIST_FOREACH(container, &space->containers, next) { 2053 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { 2054 ret = vfio_ram_block_discard_disable(container, true); 2055 if (ret) { 2056 error_setg_errno(errp, -ret, 2057 "Cannot set discarding of RAM broken"); 2058 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, 2059 &container->fd)) { 2060 error_report("vfio: error disconnecting group %d from" 2061 " container", group->groupid); 2062 } 2063 return ret; 2064 } 2065 group->container = container; 2066 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 2067 vfio_kvm_device_add_group(group); 2068 return 0; 2069 } 2070 } 2071 2072 fd = qemu_open_old("/dev/vfio/vfio", O_RDWR); 2073 if (fd < 0) { 2074 error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio"); 2075 ret = -errno; 2076 goto put_space_exit; 2077 } 2078 2079 ret = ioctl(fd, VFIO_GET_API_VERSION); 2080 if (ret != VFIO_API_VERSION) { 2081 error_setg(errp, "supported vfio version: %d, " 2082 "reported version: %d", VFIO_API_VERSION, ret); 2083 ret = -EINVAL; 2084 goto close_fd_exit; 2085 } 2086 2087 container = g_malloc0(sizeof(*container)); 2088 container->space = space; 2089 container->fd = fd; 2090 container->error = NULL; 2091 container->dirty_pages_supported = false; 2092 container->dma_max_mappings = 0; 2093 QLIST_INIT(&container->giommu_list); 2094 QLIST_INIT(&container->hostwin_list); 2095 QLIST_INIT(&container->vrdl_list); 2096 2097 ret = vfio_init_container(container, group->fd, errp); 2098 if (ret) { 2099 goto free_container_exit; 2100 } 2101 2102 ret = vfio_ram_block_discard_disable(container, true); 2103 if (ret) { 2104 error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); 2105 goto free_container_exit; 2106 } 2107 2108 switch (container->iommu_type) { 2109 case VFIO_TYPE1v2_IOMMU: 2110 case VFIO_TYPE1_IOMMU: 2111 { 2112 struct vfio_iommu_type1_info *info; 2113 2114 /* 2115 * FIXME: This assumes that a Type1 IOMMU can map any 64-bit 2116 * IOVA whatsoever. That's not actually true, but the current 2117 * kernel interface doesn't tell us what it can map, and the 2118 * existing Type1 IOMMUs generally support any IOVA we're 2119 * going to actually try in practice. 2120 */ 2121 ret = vfio_get_iommu_info(container, &info); 2122 2123 if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) { 2124 /* Assume 4k IOVA page size */ 2125 info->iova_pgsizes = 4096; 2126 } 2127 vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes); 2128 container->pgsizes = info->iova_pgsizes; 2129 2130 /* The default in the kernel ("dma_entry_limit") is 65535. */ 2131 container->dma_max_mappings = 65535; 2132 if (!ret) { 2133 vfio_get_info_dma_avail(info, &container->dma_max_mappings); 2134 vfio_get_iommu_info_migration(container, info); 2135 } 2136 g_free(info); 2137 break; 2138 } 2139 case VFIO_SPAPR_TCE_v2_IOMMU: 2140 case VFIO_SPAPR_TCE_IOMMU: 2141 { 2142 struct vfio_iommu_spapr_tce_info info; 2143 bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; 2144 2145 /* 2146 * The host kernel code implementing VFIO_IOMMU_DISABLE is called 2147 * when container fd is closed so we do not call it explicitly 2148 * in this file. 2149 */ 2150 if (!v2) { 2151 ret = ioctl(fd, VFIO_IOMMU_ENABLE); 2152 if (ret) { 2153 error_setg_errno(errp, errno, "failed to enable container"); 2154 ret = -errno; 2155 goto enable_discards_exit; 2156 } 2157 } else { 2158 container->prereg_listener = vfio_prereg_listener; 2159 2160 memory_listener_register(&container->prereg_listener, 2161 &address_space_memory); 2162 if (container->error) { 2163 memory_listener_unregister(&container->prereg_listener); 2164 ret = -1; 2165 error_propagate_prepend(errp, container->error, 2166 "RAM memory listener initialization failed: "); 2167 goto enable_discards_exit; 2168 } 2169 } 2170 2171 info.argsz = sizeof(info); 2172 ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); 2173 if (ret) { 2174 error_setg_errno(errp, errno, 2175 "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed"); 2176 ret = -errno; 2177 if (v2) { 2178 memory_listener_unregister(&container->prereg_listener); 2179 } 2180 goto enable_discards_exit; 2181 } 2182 2183 if (v2) { 2184 container->pgsizes = info.ddw.pgsizes; 2185 /* 2186 * There is a default window in just created container. 2187 * To make region_add/del simpler, we better remove this 2188 * window now and let those iommu_listener callbacks 2189 * create/remove them when needed. 2190 */ 2191 ret = vfio_spapr_remove_window(container, info.dma32_window_start); 2192 if (ret) { 2193 error_setg_errno(errp, -ret, 2194 "failed to remove existing window"); 2195 goto enable_discards_exit; 2196 } 2197 } else { 2198 /* The default table uses 4K pages */ 2199 container->pgsizes = 0x1000; 2200 vfio_host_win_add(container, info.dma32_window_start, 2201 info.dma32_window_start + 2202 info.dma32_window_size - 1, 2203 0x1000); 2204 } 2205 } 2206 } 2207 2208 vfio_kvm_device_add_group(group); 2209 2210 QLIST_INIT(&container->group_list); 2211 QLIST_INSERT_HEAD(&space->containers, container, next); 2212 2213 group->container = container; 2214 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 2215 2216 container->listener = vfio_memory_listener; 2217 2218 memory_listener_register(&container->listener, container->space->as); 2219 2220 if (container->error) { 2221 ret = -1; 2222 error_propagate_prepend(errp, container->error, 2223 "memory listener initialization failed: "); 2224 goto listener_release_exit; 2225 } 2226 2227 container->initialized = true; 2228 2229 return 0; 2230 listener_release_exit: 2231 QLIST_REMOVE(group, container_next); 2232 QLIST_REMOVE(container, next); 2233 vfio_kvm_device_del_group(group); 2234 vfio_listener_release(container); 2235 2236 enable_discards_exit: 2237 vfio_ram_block_discard_disable(container, false); 2238 2239 free_container_exit: 2240 g_free(container); 2241 2242 close_fd_exit: 2243 close(fd); 2244 2245 put_space_exit: 2246 vfio_put_address_space(space); 2247 2248 return ret; 2249 } 2250 2251 static void vfio_disconnect_container(VFIOGroup *group) 2252 { 2253 VFIOContainer *container = group->container; 2254 2255 QLIST_REMOVE(group, container_next); 2256 group->container = NULL; 2257 2258 /* 2259 * Explicitly release the listener first before unset container, 2260 * since unset may destroy the backend container if it's the last 2261 * group. 2262 */ 2263 if (QLIST_EMPTY(&container->group_list)) { 2264 vfio_listener_release(container); 2265 } 2266 2267 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { 2268 error_report("vfio: error disconnecting group %d from container", 2269 group->groupid); 2270 } 2271 2272 if (QLIST_EMPTY(&container->group_list)) { 2273 VFIOAddressSpace *space = container->space; 2274 VFIOGuestIOMMU *giommu, *tmp; 2275 VFIOHostDMAWindow *hostwin, *next; 2276 2277 QLIST_REMOVE(container, next); 2278 2279 QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { 2280 memory_region_unregister_iommu_notifier( 2281 MEMORY_REGION(giommu->iommu_mr), &giommu->n); 2282 QLIST_REMOVE(giommu, giommu_next); 2283 g_free(giommu); 2284 } 2285 2286 QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next, 2287 next) { 2288 QLIST_REMOVE(hostwin, hostwin_next); 2289 g_free(hostwin); 2290 } 2291 2292 trace_vfio_disconnect_container(container->fd); 2293 close(container->fd); 2294 g_free(container); 2295 2296 vfio_put_address_space(space); 2297 } 2298 } 2299 2300 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) 2301 { 2302 VFIOGroup *group; 2303 char path[32]; 2304 struct vfio_group_status status = { .argsz = sizeof(status) }; 2305 2306 QLIST_FOREACH(group, &vfio_group_list, next) { 2307 if (group->groupid == groupid) { 2308 /* Found it. Now is it already in the right context? */ 2309 if (group->container->space->as == as) { 2310 return group; 2311 } else { 2312 error_setg(errp, "group %d used in multiple address spaces", 2313 group->groupid); 2314 return NULL; 2315 } 2316 } 2317 } 2318 2319 group = g_malloc0(sizeof(*group)); 2320 2321 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); 2322 group->fd = qemu_open_old(path, O_RDWR); 2323 if (group->fd < 0) { 2324 error_setg_errno(errp, errno, "failed to open %s", path); 2325 goto free_group_exit; 2326 } 2327 2328 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { 2329 error_setg_errno(errp, errno, "failed to get group %d status", groupid); 2330 goto close_fd_exit; 2331 } 2332 2333 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { 2334 error_setg(errp, "group %d is not viable", groupid); 2335 error_append_hint(errp, 2336 "Please ensure all devices within the iommu_group " 2337 "are bound to their vfio bus driver.\n"); 2338 goto close_fd_exit; 2339 } 2340 2341 group->groupid = groupid; 2342 QLIST_INIT(&group->device_list); 2343 2344 if (vfio_connect_container(group, as, errp)) { 2345 error_prepend(errp, "failed to setup container for group %d: ", 2346 groupid); 2347 goto close_fd_exit; 2348 } 2349 2350 if (QLIST_EMPTY(&vfio_group_list)) { 2351 qemu_register_reset(vfio_reset_handler, NULL); 2352 } 2353 2354 QLIST_INSERT_HEAD(&vfio_group_list, group, next); 2355 2356 return group; 2357 2358 close_fd_exit: 2359 close(group->fd); 2360 2361 free_group_exit: 2362 g_free(group); 2363 2364 return NULL; 2365 } 2366 2367 void vfio_put_group(VFIOGroup *group) 2368 { 2369 if (!group || !QLIST_EMPTY(&group->device_list)) { 2370 return; 2371 } 2372 2373 if (!group->ram_block_discard_allowed) { 2374 vfio_ram_block_discard_disable(group->container, false); 2375 } 2376 vfio_kvm_device_del_group(group); 2377 vfio_disconnect_container(group); 2378 QLIST_REMOVE(group, next); 2379 trace_vfio_put_group(group->fd); 2380 close(group->fd); 2381 g_free(group); 2382 2383 if (QLIST_EMPTY(&vfio_group_list)) { 2384 qemu_unregister_reset(vfio_reset_handler, NULL); 2385 } 2386 } 2387 2388 int vfio_get_device(VFIOGroup *group, const char *name, 2389 VFIODevice *vbasedev, Error **errp) 2390 { 2391 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; 2392 int ret, fd; 2393 2394 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); 2395 if (fd < 0) { 2396 error_setg_errno(errp, errno, "error getting device from group %d", 2397 group->groupid); 2398 error_append_hint(errp, 2399 "Verify all devices in group %d are bound to vfio-<bus> " 2400 "or pci-stub and not already in use\n", group->groupid); 2401 return fd; 2402 } 2403 2404 ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info); 2405 if (ret) { 2406 error_setg_errno(errp, errno, "error getting device info"); 2407 close(fd); 2408 return ret; 2409 } 2410 2411 /* 2412 * Set discarding of RAM as not broken for this group if the driver knows 2413 * the device operates compatibly with discarding. Setting must be 2414 * consistent per group, but since compatibility is really only possible 2415 * with mdev currently, we expect singleton groups. 2416 */ 2417 if (vbasedev->ram_block_discard_allowed != 2418 group->ram_block_discard_allowed) { 2419 if (!QLIST_EMPTY(&group->device_list)) { 2420 error_setg(errp, "Inconsistent setting of support for discarding " 2421 "RAM (e.g., balloon) within group"); 2422 close(fd); 2423 return -1; 2424 } 2425 2426 if (!group->ram_block_discard_allowed) { 2427 group->ram_block_discard_allowed = true; 2428 vfio_ram_block_discard_disable(group->container, false); 2429 } 2430 } 2431 2432 vbasedev->fd = fd; 2433 vbasedev->group = group; 2434 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); 2435 2436 vbasedev->num_irqs = dev_info.num_irqs; 2437 vbasedev->num_regions = dev_info.num_regions; 2438 vbasedev->flags = dev_info.flags; 2439 2440 trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions, 2441 dev_info.num_irqs); 2442 2443 vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); 2444 return 0; 2445 } 2446 2447 void vfio_put_base_device(VFIODevice *vbasedev) 2448 { 2449 if (!vbasedev->group) { 2450 return; 2451 } 2452 QLIST_REMOVE(vbasedev, next); 2453 vbasedev->group = NULL; 2454 trace_vfio_put_base_device(vbasedev->fd); 2455 close(vbasedev->fd); 2456 } 2457 2458 int vfio_get_region_info(VFIODevice *vbasedev, int index, 2459 struct vfio_region_info **info) 2460 { 2461 size_t argsz = sizeof(struct vfio_region_info); 2462 2463 *info = g_malloc0(argsz); 2464 2465 (*info)->index = index; 2466 retry: 2467 (*info)->argsz = argsz; 2468 2469 if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { 2470 g_free(*info); 2471 *info = NULL; 2472 return -errno; 2473 } 2474 2475 if ((*info)->argsz > argsz) { 2476 argsz = (*info)->argsz; 2477 *info = g_realloc(*info, argsz); 2478 2479 goto retry; 2480 } 2481 2482 return 0; 2483 } 2484 2485 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, 2486 uint32_t subtype, struct vfio_region_info **info) 2487 { 2488 int i; 2489 2490 for (i = 0; i < vbasedev->num_regions; i++) { 2491 struct vfio_info_cap_header *hdr; 2492 struct vfio_region_info_cap_type *cap_type; 2493 2494 if (vfio_get_region_info(vbasedev, i, info)) { 2495 continue; 2496 } 2497 2498 hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); 2499 if (!hdr) { 2500 g_free(*info); 2501 continue; 2502 } 2503 2504 cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); 2505 2506 trace_vfio_get_dev_region(vbasedev->name, i, 2507 cap_type->type, cap_type->subtype); 2508 2509 if (cap_type->type == type && cap_type->subtype == subtype) { 2510 return 0; 2511 } 2512 2513 g_free(*info); 2514 } 2515 2516 *info = NULL; 2517 return -ENODEV; 2518 } 2519 2520 bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) 2521 { 2522 struct vfio_region_info *info = NULL; 2523 bool ret = false; 2524 2525 if (!vfio_get_region_info(vbasedev, region, &info)) { 2526 if (vfio_get_region_info_cap(info, cap_type)) { 2527 ret = true; 2528 } 2529 g_free(info); 2530 } 2531 2532 return ret; 2533 } 2534 2535 /* 2536 * Interfaces for IBM EEH (Enhanced Error Handling) 2537 */ 2538 static bool vfio_eeh_container_ok(VFIOContainer *container) 2539 { 2540 /* 2541 * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO 2542 * implementation is broken if there are multiple groups in a 2543 * container. The hardware works in units of Partitionable 2544 * Endpoints (== IOMMU groups) and the EEH operations naively 2545 * iterate across all groups in the container, without any logic 2546 * to make sure the groups have their state synchronized. For 2547 * certain operations (ENABLE) that might be ok, until an error 2548 * occurs, but for others (GET_STATE) it's clearly broken. 2549 */ 2550 2551 /* 2552 * XXX Once fixed kernels exist, test for them here 2553 */ 2554 2555 if (QLIST_EMPTY(&container->group_list)) { 2556 return false; 2557 } 2558 2559 if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) { 2560 return false; 2561 } 2562 2563 return true; 2564 } 2565 2566 static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) 2567 { 2568 struct vfio_eeh_pe_op pe_op = { 2569 .argsz = sizeof(pe_op), 2570 .op = op, 2571 }; 2572 int ret; 2573 2574 if (!vfio_eeh_container_ok(container)) { 2575 error_report("vfio/eeh: EEH_PE_OP 0x%x: " 2576 "kernel requires a container with exactly one group", op); 2577 return -EPERM; 2578 } 2579 2580 ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op); 2581 if (ret < 0) { 2582 error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op); 2583 return -errno; 2584 } 2585 2586 return ret; 2587 } 2588 2589 static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) 2590 { 2591 VFIOAddressSpace *space = vfio_get_address_space(as); 2592 VFIOContainer *container = NULL; 2593 2594 if (QLIST_EMPTY(&space->containers)) { 2595 /* No containers to act on */ 2596 goto out; 2597 } 2598 2599 container = QLIST_FIRST(&space->containers); 2600 2601 if (QLIST_NEXT(container, next)) { 2602 /* We don't yet have logic to synchronize EEH state across 2603 * multiple containers */ 2604 container = NULL; 2605 goto out; 2606 } 2607 2608 out: 2609 vfio_put_address_space(space); 2610 return container; 2611 } 2612 2613 bool vfio_eeh_as_ok(AddressSpace *as) 2614 { 2615 VFIOContainer *container = vfio_eeh_as_container(as); 2616 2617 return (container != NULL) && vfio_eeh_container_ok(container); 2618 } 2619 2620 int vfio_eeh_as_op(AddressSpace *as, uint32_t op) 2621 { 2622 VFIOContainer *container = vfio_eeh_as_container(as); 2623 2624 if (!container) { 2625 return -ENODEV; 2626 } 2627 return vfio_eeh_container_op(container, op); 2628 } 2629