/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM. Once created it lives
 * for the life of the VM. Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm. Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}
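
/*
 * Typical usage sketch for the function below (illustrative only; the
 * EventNotifier is assumed to be owned and initialized by the caller):
 *
 *   Error *err = NULL;
 *   if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSI_IRQ_INDEX, 0,
 *                              VFIO_IRQ_SET_ACTION_TRIGGER,
 *                              event_notifier_get_fd(&notifier), &err)) {
 *       error_report_err(err);
 *   }
 *
 * Passing fd = -1 with the same index/subindex tears the signaling down
 * again, which is reflected in the error message wording below.
 */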

int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI. This will
     * do nothing if not pending (including not in INTx mode). We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt. Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
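
/*
 * Worked example of the endian handling (illustrative values): a 4-byte
 * guest write of 0x12345678 is stored as cpu_to_le32(0x12345678), so the
 * bytes 78 56 34 12 are handed to pwrite() in that order on both little
 * and big endian hosts, matching the little endian layout the VFIO region
 * expects.  The read path below applies the inverse le*_to_cpu()
 * conversion.
 */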

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space. Test for the error
         * condition and re-try the unmap excluding the last page. The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used. This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1. A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}
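
/*
 * Concrete illustration of the workaround above (hypothetical values):
 * an unmap of iova = 0xffffffff00000000 with size = 0x100000000 makes
 * unmap.iova + unmap.size wrap to 0, which the affected kernels reject
 * with EINVAL.  Assuming a 4 KiB minimum IOVA page size, the retry trims
 * the request to 0xfffff000 bytes, i.e. everything except the very last
 * page, which we expect was never mapped in the first place.
 */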

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target. We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
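
/*
 * Worked example of the page alignment arithmetic shared by the two
 * listener callbacks (hypothetical values): for a section at
 * offset_within_address_space = 0x1234 with size = 0x3000 and a 4 KiB
 * TARGET_PAGE_SIZE, iova is rounded up to 0x2000 and llend is rounded
 * down to 0x4000, so only the fully covered pages [0x2000, 0x4000) are
 * mapped or unmapped; a section that rounds away to nothing is skipped
 * entirely.
 */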

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
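
/*
 * Example sparse layout (hypothetical values): a 16 KiB BAR whose MSI-X
 * table occupies 0x3000-0x37ff might advertise two mmap-able areas,
 * [0x0000, 0x3000) and [0x3800, 0x4000).  The loop above keeps both as
 * region->mmaps[0] and region->mmaps[1]; accesses to the excluded hole
 * fall back to the slow read/write path through vfio_region_ops.
 */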

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with memory ballooning insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore add a balloon inhibit for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * ballooning, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be ballooning candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from a
     * previous ballooning opt-in, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * NB. Balloon inhibiting does not currently block operation of the
     * balloon driver or revoke previously pinned pages, it only prevents
     * calling madvise to modify the virtual mapping of ballooned pages.
     */
    qemu_balloon_inhibit(true);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
        container->pgsizes = info.iova_pgsizes;
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * A newly created container comes with a default DMA window.
             * To keep region_add/del simple, remove it now and let the
             * iommu_listener callbacks create/remove windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    qemu_balloon_inhibit(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
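
/*
 * Typical call sequence for a device backend (sketch only; the sysfs
 * parsing and the device name shown here are illustrative assumptions,
 * not code from this file): resolve /sys/bus/.../<device>/iommu_group
 * to a numeric groupid, then
 *
 *   group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev),
 *                          errp);
 *   if (group && vfio_get_device(group, "0000:06:0d.0", vbasedev, errp)) {
 *       vfio_put_group(group);
 *   }
 *
 * vfio_put_group() is also what releases the group again once the last
 * device has been detached with vfio_put_base_device().
 */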

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->balloon_allowed) {
        qemu_balloon_inhibit(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Clear the balloon inhibitor for this group if the driver knows the
     * device operates compatibly with ballooning.  Setting must be consistent
     * per group, but since compatibility is really only possible with mdev
     * currently, we expect singleton groups.
     */
    if (vbasedev->balloon_allowed != group->balloon_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
            close(fd);
            return -1;
        }

        if (!group->balloon_allowed) {
            group->balloon_allowed = true;
            qemu_balloon_inhibit(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}
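
/*
 * Usage sketch for vfio_get_dev_region_info() (the vendor/subtype pair
 * below is only an illustrative assumption): a device backend might probe
 * for a device-specific region with
 *
 *   struct vfio_region_info *info;
 *   if (!vfio_get_dev_region_info(vbasedev,
 *                                 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | 0x8086,
 *                                 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 *                                 &info)) {
 *       ...use info->size and info->offset...
 *       g_free(info);
 *   }
 *
 * On success the caller owns *info and must g_free() it.
 */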

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}