/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
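
/*
 * Host DMA windows track the IOVA ranges (and supported IOMMU page sizes)
 * that the container's host IOMMU backend is able to map.  Type1 registers
 * a single catch-all window; sPAPR adds and removes windows as the guest
 * creates and destroys them.  vfio_listener_region_add() rejects any
 * section that does not fall entirely inside one of these windows.
 */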

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
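
/*
 * MemoryListener callback: a new section has become visible in the
 * container's address space.  After validating alignment and finding a
 * host DMA window that covers it, either register an IOMMU notifier (so
 * guest IOMMU mappings are propagated through vfio_iommu_map_notify())
 * or, for plain RAM, map the whole section into the container up front.
 */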

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        VFIOHostDMAWindow *hostwin;
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
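
/*
 * MemoryListener callback: the mirror image of vfio_listener_region_add().
 * For IOMMU sections, drop our notifier; in all cases unmap the IOVA range
 * from the container and, for sPAPR v2, remove the corresponding DMA window.
 */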

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(giommu->iommu,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
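
/*
 * VFIORegion lifecycle: vfio_region_setup() queries the kernel for a
 * region's size, flags and file offset and creates the slow read/write
 * MemoryRegion backed by vfio_region_ops; vfio_region_mmap() then maps
 * whatever the kernel allows to be mmapped; vfio_region_exit() and
 * vfio_region_finalize() tear it all down again.
 *
 * The sparse mmap capability, parsed below, means only the advertised
 * sub-ranges of the region may be mmapped - accesses to everything else
 * must go through the read/write path.
 */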

static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                           struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->nr_mmaps = sparse->nr_areas;
    region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].offset = sparse->areas[i].offset;
        region->mmaps[i].size = sparse->areas[i].size;
        trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
                                            region->mmaps[i].offset +
                                            region->mmaps[i].size);
    }
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
            !(region->size & ~qemu_real_host_page_mask)) {

            vfio_setup_region_sparse_mmaps(region, info);

            if (!region->nr_mmaps) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
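
/*
 * Map each mmap-capable area of the region with mmap(2) and insert it as a
 * RAM subregion on top of the slower read/write-backed MemoryRegion, so
 * guest accesses to those ranges can be serviced without trapping into
 * QEMU.  On failure, unwind any areas already mapped and return -errno.
 *
 * Illustrative (hypothetical) caller, assuming an already-opened vbasedev:
 *
 *     if (!vfio_region_setup(OBJECT(dev), vbasedev, region, nr, "bar")) {
 *         vfio_region_mmap(region);
 *     }
 */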

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_ptr(&region->mmaps[i].mem,
                                   memory_region_owner(region->mem),
                                   name, region->mmaps[i].size,
                                   region->mmaps[i].mmap);
        g_free(name);
        memory_region_set_skip_dump(&region->mmaps[i].mem);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
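
/*
 * System reset handler: first let every device work out whether it needs a
 * reset, then perform the hot resets in a second pass, since a single
 * multi-device hot reset may cover several of the devices we track.
 */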

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
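
/*
 * Attach a group to a container in the given AddressSpace.  If the kernel
 * lets the group join an existing container in that space we reuse it;
 * otherwise open a new /dev/vfio/vfio container, negotiate an IOMMU type
 * (Type1/Type1v2 or sPAPR TCE v1/v2), seed the host DMA window list and
 * register the memory listener that populates the container's mappings.
 */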

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * A newly created container comes with a default DMA window.
             * To keep region_add/del simple, remove it now and let the
             * iommu_listener callbacks create and remove windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(giommu->iommu, &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
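
/*
 * Look up or create the VFIOGroup for an IOMMU group number.  An already
 * open group is only returned if it is bound to the same AddressSpace;
 * otherwise the group file is opened, checked for viability, connected to
 * a container, and (for the first group) the reset handler is registered
 * and the group is added to the KVM VFIO pseudo device.
 */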

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to vfio-<bus> "
                          "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
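
/*
 * Query region info using the standard VFIO argsz protocol: submit the
 * structure with the size we allocated and, if the kernel reports that it
 * needs more room (e.g. for a capability chain), reallocate to the
 * requested argsz and retry.  On success the caller owns *info and must
 * g_free() it.
 */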

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
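
/*
 * Scan every region of the device for a VFIO_REGION_INFO_CAP_TYPE
 * capability matching the requested type/subtype.  Returns 0 and hands
 * the matching region info to the caller (who must g_free() it), or
 * -ENODEV if no region matches.
 */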

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
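
/*
 * Illustrative call sequence (a sketch, not code in this file): bus-specific
 * VFIO code is expected to drive the helpers above roughly as follows --
 * obtain the group and device, set up and map the regions it needs, and
 * tear everything down in reverse order:
 *
 *     group = vfio_get_group(groupid, as, errp);
 *     vfio_get_device(group, name, vbasedev, errp);
 *     vfio_region_setup(obj, vbasedev, region, nr, "region-name");
 *     vfio_region_mmap(region);
 *     ...
 *     vfio_region_exit(region);
 *     vfio_region_finalize(region);
 *     vfio_put_base_device(vbasedev);
 *     vfio_put_group(group);
 */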