/* xref: /openbmc/qemu/hw/vfio/common.c (revision ec150c7e) */
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable: with VFIO_IRQ_SET_DATA_NONE and
 * VFIO_IRQ_SET_ACTION_TRIGGER, a count of zero asks the kernel to
 * disable the entire interrupt index.
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}
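
/*
 * Illustrative sketch (not from the original source): a caller typically
 * creates an eventfd, hands it to the kernel with ACTION_TRIGGER, and later
 * tears the signaling down again by passing fd = -1.  The handler wiring
 * below is hypothetical ("some_irq_handler" and "vdev" are placeholders):
 *
 *     Error *err = NULL;
 *     int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 *
 *     qemu_set_fd_handler(efd, some_irq_handler, NULL, vdev);
 *     if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, 0,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER, efd, &err)) {
 *         error_reportf_err(err, "MSI-X vector 0: ");
 *     }
 *
 *     ... and on teardown ...
 *
 *     vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, 0,
 *                            VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err);
 */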

/*
 * IO Port/MMIO - beware of endianness: VFIO is always little endian.
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
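
/*
 * Note (added for clarity): because .valid and .impl both span 1-8 bytes,
 * the memory core passes guest accesses of those sizes straight through to
 * vfio_region_read/write above, which bounce them to the kernel via
 * pread/pwrite at fd_offset + addr without splitting or merging.
 */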

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping; if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size; check that it
     * did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now, intersections are not allowed; we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to set up fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO IOMMU types that have KVM acceleration to avoid
         * bouncing all map/unmap operations through QEMU this way, this
         * would be the right place to wire that up (tell the KVM device
         * emulation the VFIO IOMMU handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings to be non-fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("vfio_dma_map failed; PCI peer-to-peer may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

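    /*
     * The capability chain is offset based: cap_offset and each header's
     * 'next' field are byte offsets from the start of the region info
     * structure, so a 'next' of zero (i.e. hdr == ptr) ends the walk.
     */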
    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
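
/*
 * Typical usage (a hedged sketch; "vdev", the BAR 0 index and the "bar0"
 * name are only for illustration): set the region up from the
 * kernel-reported info, then try to overlay direct mmaps on top of the
 * slow read/write path:
 *
 *     VFIORegion *region = g_new0(VFIORegion, 1);
 *
 *     if (!vfio_region_setup(OBJECT(vdev), vbasedev, region,
 *                            VFIO_PCI_BAR0_REGION_INDEX, "bar0")) {
 *         if (vfio_region_mmap(region)) {
 *             error_report("mmap failed, region stays on read/write access");
 *         }
 *     }
 */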

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace; create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1
             * and v2, the running platform may not support v2, and there is
             * no way to tell until an IOMMU group is added to the container.
             * So if setting v2 fails, fall back to v1.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with memory ballooning insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore add a balloon inhibit for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt in and allow
     * ballooning, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be ballooning candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from a
     * previous ballooning opt-in, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * NB. Balloon inhibiting does not currently block operation of the
     * balloon driver or revoke previously pinned pages; it only prevents
     * calling madvise to modify the virtual mapping of ballooned pages.
     */
    qemu_balloon_inhibit(true);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
        container->pgsizes = info.iova_pgsizes;
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * A newly created container comes with a default window, so
             * to keep region_add/del simple, remove it now and let the
             * iommu_listener callbacks create/remove windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    qemu_balloon_inhibit(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container,
     * since unsetting it may destroy the backend container if this is
     * the last group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to set up container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->balloon_allowed) {
        qemu_balloon_inhibit(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Clear the balloon inhibitor for this group if the driver knows the
     * device operates compatibly with ballooning.  Setting must be consistent
     * per group, but since compatibility is really only possible with mdev
     * currently, we expect singleton groups.
     */
    if (vbasedev->balloon_allowed != group->balloon_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
            close(fd);
            return -1;
        }

        if (!group->balloon_allowed) {
            group->balloon_allowed = true;
            qemu_balloon_inhibit(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

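/*
 * Region info uses the usual VFIO argsz negotiation: the kernel writes the
 * size it actually needs back into argsz, so when the returned argsz is
 * larger than the buffer we passed in (e.g. because a capability chain
 * follows the fixed struct), reallocate and retry with the bigger buffer.
 */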
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}