xref: /openbmc/qemu/hw/vfio/common.c (revision 754cb9c0)
1 /*
2  * generic functions used by VFIO devices
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23 #ifdef CONFIG_KVM
24 #include <linux/kvm.h>
25 #endif
26 #include <linux/vfio.h>
27 
28 #include "hw/vfio/vfio-common.h"
29 #include "hw/vfio/vfio.h"
30 #include "exec/address-spaces.h"
31 #include "exec/memory.h"
32 #include "hw/hw.h"
33 #include "qemu/error-report.h"
34 #include "qemu/range.h"
35 #include "sysemu/balloon.h"
36 #include "sysemu/kvm.h"
37 #include "trace.h"
38 #include "qapi/error.h"
39 
40 VFIOGroupList vfio_group_list =
41     QLIST_HEAD_INITIALIZER(vfio_group_list);
42 static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
43     QLIST_HEAD_INITIALIZER(vfio_address_spaces);
44 
45 #ifdef CONFIG_KVM
46 /*
47  * We have a single VFIO pseudo device per KVM VM.  Once created it lives
48  * for the life of the VM.  Closing the file descriptor only drops our
49  * reference to it and the device's reference to kvm.  Therefore once
50  * initialized, this file descriptor is only released on QEMU exit and
51  * we'll re-use it should another vfio device be attached before then.
52  */
53 static int vfio_kvm_device_fd = -1;
54 #endif
55 
56 /*
57  * Common VFIO interrupt disable
58  */
59 void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
60 {
61     struct vfio_irq_set irq_set = {
62         .argsz = sizeof(irq_set),
63         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
64         .index = index,
65         .start = 0,
66         .count = 0,
67     };
68 
69     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
70 }
71 
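/*
 * Unmask the first interrupt within the given IRQ index via
 * VFIO_DEVICE_SET_IRQS.
 */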
72 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
73 {
74     struct vfio_irq_set irq_set = {
75         .argsz = sizeof(irq_set),
76         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
77         .index = index,
78         .start = 0,
79         .count = 1,
80     };
81 
82     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
83 }
84 
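/*
 * Mask the first interrupt within the given IRQ index via
 * VFIO_DEVICE_SET_IRQS.
 */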
85 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
86 {
87     struct vfio_irq_set irq_set = {
88         .argsz = sizeof(irq_set),
89         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
90         .index = index,
91         .start = 0,
92         .count = 1,
93     };
94 
95     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
96 }
97 
98 /*
99  * IO Port/MMIO - Beware of endianness: VFIO is always little endian
100  */
101 void vfio_region_write(void *opaque, hwaddr addr,
102                        uint64_t data, unsigned size)
103 {
104     VFIORegion *region = opaque;
105     VFIODevice *vbasedev = region->vbasedev;
106     union {
107         uint8_t byte;
108         uint16_t word;
109         uint32_t dword;
110         uint64_t qword;
111     } buf;
112 
113     switch (size) {
114     case 1:
115         buf.byte = data;
116         break;
117     case 2:
118         buf.word = cpu_to_le16(data);
119         break;
120     case 4:
121         buf.dword = cpu_to_le32(data);
122         break;
123     case 8:
124         buf.qword = cpu_to_le64(data);
125         break;
126     default:
127         hw_error("vfio: unsupported write size, %d bytes", size);
128         break;
129     }
130 
131     if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
132         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
133                      ",%d) failed: %m",
134                      __func__, vbasedev->name, region->nr,
135                      addr, data, size);
136     }
137 
138     trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
139 
140     /*
141      * A read or write to a BAR always signals an INTx EOI.  This will
142      * do nothing if not pending (including not in INTx mode).  We assume
143      * that a BAR access is in response to an interrupt and that BAR
144      * accesses will service the interrupt.  Unfortunately, we don't know
145      * which access will service the interrupt, so we're potentially
146      * getting quite a few host interrupts per guest interrupt.
147      */
148     vbasedev->ops->vfio_eoi(vbasedev);
149 }
150 
151 uint64_t vfio_region_read(void *opaque,
152                           hwaddr addr, unsigned size)
153 {
154     VFIORegion *region = opaque;
155     VFIODevice *vbasedev = region->vbasedev;
156     union {
157         uint8_t byte;
158         uint16_t word;
159         uint32_t dword;
160         uint64_t qword;
161     } buf;
162     uint64_t data = 0;
163 
164     if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
165         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
166                      __func__, vbasedev->name, region->nr,
167                      addr, size);
168         return (uint64_t)-1;
169     }
170     switch (size) {
171     case 1:
172         data = buf.byte;
173         break;
174     case 2:
175         data = le16_to_cpu(buf.word);
176         break;
177     case 4:
178         data = le32_to_cpu(buf.dword);
179         break;
180     case 8:
181         data = le64_to_cpu(buf.qword);
182         break;
183     default:
184         hw_error("vfio: unsupported read size, %d bytes", size);
185         break;
186     }
187 
188     trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
189 
190     /* Same as write above */
191     vbasedev->ops->vfio_eoi(vbasedev);
192 
193     return data;
194 }
195 
196 const MemoryRegionOps vfio_region_ops = {
197     .read = vfio_region_read,
198     .write = vfio_region_write,
199     .endianness = DEVICE_LITTLE_ENDIAN,
200     .valid = {
201         .min_access_size = 1,
202         .max_access_size = 8,
203     },
204     .impl = {
205         .min_access_size = 1,
206         .max_access_size = 8,
207     },
208 };
209 
210 /*
211  * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
212  */
213 static int vfio_dma_unmap(VFIOContainer *container,
214                           hwaddr iova, ram_addr_t size)
215 {
216     struct vfio_iommu_type1_dma_unmap unmap = {
217         .argsz = sizeof(unmap),
218         .flags = 0,
219         .iova = iova,
220         .size = size,
221     };
222 
223     while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
224         /*
225          * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
226          * v4.15) where an overflow in its wrap-around check prevents us from
227          * unmapping the last page of the address space.  Test for the error
228          * condition and re-try the unmap excluding the last page.  The
229          * expectation is that we've never mapped the last page anyway and this
230          * unmap request comes via vIOMMU support which also makes it unlikely
231          * that this page is used.  This bug was introduced well after type1 v2
232          * support was introduced, so we shouldn't need to test for v1.  A fix
233          * is queued for kernel v5.0 so this workaround can be removed once
234          * affected kernels are sufficiently deprecated.
235          */
236         if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
237             container->iommu_type == VFIO_TYPE1v2_IOMMU) {
238             trace_vfio_dma_unmap_overflow_workaround();
239             unmap.size -= 1ULL << ctz64(container->pgsizes);
240             continue;
241         }
242         error_report("VFIO_UNMAP_DMA: %d", -errno);
243         return -errno;
244     }
245 
246     return 0;
247 }
248 
249 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
250                         ram_addr_t size, void *vaddr, bool readonly)
251 {
252     struct vfio_iommu_type1_dma_map map = {
253         .argsz = sizeof(map),
254         .flags = VFIO_DMA_MAP_FLAG_READ,
255         .vaddr = (__u64)(uintptr_t)vaddr,
256         .iova = iova,
257         .size = size,
258     };
259 
260     if (!readonly) {
261         map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
262     }
263 
264     /*
265      * Try the mapping; if it fails with EBUSY, unmap the region and try
266      * again.  This shouldn't be necessary, but we sometimes see it in
267      * the VGA ROM space.
268      */
269     if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
270         (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
271          ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
272         return 0;
273     }
274 
275     error_report("VFIO_MAP_DMA: %d", -errno);
276     return -errno;
277 }
278 
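/*
 * Record a host DMA window (an IOVA range the host IOMMU can map) for the
 * container.  Overlapping windows are not supported and trigger hw_error().
 */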
279 static void vfio_host_win_add(VFIOContainer *container,
280                               hwaddr min_iova, hwaddr max_iova,
281                               uint64_t iova_pgsizes)
282 {
283     VFIOHostDMAWindow *hostwin;
284 
285     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
286         if (ranges_overlap(hostwin->min_iova,
287                            hostwin->max_iova - hostwin->min_iova + 1,
288                            min_iova,
289                            max_iova - min_iova + 1)) {
290             hw_error("%s: Overlapping host DMA windows are not supported", __func__);
291         }
292     }
293 
294     hostwin = g_malloc0(sizeof(*hostwin));
295 
296     hostwin->min_iova = min_iova;
297     hostwin->max_iova = max_iova;
298     hostwin->iova_pgsizes = iova_pgsizes;
299     QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
300 }
301 
302 static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
303                              hwaddr max_iova)
304 {
305     VFIOHostDMAWindow *hostwin;
306 
307     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
308         if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
309             QLIST_REMOVE(hostwin, hostwin_next);
310             return 0;
311         }
312     }
313 
314     return -1;
315 }
316 
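/*
 * Return true if the section should not be handled by the VFIO memory
 * listener: neither RAM nor an IOMMU region, or an address beyond the
 * range we expect any IOMMU hardware to translate.
 */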
317 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
318 {
319     return (!memory_region_is_ram(section->mr) &&
320             !memory_region_is_iommu(section->mr)) ||
321            /*
322             * Sizing an enabled 64-bit BAR can cause spurious mappings to
323             * addresses in the upper part of the 64-bit address space.  These
324             * are never accessed by the CPU and beyond the address width of
325             * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
326             */
327            section->offset_within_address_space & (1ULL << 63);
328 }
329 
330 /* Called with rcu_read_lock held.  */
331 static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
332                            bool *read_only)
333 {
334     MemoryRegion *mr;
335     hwaddr xlat;
336     hwaddr len = iotlb->addr_mask + 1;
337     bool writable = iotlb->perm & IOMMU_WO;
338 
339     /*
340      * The IOMMU TLB entry we have just covers translation through
341      * this IOMMU to its immediate target.  We need to translate
342      * it the rest of the way through to memory.
343      */
344     mr = address_space_translate(&address_space_memory,
345                                  iotlb->translated_addr,
346                                  &xlat, &len, writable,
347                                  MEMTXATTRS_UNSPECIFIED);
348     if (!memory_region_is_ram(mr)) {
349         error_report("iommu map to non-memory area %"HWADDR_PRIx"",
350                      xlat);
351         return false;
352     }
353 
354     /*
355      * Translation truncates the length to the IOMMU page size;
356      * check that it did not truncate too much.
357      */
358     if (len & iotlb->addr_mask) {
359         error_report("iommu has granularity incompatible with target AS");
360         return false;
361     }
362 
363     *vaddr = memory_region_get_ram_ptr(mr) + xlat;
364     *read_only = !writable || mr->readonly;
365 
366     return true;
367 }
368 
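/*
 * IOMMU notifier callback: translate the guest IOMMU TLB entry to a host
 * virtual address and establish or tear down the corresponding DMA mapping
 * in the VFIO container.
 */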
369 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
370 {
371     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
372     VFIOContainer *container = giommu->container;
373     hwaddr iova = iotlb->iova + giommu->iommu_offset;
374     bool read_only;
375     void *vaddr;
376     int ret;
377 
378     trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
379                                 iova, iova + iotlb->addr_mask);
380 
381     if (iotlb->target_as != &address_space_memory) {
382         error_report("Wrong target AS \"%s\", only system memory is allowed",
383                      iotlb->target_as->name ? iotlb->target_as->name : "none");
384         return;
385     }
386 
387     rcu_read_lock();
388 
389     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
390         if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
391             goto out;
392         }
393         /*
394          * vaddr is only valid until rcu_read_unlock(). But after
395          * vfio_dma_map has set up the mapping the pages will be
396          * pinned by the kernel. This makes sure that the RAM backend
397          * of vaddr will always be there, even if the memory object is
398          * destroyed and its backing memory munmap-ed.
399          */
400         ret = vfio_dma_map(container, iova,
401                            iotlb->addr_mask + 1, vaddr,
402                            read_only);
403         if (ret) {
404             error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
405                          "0x%"HWADDR_PRIx", %p) = %d (%m)",
406                          container, iova,
407                          iotlb->addr_mask + 1, vaddr, ret);
408         }
409     } else {
410         ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
411         if (ret) {
412             error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
413                          "0x%"HWADDR_PRIx") = %d (%m)",
414                          container, iova,
415                          iotlb->addr_mask + 1, ret);
416         }
417     }
418 out:
419     rcu_read_unlock();
420 }
421 
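/*
 * MemoryListener callback: DMA-map a newly added RAM section, or register an
 * IOMMU notifier for vIOMMU regions so guest mappings are replayed into the
 * container.  For sPAPR TCE v2 containers a host DMA window is created first.
 */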
422 static void vfio_listener_region_add(MemoryListener *listener,
423                                      MemoryRegionSection *section)
424 {
425     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
426     hwaddr iova, end;
427     Int128 llend, llsize;
428     void *vaddr;
429     int ret;
430     VFIOHostDMAWindow *hostwin;
431     bool hostwin_found;
432 
433     if (vfio_listener_skipped_section(section)) {
434         trace_vfio_listener_region_add_skip(
435                 section->offset_within_address_space,
436                 section->offset_within_address_space +
437                 int128_get64(int128_sub(section->size, int128_one())));
438         return;
439     }
440 
441     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
442                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
443         error_report("%s received unaligned region", __func__);
444         return;
445     }
446 
447     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
448     llend = int128_make64(section->offset_within_address_space);
449     llend = int128_add(llend, section->size);
450     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
451 
452     if (int128_ge(int128_make64(iova), llend)) {
453         return;
454     }
455     end = int128_get64(int128_sub(llend, int128_one()));
456 
457     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
458         hwaddr pgsize = 0;
459 
460         /* For now, intersections are not allowed; we may relax this later */
461         QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
462             if (ranges_overlap(hostwin->min_iova,
463                                hostwin->max_iova - hostwin->min_iova + 1,
464                                section->offset_within_address_space,
465                                int128_get64(section->size))) {
466                 ret = -1;
467                 goto fail;
468             }
469         }
470 
471         ret = vfio_spapr_create_window(container, section, &pgsize);
472         if (ret) {
473             goto fail;
474         }
475 
476         vfio_host_win_add(container, section->offset_within_address_space,
477                           section->offset_within_address_space +
478                           int128_get64(section->size) - 1, pgsize);
479 #ifdef CONFIG_KVM
480         if (kvm_enabled()) {
481             VFIOGroup *group;
482             IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
483             struct kvm_vfio_spapr_tce param;
484             struct kvm_device_attr attr = {
485                 .group = KVM_DEV_VFIO_GROUP,
486                 .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
487                 .addr = (uint64_t)(unsigned long)&param,
488             };
489 
490             if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
491                                               &param.tablefd)) {
492                 QLIST_FOREACH(group, &container->group_list, container_next) {
493                     param.groupfd = group->fd;
494                     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
495                         error_report("vfio: failed to setup fd %d "
496                                      "for a group with fd %d: %s",
497                                      param.tablefd, param.groupfd,
498                                      strerror(errno));
499                         return;
500                     }
501                     trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
502                 }
503             }
504         }
505 #endif
506     }
507 
508     hostwin_found = false;
509     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
510         if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
511             hostwin_found = true;
512             break;
513         }
514     }
515 
516     if (!hostwin_found) {
517         error_report("vfio: IOMMU container %p can't map guest IOVA region"
518                      " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
519                      container, iova, end);
520         ret = -EFAULT;
521         goto fail;
522     }
523 
524     memory_region_ref(section->mr);
525 
526     if (memory_region_is_iommu(section->mr)) {
527         VFIOGuestIOMMU *giommu;
528         IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
529         int iommu_idx;
530 
531         trace_vfio_listener_region_add_iommu(iova, end);
532         /*
533          * FIXME: For VFIO IOMMU types that have KVM acceleration (to
534          * avoid bouncing all map/unmaps through QEMU this way), this
535          * would be the right place to wire that up (i.e. tell the KVM
536          * device emulation which VFIO IOMMU handles to use).
537          */
538         giommu = g_malloc0(sizeof(*giommu));
539         giommu->iommu = iommu_mr;
540         giommu->iommu_offset = section->offset_within_address_space -
541                                section->offset_within_region;
542         giommu->container = container;
543         llend = int128_add(int128_make64(section->offset_within_region),
544                            section->size);
545         llend = int128_sub(llend, int128_one());
546         iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
547                                                        MEMTXATTRS_UNSPECIFIED);
548         iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
549                             IOMMU_NOTIFIER_ALL,
550                             section->offset_within_region,
551                             int128_get64(llend),
552                             iommu_idx);
553         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
554 
555         memory_region_register_iommu_notifier(section->mr, &giommu->n);
556         memory_region_iommu_replay(giommu->iommu, &giommu->n);
557 
558         return;
559     }
560 
561     /* Here we assume that memory_region_is_ram(section->mr)==true */
562 
563     vaddr = memory_region_get_ram_ptr(section->mr) +
564             section->offset_within_region +
565             (iova - section->offset_within_address_space);
566 
567     trace_vfio_listener_region_add_ram(iova, end, vaddr);
568 
569     llsize = int128_sub(llend, int128_make64(iova));
570 
571     if (memory_region_is_ram_device(section->mr)) {
572         hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
573 
574         if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
575             trace_vfio_listener_region_add_no_dma_map(
576                 memory_region_name(section->mr),
577                 section->offset_within_address_space,
578                 int128_getlo(section->size),
579                 pgmask + 1);
580             return;
581         }
582     }
583 
584     ret = vfio_dma_map(container, iova, int128_get64(llsize),
585                        vaddr, section->readonly);
586     if (ret) {
587         error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
588                      "0x%"HWADDR_PRIx", %p) = %d (%m)",
589                      container, iova, int128_get64(llsize), vaddr, ret);
590         if (memory_region_is_ram_device(section->mr)) {
591             /* Allow unexpected mappings not to be fatal for RAM devices */
592             return;
593         }
594         goto fail;
595     }
596 
597     return;
598 
599 fail:
600     if (memory_region_is_ram_device(section->mr)) {
601         error_report("vfio_dma_map() failed; PCI peer-to-peer (p2p) may not work");
602         return;
603     }
604     /*
605      * On the initfn path, store the first error in the container so we
606      * can gracefully fail.  At runtime, there's not much we can do other
607      * than throw a hardware error.
608      */
609     if (!container->initialized) {
610         if (!container->error) {
611             container->error = ret;
612         }
613     } else {
614         hw_error("vfio: DMA mapping failed, unable to continue");
615     }
616 }
617 
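/*
 * MemoryListener callback: tear down the DMA mappings and IOMMU notifiers
 * set up by vfio_listener_region_add() when a section is removed.
 */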
618 static void vfio_listener_region_del(MemoryListener *listener,
619                                      MemoryRegionSection *section)
620 {
621     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
622     hwaddr iova, end;
623     Int128 llend, llsize;
624     int ret;
625     bool try_unmap = true;
626 
627     if (vfio_listener_skipped_section(section)) {
628         trace_vfio_listener_region_del_skip(
629                 section->offset_within_address_space,
630                 section->offset_within_address_space +
631                 int128_get64(int128_sub(section->size, int128_one())));
632         return;
633     }
634 
635     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
636                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
637         error_report("%s received unaligned region", __func__);
638         return;
639     }
640 
641     if (memory_region_is_iommu(section->mr)) {
642         VFIOGuestIOMMU *giommu;
643 
644         QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
645             if (MEMORY_REGION(giommu->iommu) == section->mr &&
646                 giommu->n.start == section->offset_within_region) {
647                 memory_region_unregister_iommu_notifier(section->mr,
648                                                         &giommu->n);
649                 QLIST_REMOVE(giommu, giommu_next);
650                 g_free(giommu);
651                 break;
652             }
653         }
654 
655         /*
656          * FIXME: We assume the one big unmap below is adequate to
657          * remove any individual page mappings in the IOMMU which
658          * might have been copied into VFIO. This works for a page table
659          * based IOMMU where a big unmap flattens a large range of IO-PTEs.
660          * That may not be true for all IOMMU types.
661          */
662     }
663 
664     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
665     llend = int128_make64(section->offset_within_address_space);
666     llend = int128_add(llend, section->size);
667     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
668 
669     if (int128_ge(int128_make64(iova), llend)) {
670         return;
671     }
672     end = int128_get64(int128_sub(llend, int128_one()));
673 
674     llsize = int128_sub(llend, int128_make64(iova));
675 
676     trace_vfio_listener_region_del(iova, end);
677 
678     if (memory_region_is_ram_device(section->mr)) {
679         hwaddr pgmask;
680         VFIOHostDMAWindow *hostwin;
681         bool hostwin_found = false;
682 
683         QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
684             if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
685                 hostwin_found = true;
686                 break;
687             }
688         }
689         assert(hostwin_found); /* or region_add() would have failed */
690 
691         pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
692         try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
693     }
694 
695     if (try_unmap) {
696         ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
697         if (ret) {
698             error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
699                          "0x%"HWADDR_PRIx") = %d (%m)",
700                          container, iova, int128_get64(llsize), ret);
701         }
702     }
703 
704     memory_region_unref(section->mr);
705 
706     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
707         vfio_spapr_remove_window(container,
708                                  section->offset_within_address_space);
709         if (vfio_host_win_del(container,
710                               section->offset_within_address_space,
711                               section->offset_within_address_space +
712                               int128_get64(section->size) - 1) < 0) {
713             hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
714                      __func__, section->offset_within_address_space);
715         }
716     }
717 }
718 
719 static const MemoryListener vfio_memory_listener = {
720     .region_add = vfio_listener_region_add,
721     .region_del = vfio_listener_region_del,
722 };
723 
724 static void vfio_listener_release(VFIOContainer *container)
725 {
726     memory_listener_unregister(&container->listener);
727     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
728         memory_listener_unregister(&container->prereg_listener);
729     }
730 }
731 
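/*
 * Walk the capability chain of a region info structure and return the
 * capability header matching @id, or NULL if it is not present.
 */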
732 static struct vfio_info_cap_header *
733 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
734 {
735     struct vfio_info_cap_header *hdr;
736     void *ptr = info;
737 
738     if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
739         return NULL;
740     }
741 
742     for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
743         if (hdr->id == id) {
744             return hdr;
745         }
746     }
747 
748     return NULL;
749 }
750 
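/*
 * Populate region->mmaps from the sparse mmap capability, keeping only
 * areas with a non-zero size.  Returns -ENODEV if the capability is absent.
 */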
751 static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
752                                           struct vfio_region_info *info)
753 {
754     struct vfio_info_cap_header *hdr;
755     struct vfio_region_info_cap_sparse_mmap *sparse;
756     int i, j;
757 
758     hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
759     if (!hdr) {
760         return -ENODEV;
761     }
762 
763     sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
764 
765     trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
766                                          region->nr, sparse->nr_areas);
767 
768     region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
769 
770     for (i = 0, j = 0; i < sparse->nr_areas; i++) {
771         trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
772                                             sparse->areas[i].offset +
773                                             sparse->areas[i].size);
774 
775         if (sparse->areas[i].size) {
776             region->mmaps[j].offset = sparse->areas[i].offset;
777             region->mmaps[j].size = sparse->areas[i].size;
778             j++;
779         }
780     }
781 
782     region->nr_mmaps = j;
783     region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
784 
785     return 0;
786 }
787 
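/*
 * Query the kernel for region @index and initialize @region, creating its
 * MemoryRegion and, when mmap is permitted, recording the mmap'able
 * sub-areas for a later vfio_region_mmap().
 */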
788 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
789                       int index, const char *name)
790 {
791     struct vfio_region_info *info;
792     int ret;
793 
794     ret = vfio_get_region_info(vbasedev, index, &info);
795     if (ret) {
796         return ret;
797     }
798 
799     region->vbasedev = vbasedev;
800     region->flags = info->flags;
801     region->size = info->size;
802     region->fd_offset = info->offset;
803     region->nr = index;
804 
805     if (region->size) {
806         region->mem = g_new0(MemoryRegion, 1);
807         memory_region_init_io(region->mem, obj, &vfio_region_ops,
808                               region, name, region->size);
809 
810         if (!vbasedev->no_mmap &&
811             region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
812 
813             ret = vfio_setup_region_sparse_mmaps(region, info);
814 
815             if (ret) {
816                 region->nr_mmaps = 1;
817                 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
818                 region->mmaps[0].offset = 0;
819                 region->mmaps[0].size = region->size;
820             }
821         }
822     }
823 
824     g_free(info);
825 
826     trace_vfio_region_setup(vbasedev->name, index, name,
827                             region->flags, region->fd_offset, region->size);
828     return 0;
829 }
830 
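/*
 * mmap each recorded sub-area of the region and add it as a RAM device
 * subregion; on failure, unwind any mappings already established.
 */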
831 int vfio_region_mmap(VFIORegion *region)
832 {
833     int i, prot = 0;
834     char *name;
835 
836     if (!region->mem) {
837         return 0;
838     }
839 
840     prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
841     prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
842 
843     for (i = 0; i < region->nr_mmaps; i++) {
844         region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
845                                      MAP_SHARED, region->vbasedev->fd,
846                                      region->fd_offset +
847                                      region->mmaps[i].offset);
848         if (region->mmaps[i].mmap == MAP_FAILED) {
849             int ret = -errno;
850 
851             trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
852                                          region->fd_offset +
853                                          region->mmaps[i].offset,
854                                          region->fd_offset +
855                                          region->mmaps[i].offset +
856                                          region->mmaps[i].size - 1, ret);
857 
858             region->mmaps[i].mmap = NULL;
859 
860             for (i--; i >= 0; i--) {
861                 memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
862                 munmap(region->mmaps[i].mmap, region->mmaps[i].size);
863                 object_unparent(OBJECT(&region->mmaps[i].mem));
864                 region->mmaps[i].mmap = NULL;
865             }
866 
867             return ret;
868         }
869 
870         name = g_strdup_printf("%s mmaps[%d]",
871                                memory_region_name(region->mem), i);
872         memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
873                                           memory_region_owner(region->mem),
874                                           name, region->mmaps[i].size,
875                                           region->mmaps[i].mmap);
876         g_free(name);
877         memory_region_add_subregion(region->mem, region->mmaps[i].offset,
878                                     &region->mmaps[i].mem);
879 
880         trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
881                                region->mmaps[i].offset,
882                                region->mmaps[i].offset +
883                                region->mmaps[i].size - 1);
884     }
885 
886     return 0;
887 }
888 
889 void vfio_region_exit(VFIORegion *region)
890 {
891     int i;
892 
893     if (!region->mem) {
894         return;
895     }
896 
897     for (i = 0; i < region->nr_mmaps; i++) {
898         if (region->mmaps[i].mmap) {
899             memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
900         }
901     }
902 
903     trace_vfio_region_exit(region->vbasedev->name, region->nr);
904 }
905 
906 void vfio_region_finalize(VFIORegion *region)
907 {
908     int i;
909 
910     if (!region->mem) {
911         return;
912     }
913 
914     for (i = 0; i < region->nr_mmaps; i++) {
915         if (region->mmaps[i].mmap) {
916             munmap(region->mmaps[i].mmap, region->mmaps[i].size);
917             object_unparent(OBJECT(&region->mmaps[i].mem));
918         }
919     }
920 
921     object_unparent(OBJECT(region->mem));
922 
923     g_free(region->mem);
924     g_free(region->mmaps);
925 
926     trace_vfio_region_finalize(region->vbasedev->name, region->nr);
927 
928     region->mem = NULL;
929     region->mmaps = NULL;
930     region->nr_mmaps = 0;
931     region->size = 0;
932     region->flags = 0;
933     region->nr = 0;
934 }
935 
936 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
937 {
938     int i;
939 
940     if (!region->mem) {
941         return;
942     }
943 
944     for (i = 0; i < region->nr_mmaps; i++) {
945         if (region->mmaps[i].mmap) {
946             memory_region_set_enabled(&region->mmaps[i].mem, enabled);
947         }
948     }
949 
950     trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
951                                         enabled);
952 }
953 
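/*
 * System reset handler: ask every realized VFIO device whether it needs a
 * reset, then perform the multi-device hot resets that were requested.
 */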
954 void vfio_reset_handler(void *opaque)
955 {
956     VFIOGroup *group;
957     VFIODevice *vbasedev;
958 
959     QLIST_FOREACH(group, &vfio_group_list, next) {
960         QLIST_FOREACH(vbasedev, &group->device_list, next) {
961             if (vbasedev->dev->realized) {
962                 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
963             }
964         }
965     }
966 
967     QLIST_FOREACH(group, &vfio_group_list, next) {
968         QLIST_FOREACH(vbasedev, &group->device_list, next) {
969             if (vbasedev->dev->realized && vbasedev->needs_reset) {
970                 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
971             }
972         }
973     }
974 }
975 
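/*
 * Register the group with the KVM VFIO pseudo device, creating the device
 * on first use.  This is a no-op without KVM support.
 */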
976 static void vfio_kvm_device_add_group(VFIOGroup *group)
977 {
978 #ifdef CONFIG_KVM
979     struct kvm_device_attr attr = {
980         .group = KVM_DEV_VFIO_GROUP,
981         .attr = KVM_DEV_VFIO_GROUP_ADD,
982         .addr = (uint64_t)(unsigned long)&group->fd,
983     };
984 
985     if (!kvm_enabled()) {
986         return;
987     }
988 
989     if (vfio_kvm_device_fd < 0) {
990         struct kvm_create_device cd = {
991             .type = KVM_DEV_TYPE_VFIO,
992         };
993 
994         if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
995             error_report("Failed to create KVM VFIO device: %m");
996             return;
997         }
998 
999         vfio_kvm_device_fd = cd.fd;
1000     }
1001 
1002     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
1003         error_report("Failed to add group %d to KVM VFIO device: %m",
1004                      group->groupid);
1005     }
1006 #endif
1007 }
1008 
1009 static void vfio_kvm_device_del_group(VFIOGroup *group)
1010 {
1011 #ifdef CONFIG_KVM
1012     struct kvm_device_attr attr = {
1013         .group = KVM_DEV_VFIO_GROUP,
1014         .attr = KVM_DEV_VFIO_GROUP_DEL,
1015         .addr = (uint64_t)(unsigned long)&group->fd,
1016     };
1017 
1018     if (vfio_kvm_device_fd < 0) {
1019         return;
1020     }
1021 
1022     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
1023         error_report("Failed to remove group %d from KVM VFIO device: %m",
1024                      group->groupid);
1025     }
1026 #endif
1027 }
1028 
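/* Look up the VFIOAddressSpace for @as, allocating it on first use. */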
1029 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
1030 {
1031     VFIOAddressSpace *space;
1032 
1033     QLIST_FOREACH(space, &vfio_address_spaces, list) {
1034         if (space->as == as) {
1035             return space;
1036         }
1037     }
1038 
1039     /* No suitable VFIOAddressSpace, create a new one */
1040     space = g_malloc0(sizeof(*space));
1041     space->as = as;
1042     QLIST_INIT(&space->containers);
1043 
1044     QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
1045 
1046     return space;
1047 }
1048 
1049 static void vfio_put_address_space(VFIOAddressSpace *space)
1050 {
1051     if (QLIST_EMPTY(&space->containers)) {
1052         QLIST_REMOVE(space, list);
1053         g_free(space);
1054     }
1055 }
1056 
1057 /*
1058  * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
1059  */
1060 static int vfio_get_iommu_type(VFIOContainer *container,
1061                                Error **errp)
1062 {
1063     int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
1064                           VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
1065     int i;
1066 
1067     for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
1068         if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
1069             return iommu_types[i];
1070         }
1071     }
1072     error_setg(errp, "No available IOMMU models");
1073     return -EINVAL;
1074 }
1075 
1076 static int vfio_init_container(VFIOContainer *container, int group_fd,
1077                                Error **errp)
1078 {
1079     int iommu_type, ret;
1080 
1081     iommu_type = vfio_get_iommu_type(container, errp);
1082     if (iommu_type < 0) {
1083         return iommu_type;
1084     }
1085 
1086     ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
1087     if (ret) {
1088         error_setg_errno(errp, errno, "Failed to set group container");
1089         return -errno;
1090     }
1091 
1092     while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
1093         if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1094             /*
1095              * On sPAPR, although the IOMMU subdriver always advertises v1 and
1096              * v2, the running platform may not support v2, and there is no
1097              * way to know until an IOMMU group gets added to the container.
1098              * So if setting v2 fails, try v1 as a fallback.
1099              */
1100             iommu_type = VFIO_SPAPR_TCE_IOMMU;
1101             continue;
1102         }
1103         error_setg_errno(errp, errno, "Failed to set iommu for container");
1104         return -errno;
1105     }
1106 
1107     container->iommu_type = iommu_type;
1108     return 0;
1109 }
1110 
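/*
 * Attach @group to an existing container in the address space if possible,
 * otherwise open /dev/vfio/vfio, create a new container, pick an IOMMU type
 * and register the memory listeners for it.
 */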
1111 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
1112                                   Error **errp)
1113 {
1114     VFIOContainer *container;
1115     int ret, fd;
1116     VFIOAddressSpace *space;
1117 
1118     space = vfio_get_address_space(as);
1119 
1120     /*
1121      * VFIO is currently incompatible with memory ballooning insofar as the
1122      * madvise to purge (zap) the page from QEMU's address space does not
1123      * interact with the memory API and therefore leaves stale virtual to
1124      * physical mappings in the IOMMU if the page was previously pinned.  We
1125      * therefore add a balloon inhibit for each group added to a container,
1126      * whether the container is used individually or shared.  This provides
1127      * us with options to allow devices within a group to opt-in and allow
1128      * ballooning, so long as it is done consistently for a group (for instance
1129      * if the device is an mdev device where it is known that the host vendor
1130      * driver will never pin pages outside of the working set of the guest
1131      * driver, which would thus not be ballooning candidates).
1132      *
1133      * The first opportunity to induce pinning occurs here where we attempt to
1134      * attach the group to existing containers within the AddressSpace.  If any
1135      * pages are already zapped from the virtual address space, such as from a
1136      * previous ballooning opt-in, new pinning will cause valid mappings to be
1137      * re-established.  Likewise, when the overall MemoryListener for a new
1138      * container is registered, a replay of mappings within the AddressSpace
1139      * will occur, re-establishing any previously zapped pages as well.
1140      *
1141      * NB. Balloon inhibiting does not currently block operation of the
1142      * balloon driver or revoke previously pinned pages; it only prevents
1143      * calling madvise to modify the virtual mapping of ballooned pages.
1144      */
1145     qemu_balloon_inhibit(true);
1146 
1147     QLIST_FOREACH(container, &space->containers, next) {
1148         if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
1149             group->container = container;
1150             QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1151             vfio_kvm_device_add_group(group);
1152             return 0;
1153         }
1154     }
1155 
1156     fd = qemu_open("/dev/vfio/vfio", O_RDWR);
1157     if (fd < 0) {
1158         error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
1159         ret = -errno;
1160         goto put_space_exit;
1161     }
1162 
1163     ret = ioctl(fd, VFIO_GET_API_VERSION);
1164     if (ret != VFIO_API_VERSION) {
1165         error_setg(errp, "supported vfio version: %d, "
1166                    "reported version: %d", VFIO_API_VERSION, ret);
1167         ret = -EINVAL;
1168         goto close_fd_exit;
1169     }
1170 
1171     container = g_malloc0(sizeof(*container));
1172     container->space = space;
1173     container->fd = fd;
1174     QLIST_INIT(&container->giommu_list);
1175     QLIST_INIT(&container->hostwin_list);
1176 
1177     ret = vfio_init_container(container, group->fd, errp);
1178     if (ret) {
1179         goto free_container_exit;
1180     }
1181 
1182     switch (container->iommu_type) {
1183     case VFIO_TYPE1v2_IOMMU:
1184     case VFIO_TYPE1_IOMMU:
1185     {
1186         struct vfio_iommu_type1_info info;
1187 
1188         /*
1189          * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
1190          * IOVA whatsoever.  That's not actually true, but the current
1191          * kernel interface doesn't tell us what it can map, and the
1192          * existing Type1 IOMMUs generally support any IOVA we're
1193          * going to actually try in practice.
1194          */
1195         info.argsz = sizeof(info);
1196         ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
1197         /* Ignore errors */
1198         if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
1199             /* Assume 4k IOVA page size */
1200             info.iova_pgsizes = 4096;
1201         }
1202         vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
1203         container->pgsizes = info.iova_pgsizes;
1204         break;
1205     }
1206     case VFIO_SPAPR_TCE_v2_IOMMU:
1207     case VFIO_SPAPR_TCE_IOMMU:
1208     {
1209         struct vfio_iommu_spapr_tce_info info;
1210         bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
1211 
1212         /*
1213          * The host kernel code implementing VFIO_IOMMU_DISABLE is called
1214          * when the container fd is closed, so we do not call it explicitly
1215          * in this file.
1216          */
1217         if (!v2) {
1218             ret = ioctl(fd, VFIO_IOMMU_ENABLE);
1219             if (ret) {
1220                 error_setg_errno(errp, errno, "failed to enable container");
1221                 ret = -errno;
1222                 goto free_container_exit;
1223             }
1224         } else {
1225             container->prereg_listener = vfio_prereg_listener;
1226 
1227             memory_listener_register(&container->prereg_listener,
1228                                      &address_space_memory);
1229             if (container->error) {
1230                 memory_listener_unregister(&container->prereg_listener);
1231                 ret = container->error;
1232                 error_setg(errp,
1233                     "RAM memory listener initialization failed for container");
1234                 goto free_container_exit;
1235             }
1236         }
1237 
1238         info.argsz = sizeof(info);
1239         ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1240         if (ret) {
1241             error_setg_errno(errp, errno,
1242                              "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
1243             ret = -errno;
1244             if (v2) {
1245                 memory_listener_unregister(&container->prereg_listener);
1246             }
1247             goto free_container_exit;
1248         }
1249 
1250         if (v2) {
1251             container->pgsizes = info.ddw.pgsizes;
1252             /*
1253              * A just-created container comes with a default window.
1254              * To make region_add/del simpler, it is better to remove this
1255              * window now and let the iommu_listener callbacks
1256              * create/remove windows when needed.
1257              */
1258             ret = vfio_spapr_remove_window(container, info.dma32_window_start);
1259             if (ret) {
1260                 error_setg_errno(errp, -ret,
1261                                  "failed to remove existing window");
1262                 goto free_container_exit;
1263             }
1264         } else {
1265             /* The default table uses 4K pages */
1266             container->pgsizes = 0x1000;
1267             vfio_host_win_add(container, info.dma32_window_start,
1268                               info.dma32_window_start +
1269                               info.dma32_window_size - 1,
1270                               0x1000);
1271         }
1272     }
1273     }
1274 
1275     vfio_kvm_device_add_group(group);
1276 
1277     QLIST_INIT(&container->group_list);
1278     QLIST_INSERT_HEAD(&space->containers, container, next);
1279 
1280     group->container = container;
1281     QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1282 
1283     container->listener = vfio_memory_listener;
1284 
1285     memory_listener_register(&container->listener, container->space->as);
1286 
1287     if (container->error) {
1288         ret = container->error;
1289         error_setg_errno(errp, -ret,
1290                          "memory listener initialization failed for container");
1291         goto listener_release_exit;
1292     }
1293 
1294     container->initialized = true;
1295 
1296     return 0;
1297 listener_release_exit:
1298     QLIST_REMOVE(group, container_next);
1299     QLIST_REMOVE(container, next);
1300     vfio_kvm_device_del_group(group);
1301     vfio_listener_release(container);
1302 
1303 free_container_exit:
1304     g_free(container);
1305 
1306 close_fd_exit:
1307     close(fd);
1308 
1309 put_space_exit:
1310     qemu_balloon_inhibit(false);
1311     vfio_put_address_space(space);
1312 
1313     return ret;
1314 }
1315 
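/*
 * Detach @group from its container and, if this was the last group, release
 * the listeners, tear down the guest IOMMU notifiers and close the container.
 */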
1316 static void vfio_disconnect_container(VFIOGroup *group)
1317 {
1318     VFIOContainer *container = group->container;
1319 
1320     QLIST_REMOVE(group, container_next);
1321     group->container = NULL;
1322 
1323     /*
1324      * Explicitly release the listener before unsetting the container,
1325      * since unset may destroy the backend container if it's the last
1326      * group.
1327      */
1328     if (QLIST_EMPTY(&container->group_list)) {
1329         vfio_listener_release(container);
1330     }
1331 
1332     if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
1333         error_report("vfio: error disconnecting group %d from container",
1334                      group->groupid);
1335     }
1336 
1337     if (QLIST_EMPTY(&container->group_list)) {
1338         VFIOAddressSpace *space = container->space;
1339         VFIOGuestIOMMU *giommu, *tmp;
1340 
1341         QLIST_REMOVE(container, next);
1342 
1343         QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
1344             memory_region_unregister_iommu_notifier(
1345                     MEMORY_REGION(giommu->iommu), &giommu->n);
1346             QLIST_REMOVE(giommu, giommu_next);
1347             g_free(giommu);
1348         }
1349 
1350         trace_vfio_disconnect_container(container->fd);
1351         close(container->fd);
1352         g_free(container);
1353 
1354         vfio_put_address_space(space);
1355     }
1356 }
1357 
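/*
 * Return the VFIOGroup for @groupid, opening /dev/vfio/<groupid> and
 * connecting it to a container in @as on first use.  A group may only be
 * used in a single address space.
 */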
1358 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
1359 {
1360     VFIOGroup *group;
1361     char path[32];
1362     struct vfio_group_status status = { .argsz = sizeof(status) };
1363 
1364     QLIST_FOREACH(group, &vfio_group_list, next) {
1365         if (group->groupid == groupid) {
1366             /* Found it.  Now is it already in the right context? */
1367             if (group->container->space->as == as) {
1368                 return group;
1369             } else {
1370                 error_setg(errp, "group %d used in multiple address spaces",
1371                            group->groupid);
1372                 return NULL;
1373             }
1374         }
1375     }
1376 
1377     group = g_malloc0(sizeof(*group));
1378 
1379     snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
1380     group->fd = qemu_open(path, O_RDWR);
1381     if (group->fd < 0) {
1382         error_setg_errno(errp, errno, "failed to open %s", path);
1383         goto free_group_exit;
1384     }
1385 
1386     if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
1387         error_setg_errno(errp, errno, "failed to get group %d status", groupid);
1388         goto close_fd_exit;
1389     }
1390 
1391     if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
1392         error_setg(errp, "group %d is not viable", groupid);
1393         error_append_hint(errp,
1394                           "Please ensure all devices within the iommu_group "
1395                           "are bound to their vfio bus driver.\n");
1396         goto close_fd_exit;
1397     }
1398 
1399     group->groupid = groupid;
1400     QLIST_INIT(&group->device_list);
1401 
1402     if (vfio_connect_container(group, as, errp)) {
1403         error_prepend(errp, "failed to setup container for group %d: ",
1404                       groupid);
1405         goto close_fd_exit;
1406     }
1407 
1408     if (QLIST_EMPTY(&vfio_group_list)) {
1409         qemu_register_reset(vfio_reset_handler, NULL);
1410     }
1411 
1412     QLIST_INSERT_HEAD(&vfio_group_list, group, next);
1413 
1414     return group;
1415 
1416 close_fd_exit:
1417     close(group->fd);
1418 
1419 free_group_exit:
1420     g_free(group);
1421 
1422     return NULL;
1423 }
1424 
1425 void vfio_put_group(VFIOGroup *group)
1426 {
1427     if (!group || !QLIST_EMPTY(&group->device_list)) {
1428         return;
1429     }
1430 
1431     if (!group->balloon_allowed) {
1432         qemu_balloon_inhibit(false);
1433     }
1434     vfio_kvm_device_del_group(group);
1435     vfio_disconnect_container(group);
1436     QLIST_REMOVE(group, next);
1437     trace_vfio_put_group(group->fd);
1438     close(group->fd);
1439     g_free(group);
1440 
1441     if (QLIST_EMPTY(&vfio_group_list)) {
1442         qemu_unregister_reset(vfio_reset_handler, NULL);
1443     }
1444 }
1445 
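/*
 * Obtain a device fd from the group and fill in the VFIODevice fields from
 * VFIO_DEVICE_GET_INFO.  Also manages the per-group balloon inhibitor for
 * devices known to operate compatibly with ballooning.
 */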
1446 int vfio_get_device(VFIOGroup *group, const char *name,
1447                     VFIODevice *vbasedev, Error **errp)
1448 {
1449     struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
1450     int ret, fd;
1451 
1452     fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
1453     if (fd < 0) {
1454         error_setg_errno(errp, errno, "error getting device from group %d",
1455                          group->groupid);
1456         error_append_hint(errp,
1457                       "Verify all devices in group %d are bound to vfio-<bus> "
1458                       "or pci-stub and not already in use\n", group->groupid);
1459         return fd;
1460     }
1461 
1462     ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
1463     if (ret) {
1464         error_setg_errno(errp, errno, "error getting device info");
1465         close(fd);
1466         return ret;
1467     }
1468 
1469     /*
1470      * Clear the balloon inhibitor for this group if the driver knows the
1471      * device operates compatibly with ballooning.  Setting must be consistent
1472      * per group, but since compatibility is really only possible with mdev
1473      * currently, we expect singleton groups.
1474      */
1475     if (vbasedev->balloon_allowed != group->balloon_allowed) {
1476         if (!QLIST_EMPTY(&group->device_list)) {
1477             error_setg(errp,
1478                        "Inconsistent device balloon setting within group");
1479             close(fd);
1480             return -1;
1481         }
1482 
1483         if (!group->balloon_allowed) {
1484             group->balloon_allowed = true;
1485             qemu_balloon_inhibit(false);
1486         }
1487     }
1488 
1489     vbasedev->fd = fd;
1490     vbasedev->group = group;
1491     QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
1492 
1493     vbasedev->num_irqs = dev_info.num_irqs;
1494     vbasedev->num_regions = dev_info.num_regions;
1495     vbasedev->flags = dev_info.flags;
1496 
1497     trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
1498                           dev_info.num_irqs);
1499 
1500     vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
1501     return 0;
1502 }
1503 
1504 void vfio_put_base_device(VFIODevice *vbasedev)
1505 {
1506     if (!vbasedev->group) {
1507         return;
1508     }
1509     QLIST_REMOVE(vbasedev, next);
1510     vbasedev->group = NULL;
1511     trace_vfio_put_base_device(vbasedev->fd);
1512     close(vbasedev->fd);
1513 }
1514 
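/*
 * Fetch region info for @index, growing the buffer as needed to fit any
 * capability chain reported by the kernel.  The caller must g_free(*info).
 */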
1515 int vfio_get_region_info(VFIODevice *vbasedev, int index,
1516                          struct vfio_region_info **info)
1517 {
1518     size_t argsz = sizeof(struct vfio_region_info);
1519 
1520     *info = g_malloc0(argsz);
1521 
1522     (*info)->index = index;
1523 retry:
1524     (*info)->argsz = argsz;
1525 
1526     if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
1527         g_free(*info);
1528         *info = NULL;
1529         return -errno;
1530     }
1531 
1532     if ((*info)->argsz > argsz) {
1533         argsz = (*info)->argsz;
1534         *info = g_realloc(*info, argsz);
1535 
1536         goto retry;
1537     }
1538 
1539     return 0;
1540 }
1541 
1542 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
1543                              uint32_t subtype, struct vfio_region_info **info)
1544 {
1545     int i;
1546 
1547     for (i = 0; i < vbasedev->num_regions; i++) {
1548         struct vfio_info_cap_header *hdr;
1549         struct vfio_region_info_cap_type *cap_type;
1550 
1551         if (vfio_get_region_info(vbasedev, i, info)) {
1552             continue;
1553         }
1554 
1555         hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
1556         if (!hdr) {
1557             g_free(*info);
1558             continue;
1559         }
1560 
1561         cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
1562 
1563         trace_vfio_get_dev_region(vbasedev->name, i,
1564                                   cap_type->type, cap_type->subtype);
1565 
1566         if (cap_type->type == type && cap_type->subtype == subtype) {
1567             return 0;
1568         }
1569 
1570         g_free(*info);
1571     }
1572 
1573     *info = NULL;
1574     return -ENODEV;
1575 }
1576 
1577 bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
1578 {
1579     struct vfio_region_info *info = NULL;
1580     bool ret = false;
1581 
1582     if (!vfio_get_region_info(vbasedev, region, &info)) {
1583         if (vfio_get_region_info_cap(info, cap_type)) {
1584             ret = true;
1585         }
1586         g_free(info);
1587     }
1588 
1589     return ret;
1590 }
1591 
1592 /*
1593  * Interfaces for IBM EEH (Enhanced Error Handling)
1594  */
1595 static bool vfio_eeh_container_ok(VFIOContainer *container)
1596 {
1597     /*
1598      * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
1599      * implementation is broken if there are multiple groups in a
1600      * container.  The hardware works in units of Partitionable
1601      * Endpoints (== IOMMU groups) and the EEH operations naively
1602      * iterate across all groups in the container, without any logic
1603      * to make sure the groups have their state synchronized.  For
1604      * certain operations (ENABLE) that might be ok, until an error
1605      * occurs, but for others (GET_STATE) it's clearly broken.
1606      */
1607 
1608     /*
1609      * XXX Once fixed kernels exist, test for them here
1610      */
1611 
1612     if (QLIST_EMPTY(&container->group_list)) {
1613         return false;
1614     }
1615 
1616     if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
1617         return false;
1618     }
1619 
1620     return true;
1621 }
1622 
1623 static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
1624 {
1625     struct vfio_eeh_pe_op pe_op = {
1626         .argsz = sizeof(pe_op),
1627         .op = op,
1628     };
1629     int ret;
1630 
1631     if (!vfio_eeh_container_ok(container)) {
1632         error_report("vfio/eeh: EEH_PE_OP 0x%x: "
1633                      "kernel requires a container with exactly one group", op);
1634         return -EPERM;
1635     }
1636 
1637     ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
1638     if (ret < 0) {
1639         error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
1640         return -errno;
1641     }
1642 
1643     return ret;
1644 }
1645 
1646 static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
1647 {
1648     VFIOAddressSpace *space = vfio_get_address_space(as);
1649     VFIOContainer *container = NULL;
1650 
1651     if (QLIST_EMPTY(&space->containers)) {
1652         /* No containers to act on */
1653         goto out;
1654     }
1655 
1656     container = QLIST_FIRST(&space->containers);
1657 
1658     if (QLIST_NEXT(container, next)) {
1659         /* We don't yet have logic to synchronize EEH state across
1660          * multiple containers */
1661         container = NULL;
1662         goto out;
1663     }
1664 
1665 out:
1666     vfio_put_address_space(space);
1667     return container;
1668 }
1669 
1670 bool vfio_eeh_as_ok(AddressSpace *as)
1671 {
1672     VFIOContainer *container = vfio_eeh_as_container(as);
1673 
1674     return (container != NULL) && vfio_eeh_container_ok(container);
1675 }
1676 
1677 int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
1678 {
1679     VFIOContainer *container = vfio_eeh_as_container(as);
1680 
1681     if (!container) {
1682         return -ENODEV;
1683     }
1684     return vfio_eeh_container_op(container, op);
1685 }
1686