1 /*
2  * generic functions used by VFIO devices
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23 #ifdef CONFIG_KVM
24 #include <linux/kvm.h>
25 #endif
26 #include <linux/vfio.h>
27 
28 #include "hw/vfio/vfio-common.h"
29 #include "hw/vfio/vfio.h"
30 #include "hw/vfio/pci.h"
31 #include "exec/address-spaces.h"
32 #include "exec/memory.h"
33 #include "exec/ram_addr.h"
34 #include "hw/hw.h"
35 #include "qemu/error-report.h"
36 #include "qemu/main-loop.h"
37 #include "qemu/range.h"
38 #include "sysemu/kvm.h"
39 #include "sysemu/reset.h"
40 #include "sysemu/runstate.h"
41 #include "trace.h"
42 #include "qapi/error.h"
43 #include "migration/migration.h"
44 #include "migration/misc.h"
45 #include "migration/blocker.h"
46 #include "migration/qemu-file.h"
47 #include "sysemu/tpm.h"
48 
49 VFIOGroupList vfio_group_list =
50     QLIST_HEAD_INITIALIZER(vfio_group_list);
51 static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
52     QLIST_HEAD_INITIALIZER(vfio_address_spaces);
53 
54 #ifdef CONFIG_KVM
55 /*
56  * We have a single VFIO pseudo device per KVM VM.  Once created it lives
57  * for the life of the VM.  Closing the file descriptor only drops our
58  * reference to it and the device's reference to kvm.  Therefore once
59  * initialized, this file descriptor is only released on QEMU exit and
60  * we'll re-use it should another vfio device be attached before then.
61  */
62 static int vfio_kvm_device_fd = -1;
63 #endif
64 
65 /*
66  * Common VFIO interrupt disable
67  */
68 void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
69 {
70     struct vfio_irq_set irq_set = {
71         .argsz = sizeof(irq_set),
72         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
73         .index = index,
74         .start = 0,
75         .count = 0,
76     };
77 
78     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
79 }
80 
81 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
82 {
83     struct vfio_irq_set irq_set = {
84         .argsz = sizeof(irq_set),
85         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
86         .index = index,
87         .start = 0,
88         .count = 1,
89     };
90 
91     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
92 }
93 
94 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
95 {
96     struct vfio_irq_set irq_set = {
97         .argsz = sizeof(irq_set),
98         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
99         .index = index,
100         .start = 0,
101         .count = 1,
102     };
103 
104     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
105 }
106 
107 static inline const char *action_to_str(int action)
108 {
109     switch (action) {
110     case VFIO_IRQ_SET_ACTION_MASK:
111         return "MASK";
112     case VFIO_IRQ_SET_ACTION_UNMASK:
113         return "UNMASK";
114     case VFIO_IRQ_SET_ACTION_TRIGGER:
115         return "TRIGGER";
116     default:
117         return "UNKNOWN ACTION";
118     }
119 }
120 
121 static const char *index_to_str(VFIODevice *vbasedev, int index)
122 {
123     if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
124         return NULL;
125     }
126 
127     switch (index) {
128     case VFIO_PCI_INTX_IRQ_INDEX:
129         return "INTX";
130     case VFIO_PCI_MSI_IRQ_INDEX:
131         return "MSI";
132     case VFIO_PCI_MSIX_IRQ_INDEX:
133         return "MSIX";
134     case VFIO_PCI_ERR_IRQ_INDEX:
135         return "ERR";
136     case VFIO_PCI_REQ_IRQ_INDEX:
137         return "REQ";
138     default:
139         return NULL;
140     }
141 }
142 
143 static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
144 {
145     switch (container->iommu_type) {
146     case VFIO_TYPE1v2_IOMMU:
147     case VFIO_TYPE1_IOMMU:
148         /*
149          * We support coordinated discarding of RAM via the RamDiscardManager.
150          */
151         return ram_block_uncoordinated_discard_disable(state);
152     default:
153         /*
154          * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
155          * RamDiscardManager, however, it is completely untested.
156          *
157          * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
158          * completely the opposite of managing mapping/pinning dynamically as
159          * required by RamDiscardManager. We would have to special-case sections
160          * with a RamDiscardManager.
161          */
162         return ram_block_discard_disable(state);
163     }
164 }
165 
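/*
 * Set up (fd >= 0) or tear down (fd < 0) eventfd signaling for one interrupt
 * of @vbasedev via VFIO_DEVICE_SET_IRQS.  Returns 0 on success or a negative
 * errno, with a descriptive message added to @errp on failure.
 *
 * Illustrative call pattern (the notifier variable is hypothetical):
 *
 *   vfio_set_irq_signaling(vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
 *                          VFIO_IRQ_SET_ACTION_TRIGGER,
 *                          event_notifier_get_fd(&req_notifier), &err);
 */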
166 int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
167                            int action, int fd, Error **errp)
168 {
169     struct vfio_irq_set *irq_set;
170     int argsz, ret = 0;
171     const char *name;
172     int32_t *pfd;
173 
174     argsz = sizeof(*irq_set) + sizeof(*pfd);
175 
176     irq_set = g_malloc0(argsz);
177     irq_set->argsz = argsz;
178     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
179     irq_set->index = index;
180     irq_set->start = subindex;
181     irq_set->count = 1;
182     pfd = (int32_t *)&irq_set->data;
183     *pfd = fd;
184 
185     if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
186         ret = -errno;
187     }
188     g_free(irq_set);
189 
190     if (!ret) {
191         return 0;
192     }
193 
194     error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");
195 
196     name = index_to_str(vbasedev, index);
197     if (name) {
198         error_prepend(errp, "%s-%d: ", name, subindex);
199     } else {
200         error_prepend(errp, "index %d-%d: ", index, subindex);
201     }
202     error_prepend(errp,
203                   "Failed to %s %s eventfd signaling for interrupt ",
204                   fd < 0 ? "tear down" : "set up", action_to_str(action));
205     return ret;
206 }
207 
208 /*
209  * IO Port/MMIO - Beware of the endians, VFIO is always little endian
210  */
211 void vfio_region_write(void *opaque, hwaddr addr,
212                        uint64_t data, unsigned size)
213 {
214     VFIORegion *region = opaque;
215     VFIODevice *vbasedev = region->vbasedev;
216     union {
217         uint8_t byte;
218         uint16_t word;
219         uint32_t dword;
220         uint64_t qword;
221     } buf;
222 
223     switch (size) {
224     case 1:
225         buf.byte = data;
226         break;
227     case 2:
228         buf.word = cpu_to_le16(data);
229         break;
230     case 4:
231         buf.dword = cpu_to_le32(data);
232         break;
233     case 8:
234         buf.qword = cpu_to_le64(data);
235         break;
236     default:
237         hw_error("vfio: unsupported write size, %u bytes", size);
238         break;
239     }
240 
241     if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
242         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
243                      ",%d) failed: %m",
244                      __func__, vbasedev->name, region->nr,
245                      addr, data, size);
246     }
247 
248     trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
249 
250     /*
251      * A read or write to a BAR always signals an INTx EOI.  This will
252      * do nothing if not pending (including not in INTx mode).  We assume
253      * that a BAR access is in response to an interrupt and that BAR
254      * accesses will service the interrupt.  Unfortunately, we don't know
255      * which access will service the interrupt, so we're potentially
256      * getting quite a few host interrupts per guest interrupt.
257      */
258     vbasedev->ops->vfio_eoi(vbasedev);
259 }
260 
261 uint64_t vfio_region_read(void *opaque,
262                           hwaddr addr, unsigned size)
263 {
264     VFIORegion *region = opaque;
265     VFIODevice *vbasedev = region->vbasedev;
266     union {
267         uint8_t byte;
268         uint16_t word;
269         uint32_t dword;
270         uint64_t qword;
271     } buf;
272     uint64_t data = 0;
273 
274     if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
275         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
276                      __func__, vbasedev->name, region->nr,
277                      addr, size);
278         return (uint64_t)-1;
279     }
280     switch (size) {
281     case 1:
282         data = buf.byte;
283         break;
284     case 2:
285         data = le16_to_cpu(buf.word);
286         break;
287     case 4:
288         data = le32_to_cpu(buf.dword);
289         break;
290     case 8:
291         data = le64_to_cpu(buf.qword);
292         break;
293     default:
294         hw_error("vfio: unsupported read size, %u bytes", size);
295         break;
296     }
297 
298     trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
299 
300     /* Same as write above */
301     vbasedev->ops->vfio_eoi(vbasedev);
302 
303     return data;
304 }
305 
306 const MemoryRegionOps vfio_region_ops = {
307     .read = vfio_region_read,
308     .write = vfio_region_write,
309     .endianness = DEVICE_LITTLE_ENDIAN,
310     .valid = {
311         .min_access_size = 1,
312         .max_access_size = 8,
313     },
314     .impl = {
315         .min_access_size = 1,
316         .max_access_size = 8,
317     },
318 };
319 
320 /*
321  * Device state interfaces
322  */
323 
324 typedef struct {
325     unsigned long *bitmap;
326     hwaddr size;
327     hwaddr pages;
328 } VFIOBitmap;
329 
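/*
 * Allocate a dirty bitmap covering @size bytes of guest memory, one bit per
 * host page, rounded up to a multiple of sizeof(__u64).  Returns 0 on
 * success or -ENOMEM.
 */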
330 static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
331 {
332     vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
333     vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
334                                          BITS_PER_BYTE;
335     vbmap->bitmap = g_try_malloc0(vbmap->size);
336     if (!vbmap->bitmap) {
337         return -ENOMEM;
338     }
339 
340     return 0;
341 }
342 
343 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
344                                  uint64_t size, ram_addr_t ram_addr);
345 
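/*
 * Return true if VFIO migration is in use: at least one group exists and no
 * VFIO device has installed a migration blocker.
 */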
346 bool vfio_mig_active(void)
347 {
348     VFIOGroup *group;
349     VFIODevice *vbasedev;
350 
351     if (QLIST_EMPTY(&vfio_group_list)) {
352         return false;
353     }
354 
355     QLIST_FOREACH(group, &vfio_group_list, next) {
356         QLIST_FOREACH(vbasedev, &group->device_list, next) {
357             if (vbasedev->migration_blocker) {
358                 return false;
359             }
360         }
361     }
362     return true;
363 }
364 
365 static Error *multiple_devices_migration_blocker;
366 
367 /*
368  * Multiple devices migration is allowed only if all devices support P2P
369  * migration. Single device migration is allowed regardless of P2P migration
370  * support.
371  */
372 static bool vfio_multiple_devices_migration_is_supported(void)
373 {
374     VFIOGroup *group;
375     VFIODevice *vbasedev;
376     unsigned int device_num = 0;
377     bool all_support_p2p = true;
378 
379     QLIST_FOREACH(group, &vfio_group_list, next) {
380         QLIST_FOREACH(vbasedev, &group->device_list, next) {
381             if (vbasedev->migration) {
382                 device_num++;
383 
384                 if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
385                     all_support_p2p = false;
386                 }
387             }
388         }
389     }
390 
391     return all_support_p2p || device_num <= 1;
392 }
393 
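/*
 * Install a global migration blocker when multiple VFIO devices are present
 * and not all of them support P2P migration.  If migration was explicitly
 * requested (enable_migration=on), fail with -EINVAL instead.
 */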
394 int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
395 {
396     int ret;
397 
398     if (vfio_multiple_devices_migration_is_supported()) {
399         return 0;
400     }
401 
402     if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
403         error_setg(errp, "Multiple VFIO devices migration is supported only if "
404                          "all of them support P2P migration");
405         return -EINVAL;
406     }
407 
408     if (multiple_devices_migration_blocker) {
409         return 0;
410     }
411 
412     error_setg(&multiple_devices_migration_blocker,
413                "Multiple VFIO devices migration is supported only if all of "
414                "them support P2P migration");
415     ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
416     if (ret < 0) {
417         error_free(multiple_devices_migration_blocker);
418         multiple_devices_migration_blocker = NULL;
419     }
420 
421     return ret;
422 }
423 
424 void vfio_unblock_multiple_devices_migration(void)
425 {
426     if (!multiple_devices_migration_blocker ||
427         !vfio_multiple_devices_migration_is_supported()) {
428         return;
429     }
430 
431     migrate_del_blocker(multiple_devices_migration_blocker);
432     error_free(multiple_devices_migration_blocker);
433     multiple_devices_migration_blocker = NULL;
434 }
435 
436 bool vfio_viommu_preset(VFIODevice *vbasedev)
437 {
438     return vbasedev->group->container->space->as != &address_space_memory;
439 }
440 
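/* Propagate @err to the outgoing migration stream, if one is active. */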
441 static void vfio_set_migration_error(int err)
442 {
443     MigrationState *ms = migrate_get_current();
444 
445     if (migration_is_setup_or_active(ms->state)) {
446         WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
447             if (ms->to_dst_file) {
448                 qemu_file_set_error(ms->to_dst_file, err);
449             }
450         }
451     }
452 }
453 
454 bool vfio_device_state_is_running(VFIODevice *vbasedev)
455 {
456     VFIOMigration *migration = vbasedev->migration;
457 
458     return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
459            migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
460 }
461 
462 bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
463 {
464     VFIOMigration *migration = vbasedev->migration;
465 
466     return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
467            migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
468 }
469 
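/*
 * Return true if dirty page tracking can rely on every device in the
 * container: migration must be in the ACTIVE or DEVICE phase, all devices
 * must support migration, and pre-copy dirty page tracking must not be
 * disabled on a device that is still running or in pre-copy.
 */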
470 static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
471 {
472     VFIOGroup *group;
473     VFIODevice *vbasedev;
474     MigrationState *ms = migrate_get_current();
475 
476     if (ms->state != MIGRATION_STATUS_ACTIVE &&
477         ms->state != MIGRATION_STATUS_DEVICE) {
478         return false;
479     }
480 
481     QLIST_FOREACH(group, &container->group_list, container_next) {
482         QLIST_FOREACH(vbasedev, &group->device_list, next) {
483             VFIOMigration *migration = vbasedev->migration;
484 
485             if (!migration) {
486                 return false;
487             }
488 
489             if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
490                 (vfio_device_state_is_running(vbasedev) ||
491                  vfio_device_state_is_precopy(vbasedev))) {
492                 return false;
493             }
494         }
495     }
496     return true;
497 }
498 
499 static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
500 {
501     VFIOGroup *group;
502     VFIODevice *vbasedev;
503 
504     QLIST_FOREACH(group, &container->group_list, container_next) {
505         QLIST_FOREACH(vbasedev, &group->device_list, next) {
506             if (!vbasedev->dirty_pages_supported) {
507                 return false;
508             }
509         }
510     }
511 
512     return true;
513 }
514 
515 /*
516  * Check if all VFIO devices are running and migration is active, which is
517  * essentially equivalent to the migration being in pre-copy phase.
518  */
519 static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
520 {
521     VFIOGroup *group;
522     VFIODevice *vbasedev;
523 
524     if (!migration_is_active(migrate_get_current())) {
525         return false;
526     }
527 
528     QLIST_FOREACH(group, &container->group_list, container_next) {
529         QLIST_FOREACH(vbasedev, &group->device_list, next) {
530             VFIOMigration *migration = vbasedev->migration;
531 
532             if (!migration) {
533                 return false;
534             }
535 
536             if (vfio_device_state_is_running(vbasedev) ||
537                 vfio_device_state_is_precopy(vbasedev)) {
538                 continue;
539             } else {
540                 return false;
541             }
542         }
543     }
544     return true;
545 }
546 
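/*
 * Unmap an IOVA range and fetch its dirty bitmap in the same
 * VFIO_IOMMU_UNMAP_DMA call (VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP), then
 * feed the result into QEMU's dirty memory tracking.
 */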
547 static int vfio_dma_unmap_bitmap(VFIOContainer *container,
548                                  hwaddr iova, ram_addr_t size,
549                                  IOMMUTLBEntry *iotlb)
550 {
551     struct vfio_iommu_type1_dma_unmap *unmap;
552     struct vfio_bitmap *bitmap;
553     VFIOBitmap vbmap;
554     int ret;
555 
556     ret = vfio_bitmap_alloc(&vbmap, size);
557     if (ret) {
558         return ret;
559     }
560 
561     unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
562 
563     unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
564     unmap->iova = iova;
565     unmap->size = size;
566     unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
567     bitmap = (struct vfio_bitmap *)&unmap->data;
568 
569     /*
570      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to track
571      * dirty pages in units of qemu_real_host_page_size, so set bitmap_pgsize
572      * to qemu_real_host_page_size.
573      */
574     bitmap->pgsize = qemu_real_host_page_size();
575     bitmap->size = vbmap.size;
576     bitmap->data = (__u64 *)vbmap.bitmap;
577 
578     if (vbmap.size > container->max_dirty_bitmap_size) {
579         error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
580         ret = -E2BIG;
581         goto unmap_exit;
582     }
583 
584     ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
585     if (!ret) {
586         cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
587                 iotlb->translated_addr, vbmap.pages);
588     } else {
589         error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP: %m");
590     }
591 
592 unmap_exit:
593     g_free(unmap);
594     g_free(vbmap.bitmap);
595 
596     return ret;
597 }
598 
599 /*
600  * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
601  */
602 static int vfio_dma_unmap(VFIOContainer *container,
603                           hwaddr iova, ram_addr_t size,
604                           IOMMUTLBEntry *iotlb)
605 {
606     struct vfio_iommu_type1_dma_unmap unmap = {
607         .argsz = sizeof(unmap),
608         .flags = 0,
609         .iova = iova,
610         .size = size,
611     };
612     bool need_dirty_sync = false;
613     int ret;
614 
615     if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
616         if (!vfio_devices_all_device_dirty_tracking(container) &&
617             container->dirty_pages_supported) {
618             return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
619         }
620 
621         need_dirty_sync = true;
622     }
623 
624     while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
625         /*
626          * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
627          * v4.15) where an overflow in its wrap-around check prevents us from
628          * unmapping the last page of the address space.  Test for the error
629          * condition and re-try the unmap excluding the last page.  The
630          * expectation is that we've never mapped the last page anyway and this
631          * unmap request comes via vIOMMU support which also makes it unlikely
632          * that this page is used.  This bug was introduced well after type1 v2
633          * support was introduced, so we shouldn't need to test for v1.  A fix
634          * is queued for kernel v5.0 so this workaround can be removed once
635          * affected kernels are sufficiently deprecated.
636          */
637         if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
638             container->iommu_type == VFIO_TYPE1v2_IOMMU) {
639             trace_vfio_dma_unmap_overflow_workaround();
640             unmap.size -= 1ULL << ctz64(container->pgsizes);
641             continue;
642         }
643         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
644         return -errno;
645     }
646 
647     if (need_dirty_sync) {
648         ret = vfio_get_dirty_bitmap(container, iova, size,
649                                     iotlb->translated_addr);
650         if (ret) {
651             return ret;
652         }
653     }
654 
655     return 0;
656 }
657 
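/*
 * Map @size bytes at host virtual address @vaddr to guest IOVA @iova in the
 * container.  The mapping is writable unless @readonly is set.  Returns 0 on
 * success or -errno.
 */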
658 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
659                         ram_addr_t size, void *vaddr, bool readonly)
660 {
661     struct vfio_iommu_type1_dma_map map = {
662         .argsz = sizeof(map),
663         .flags = VFIO_DMA_MAP_FLAG_READ,
664         .vaddr = (__u64)(uintptr_t)vaddr,
665         .iova = iova,
666         .size = size,
667     };
668 
669     if (!readonly) {
670         map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
671     }
672 
673     /*
674      * Try the mapping, if it fails with EBUSY, unmap the region and try
675      * again.  This shouldn't be necessary, but we sometimes see it in
676      * the VGA ROM space.
677      */
678     if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
679         (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
680          ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
681         return 0;
682     }
683 
684     error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
685     return -errno;
686 }
687 
688 static void vfio_host_win_add(VFIOContainer *container,
689                               hwaddr min_iova, hwaddr max_iova,
690                               uint64_t iova_pgsizes)
691 {
692     VFIOHostDMAWindow *hostwin;
693 
694     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
695         if (ranges_overlap(hostwin->min_iova,
696                            hostwin->max_iova - hostwin->min_iova + 1,
697                            min_iova,
698                            max_iova - min_iova + 1)) {
699             hw_error("%s: Overlapping DMA windows are not allowed", __func__);
700         }
701     }
702 
703     hostwin = g_malloc0(sizeof(*hostwin));
704 
705     hostwin->min_iova = min_iova;
706     hostwin->max_iova = max_iova;
707     hostwin->iova_pgsizes = iova_pgsizes;
708     QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
709 }
710 
711 static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
712                              hwaddr max_iova)
713 {
714     VFIOHostDMAWindow *hostwin;
715 
716     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
717         if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
718             QLIST_REMOVE(hostwin, hostwin_next);
719             g_free(hostwin);
720             return 0;
721         }
722     }
723 
724     return -1;
725 }
726 
727 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
728 {
729     return (!memory_region_is_ram(section->mr) &&
730             !memory_region_is_iommu(section->mr)) ||
731            memory_region_is_protected(section->mr) ||
732            /*
733             * Sizing an enabled 64-bit BAR can cause spurious mappings to
734             * addresses in the upper part of the 64-bit address space.  These
735             * are never accessed by the CPU and beyond the address width of
736             * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
737             */
738            section->offset_within_address_space & (1ULL << 63);
739 }
740 
741 /* Called with rcu_read_lock held.  */
742 static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
743                                ram_addr_t *ram_addr, bool *read_only)
744 {
745     bool ret, mr_has_discard_manager;
746 
747     ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
748                                &mr_has_discard_manager);
749     if (ret && mr_has_discard_manager) {
750         /*
751          * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
752          * pages will remain pinned inside vfio until unmapped, resulting in a
753          * higher memory consumption than expected. If memory would get
754          * populated again later, there would be an inconsistency between pages
755          * pinned by vfio and pages seen by QEMU. This is the case until
756          * unmapped from the IOMMU (e.g., during device reset).
757          *
758          * With malicious guests, we really only care about pinning more memory
759          * than expected. RLIMIT_MEMLOCK set for the user/process can never be
760          * exceeded and can be used to mitigate this problem.
761          */
762         warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
763                          " RAM (e.g., virtio-mem) works, however, malicious"
764                          " guests can trigger pinning of more memory than"
765                          " intended via an IOMMU. It's possible to mitigate"
766                          " by setting/adjusting RLIMIT_MEMLOCK.");
767     }
768     return ret;
769 }
770 
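/*
 * vIOMMU translation change notifier: mirror guest IOMMU MAP/UNMAP events
 * into the container via vfio_dma_map()/vfio_dma_unmap().  The RCU read lock
 * is held while the guest address is resolved to a host virtual address.
 */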
771 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
772 {
773     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
774     VFIOContainer *container = giommu->container;
775     hwaddr iova = iotlb->iova + giommu->iommu_offset;
776     void *vaddr;
777     int ret;
778 
779     trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
780                                 iova, iova + iotlb->addr_mask);
781 
782     if (iotlb->target_as != &address_space_memory) {
783         error_report("Wrong target AS \"%s\", only system memory is allowed",
784                      iotlb->target_as->name ? iotlb->target_as->name : "none");
785         vfio_set_migration_error(-EINVAL);
786         return;
787     }
788 
789     rcu_read_lock();
790 
791     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
792         bool read_only;
793 
794         if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
795             goto out;
796         }
797         /*
798          * vaddr is only valid until rcu_read_unlock(). But after
799          * vfio_dma_map has set up the mapping the pages will be
800          * pinned by the kernel. This makes sure that the RAM backend
801          * of vaddr will always be there, even if the memory object is
802          * destroyed and its backing memory munmap-ed.
803          */
804         ret = vfio_dma_map(container, iova,
805                            iotlb->addr_mask + 1, vaddr,
806                            read_only);
807         if (ret) {
808             error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
809                          "0x%"HWADDR_PRIx", %p) = %d (%s)",
810                          container, iova,
811                          iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
812         }
813     } else {
814         ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
815         if (ret) {
816             error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
817                          "0x%"HWADDR_PRIx") = %d (%s)",
818                          container, iova,
819                          iotlb->addr_mask + 1, ret, strerror(-ret));
820             vfio_set_migration_error(ret);
821         }
822     }
823 out:
824     rcu_read_unlock();
825 }
826 
827 static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
828                                             MemoryRegionSection *section)
829 {
830     VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
831                                                 listener);
832     const hwaddr size = int128_get64(section->size);
833     const hwaddr iova = section->offset_within_address_space;
834     int ret;
835 
836     /* Unmap with a single call. */
837     ret = vfio_dma_unmap(vrdl->container, iova, size, NULL);
838     if (ret) {
839         error_report("%s: vfio_dma_unmap() failed: %s", __func__,
840                      strerror(-ret));
841     }
842 }
843 
844 static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
845                                             MemoryRegionSection *section)
846 {
847     VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
848                                                 listener);
849     const hwaddr end = section->offset_within_region +
850                        int128_get64(section->size);
851     hwaddr start, next, iova;
852     void *vaddr;
853     int ret;
854 
855     /*
856      * Map in (aligned within memory region) minimum granularity, so we can
857      * unmap in minimum granularity later.
858      */
859     for (start = section->offset_within_region; start < end; start = next) {
860         next = ROUND_UP(start + 1, vrdl->granularity);
861         next = MIN(next, end);
862 
863         iova = start - section->offset_within_region +
864                section->offset_within_address_space;
865         vaddr = memory_region_get_ram_ptr(section->mr) + start;
866 
867         ret = vfio_dma_map(vrdl->container, iova, next - start,
868                            vaddr, section->readonly);
869         if (ret) {
870             /* Rollback */
871             vfio_ram_discard_notify_discard(rdl, section);
872             return ret;
873         }
874     }
875     return 0;
876 }
877 
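/*
 * Register a RamDiscardListener for a RAM section managed by a
 * RamDiscardManager (e.g. virtio-mem), so that only populated parts get
 * DMA-mapped and mappings are updated on populate/discard notifications.
 */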
878 static void vfio_register_ram_discard_listener(VFIOContainer *container,
879                                                MemoryRegionSection *section)
880 {
881     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
882     VFIORamDiscardListener *vrdl;
883 
884     /* Ignore some corner cases not relevant in practice. */
885     g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
886     g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
887                              TARGET_PAGE_SIZE));
888     g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
889 
890     vrdl = g_new0(VFIORamDiscardListener, 1);
891     vrdl->container = container;
892     vrdl->mr = section->mr;
893     vrdl->offset_within_address_space = section->offset_within_address_space;
894     vrdl->size = int128_get64(section->size);
895     vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
896                                                                 section->mr);
897 
898     g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
899     g_assert(container->pgsizes &&
900              vrdl->granularity >= 1ULL << ctz64(container->pgsizes));
901 
902     ram_discard_listener_init(&vrdl->listener,
903                               vfio_ram_discard_notify_populate,
904                               vfio_ram_discard_notify_discard, true);
905     ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
906     QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
907 
908     /*
909      * Sanity-check if we have a theoretically problematic setup where we could
910      * exceed the maximum number of possible DMA mappings over time. We assume
911      * that each mapped section in the same address space as a RamDiscardManager
912      * section consumes exactly one DMA mapping, with the exception of
913      * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
914      * in the same address space as RamDiscardManager sections.
915      *
916      * We assume that each section in the address space consumes one memslot.
917      * We take the number of KVM memory slots as a best guess for the maximum
918      * number of sections in the address space we could have over time,
919      * also consuming DMA mappings.
920      */
921     if (container->dma_max_mappings) {
922         unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
923 
924 #ifdef CONFIG_KVM
925         if (kvm_enabled()) {
926             max_memslots = kvm_get_max_memslots();
927         }
928 #endif
929 
930         QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
931             hwaddr start, end;
932 
933             start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
934                                     vrdl->granularity);
935             end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
936                            vrdl->granularity);
937             vrdl_mappings += (end - start) / vrdl->granularity;
938             vrdl_count++;
939         }
940 
941         if (vrdl_mappings + max_memslots - vrdl_count >
942             container->dma_max_mappings) {
943             warn_report("%s: possibly running out of DMA mappings. E.g., try"
944                         " increasing the 'block-size' of virtio-mem devices."
945                         " Maximum possible DMA mappings: %d, Maximum possible"
946                         " memslots: %d", __func__, container->dma_max_mappings,
947                         max_memslots);
948         }
949     }
950 }
951 
952 static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
953                                                  MemoryRegionSection *section)
954 {
955     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
956     VFIORamDiscardListener *vrdl = NULL;
957 
958     QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
959         if (vrdl->mr == section->mr &&
960             vrdl->offset_within_address_space ==
961             section->offset_within_address_space) {
962             break;
963         }
964     }
965 
966     if (!vrdl) {
967         hw_error("vfio: Trying to unregister missing RAM discard listener");
968     }
969 
970     ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
971     QLIST_REMOVE(vrdl, next);
972     g_free(vrdl);
973 }
974 
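/*
 * Return the host DMA window that fully contains [@iova, @end], or NULL if
 * no registered window covers the range.
 */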
975 static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container,
976                                             hwaddr iova, hwaddr end)
977 {
978     VFIOHostDMAWindow *hostwin;
979     bool hostwin_found = false;
980 
981     QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
982         if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
983             hostwin_found = true;
984             break;
985         }
986     }
987 
988     return hostwin_found ? hostwin : NULL;
989 }
990 
991 static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
992 {
993     MemoryRegion *mr = section->mr;
994 
995     if (!TPM_IS_CRB(mr->owner)) {
996         return false;
997     }
998 
999     /* this is a known safe misaligned region, just trace for debug purpose */
1000     trace_vfio_known_safe_misalignment(memory_region_name(mr),
1001                                        section->offset_within_address_space,
1002                                        section->offset_within_region,
1003                                        qemu_real_host_page_size());
1004     return true;
1005 }
1006 
1007 static bool vfio_listener_valid_section(MemoryRegionSection *section,
1008                                         const char *name)
1009 {
1010     if (vfio_listener_skipped_section(section)) {
1011         trace_vfio_listener_region_skip(name,
1012                 section->offset_within_address_space,
1013                 section->offset_within_address_space +
1014                 int128_get64(int128_sub(section->size, int128_one())));
1015         return false;
1016     }
1017 
1018     if (unlikely((section->offset_within_address_space &
1019                   ~qemu_real_host_page_mask()) !=
1020                  (section->offset_within_region & ~qemu_real_host_page_mask()))) {
1021         if (!vfio_known_safe_misalignment(section)) {
1022             error_report("%s received unaligned region %s iova=0x%"PRIx64
1023                          " offset_within_region=0x%"PRIx64
1024                          " qemu_real_host_page_size=0x%"PRIxPTR,
1025                          __func__, memory_region_name(section->mr),
1026                          section->offset_within_address_space,
1027                          section->offset_within_region,
1028                          qemu_real_host_page_size());
1029         }
1030         return false;
1031     }
1032 
1033     return true;
1034 }
1035 
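/*
 * Compute the host-page-aligned IOVA range covered by @section.  Returns
 * false if the aligned range is empty, otherwise fills in the first IOVA,
 * the inclusive end and, optionally, the exclusive end as an Int128.
 */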
1036 static bool vfio_get_section_iova_range(VFIOContainer *container,
1037                                         MemoryRegionSection *section,
1038                                         hwaddr *out_iova, hwaddr *out_end,
1039                                         Int128 *out_llend)
1040 {
1041     Int128 llend;
1042     hwaddr iova;
1043 
1044     iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
1045     llend = int128_make64(section->offset_within_address_space);
1046     llend = int128_add(llend, section->size);
1047     llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
1048 
1049     if (int128_ge(int128_make64(iova), llend)) {
1050         return false;
1051     }
1052 
1053     *out_iova = iova;
1054     *out_end = int128_get64(int128_sub(llend, int128_one()));
1055     if (out_llend) {
1056         *out_llend = llend;
1057     }
1058     return true;
1059 }
1060 
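/*
 * MemoryListener region_add hook: establish the DMA mappings (or the IOMMU
 * notifiers / RAM discard listeners) needed for a section newly added to the
 * container's address space.
 */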
1061 static void vfio_listener_region_add(MemoryListener *listener,
1062                                      MemoryRegionSection *section)
1063 {
1064     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1065     hwaddr iova, end;
1066     Int128 llend, llsize;
1067     void *vaddr;
1068     int ret;
1069     VFIOHostDMAWindow *hostwin;
1070     Error *err = NULL;
1071 
1072     if (!vfio_listener_valid_section(section, "region_add")) {
1073         return;
1074     }
1075 
1076     if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
1077         if (memory_region_is_ram_device(section->mr)) {
1078             trace_vfio_listener_region_add_no_dma_map(
1079                 memory_region_name(section->mr),
1080                 section->offset_within_address_space,
1081                 int128_getlo(section->size),
1082                 qemu_real_host_page_size());
1083         }
1084         return;
1085     }
1086 
1087     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1088         hwaddr pgsize = 0;
1089 
1090         /* For now intersections are not allowed, we may relax this later */
1091         QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
1092             if (ranges_overlap(hostwin->min_iova,
1093                                hostwin->max_iova - hostwin->min_iova + 1,
1094                                section->offset_within_address_space,
1095                                int128_get64(section->size))) {
1096                 error_setg(&err,
1097                     "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
1098                     "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
1099                     section->offset_within_address_space,
1100                     section->offset_within_address_space +
1101                         int128_get64(section->size) - 1,
1102                     hostwin->min_iova, hostwin->max_iova);
1103                 goto fail;
1104             }
1105         }
1106 
1107         ret = vfio_spapr_create_window(container, section, &pgsize);
1108         if (ret) {
1109             error_setg_errno(&err, -ret, "Failed to create SPAPR window");
1110             goto fail;
1111         }
1112 
1113         vfio_host_win_add(container, section->offset_within_address_space,
1114                           section->offset_within_address_space +
1115                           int128_get64(section->size) - 1, pgsize);
1116 #ifdef CONFIG_KVM
1117         if (kvm_enabled()) {
1118             VFIOGroup *group;
1119             IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
1120             struct kvm_vfio_spapr_tce param;
1121             struct kvm_device_attr attr = {
1122                 .group = KVM_DEV_VFIO_GROUP,
1123                 .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
1124                 .addr = (uint64_t)(unsigned long)&param,
1125             };
1126 
1127             if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
1128                                               &param.tablefd)) {
1129                 QLIST_FOREACH(group, &container->group_list, container_next) {
1130                     param.groupfd = group->fd;
1131                     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
1132                         error_report("vfio: failed to setup fd %d "
1133                                      "for a group with fd %d: %s",
1134                                      param.tablefd, param.groupfd,
1135                                      strerror(errno));
1136                         return;
1137                     }
1138                     trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
1139                 }
1140             }
1141         }
1142 #endif
1143     }
1144 
1145     hostwin = vfio_find_hostwin(container, iova, end);
1146     if (!hostwin) {
1147         error_setg(&err, "Container %p can't map guest IOVA region"
1148                    " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
1149         goto fail;
1150     }
1151 
1152     memory_region_ref(section->mr);
1153 
1154     if (memory_region_is_iommu(section->mr)) {
1155         VFIOGuestIOMMU *giommu;
1156         IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
1157         int iommu_idx;
1158 
1159         trace_vfio_listener_region_add_iommu(iova, end);
1160         /*
1161          * FIXME: For VFIO iommu types which have KVM acceleration to
1162          * avoid bouncing all map/unmaps through qemu this way, this
1163          * would be the right place to wire that up (tell the KVM
1164          * device emulation the VFIO iommu handles to use).
1165          */
1166         giommu = g_malloc0(sizeof(*giommu));
1167         giommu->iommu_mr = iommu_mr;
1168         giommu->iommu_offset = section->offset_within_address_space -
1169                                section->offset_within_region;
1170         giommu->container = container;
1171         llend = int128_add(int128_make64(section->offset_within_region),
1172                            section->size);
1173         llend = int128_sub(llend, int128_one());
1174         iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
1175                                                        MEMTXATTRS_UNSPECIFIED);
1176         iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
1177                             IOMMU_NOTIFIER_IOTLB_EVENTS,
1178                             section->offset_within_region,
1179                             int128_get64(llend),
1180                             iommu_idx);
1181 
1182         ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr,
1183                                                      container->pgsizes,
1184                                                      &err);
1185         if (ret) {
1186             g_free(giommu);
1187             goto fail;
1188         }
1189 
1190         ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
1191                                                     &err);
1192         if (ret) {
1193             g_free(giommu);
1194             goto fail;
1195         }
1196         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
1197         memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
1198 
1199         return;
1200     }
1201 
1202     /* Here we assume that memory_region_is_ram(section->mr)==true */
1203 
1204     /*
1205      * For RAM memory regions with a RamDiscardManager, we only want to map the
1206      * actually populated parts - and update the mapping whenever we're notified
1207      * about changes.
1208      */
1209     if (memory_region_has_ram_discard_manager(section->mr)) {
1210         vfio_register_ram_discard_listener(container, section);
1211         return;
1212     }
1213 
1214     vaddr = memory_region_get_ram_ptr(section->mr) +
1215             section->offset_within_region +
1216             (iova - section->offset_within_address_space);
1217 
1218     trace_vfio_listener_region_add_ram(iova, end, vaddr);
1219 
1220     llsize = int128_sub(llend, int128_make64(iova));
1221 
1222     if (memory_region_is_ram_device(section->mr)) {
1223         hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
1224 
1225         if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
1226             trace_vfio_listener_region_add_no_dma_map(
1227                 memory_region_name(section->mr),
1228                 section->offset_within_address_space,
1229                 int128_getlo(section->size),
1230                 pgmask + 1);
1231             return;
1232         }
1233     }
1234 
1235     ret = vfio_dma_map(container, iova, int128_get64(llsize),
1236                        vaddr, section->readonly);
1237     if (ret) {
1238         error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
1239                    "0x%"HWADDR_PRIx", %p) = %d (%s)",
1240                    container, iova, int128_get64(llsize), vaddr, ret,
1241                    strerror(-ret));
1242         if (memory_region_is_ram_device(section->mr)) {
1243             /* Allow unexpected mappings not to be fatal for RAM devices */
1244             error_report_err(err);
1245             return;
1246         }
1247         goto fail;
1248     }
1249 
1250     return;
1251 
1252 fail:
1253     if (memory_region_is_ram_device(section->mr)) {
1254         error_report("vfio_dma_map() failed, PCI p2p may not work");
1255         return;
1256     }
1257     /*
1258      * On the initfn path, store the first error in the container so we
1259      * can gracefully fail.  Runtime, there's not much we can do other
1260      * than throw a hardware error.
1261      */
1262     if (!container->initialized) {
1263         if (!container->error) {
1264             error_propagate_prepend(&container->error, err,
1265                                     "Region %s: ",
1266                                     memory_region_name(section->mr));
1267         } else {
1268             error_free(err);
1269         }
1270     } else {
1271         error_report_err(err);
1272         hw_error("vfio: DMA mapping failed, unable to continue");
1273     }
1274 }
1275 
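/*
 * MemoryListener region_del hook: tear down the IOMMU notifiers, RAM discard
 * listeners and DMA mappings established by vfio_listener_region_add().
 */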
1276 static void vfio_listener_region_del(MemoryListener *listener,
1277                                      MemoryRegionSection *section)
1278 {
1279     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1280     hwaddr iova, end;
1281     Int128 llend, llsize;
1282     int ret;
1283     bool try_unmap = true;
1284 
1285     if (!vfio_listener_valid_section(section, "region_del")) {
1286         return;
1287     }
1288 
1289     if (memory_region_is_iommu(section->mr)) {
1290         VFIOGuestIOMMU *giommu;
1291 
1292         QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
1293             if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
1294                 giommu->n.start == section->offset_within_region) {
1295                 memory_region_unregister_iommu_notifier(section->mr,
1296                                                         &giommu->n);
1297                 QLIST_REMOVE(giommu, giommu_next);
1298                 g_free(giommu);
1299                 break;
1300             }
1301         }
1302 
1303         /*
1304          * FIXME: We assume the one big unmap below is adequate to
1305          * remove any individual page mappings in the IOMMU which
1306          * might have been copied into VFIO. This works for a page table
1307          * based IOMMU where a big unmap flattens a large range of IO-PTEs.
1308          * That may not be true for all IOMMU types.
1309          */
1310     }
1311 
1312     if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
1313         return;
1314     }
1315 
1316     llsize = int128_sub(llend, int128_make64(iova));
1317 
1318     trace_vfio_listener_region_del(iova, end);
1319 
1320     if (memory_region_is_ram_device(section->mr)) {
1321         hwaddr pgmask;
1322         VFIOHostDMAWindow *hostwin;
1323 
1324         hostwin = vfio_find_hostwin(container, iova, end);
1325         assert(hostwin); /* or region_add() would have failed */
1326 
1327         pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
1328         try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
1329     } else if (memory_region_has_ram_discard_manager(section->mr)) {
1330         vfio_unregister_ram_discard_listener(container, section);
1331         /* Unregistering will trigger an unmap. */
1332         try_unmap = false;
1333     }
1334 
1335     if (try_unmap) {
1336         if (int128_eq(llsize, int128_2_64())) {
1337             /* The unmap ioctl doesn't accept a full 64-bit span. */
1338             llsize = int128_rshift(llsize, 1);
1339             ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
1340             if (ret) {
1341                 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
1342                              "0x%"HWADDR_PRIx") = %d (%s)",
1343                              container, iova, int128_get64(llsize), ret,
1344                              strerror(-ret));
1345             }
1346             iova += int128_get64(llsize);
1347         }
1348         ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
1349         if (ret) {
1350             error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
1351                          "0x%"HWADDR_PRIx") = %d (%s)",
1352                          container, iova, int128_get64(llsize), ret,
1353                          strerror(-ret));
1354         }
1355     }
1356 
1357     memory_region_unref(section->mr);
1358 
1359     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1360         vfio_spapr_remove_window(container,
1361                                  section->offset_within_address_space);
1362         if (vfio_host_win_del(container,
1363                               section->offset_within_address_space,
1364                               section->offset_within_address_space +
1365                               int128_get64(section->size) - 1) < 0) {
1366             hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
1367                      __func__, section->offset_within_address_space);
1368         }
1369     }
1370 }
1371 
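/*
 * Start or stop IOMMU-based dirty page tracking for the whole container via
 * VFIO_IOMMU_DIRTY_PAGES.  A no-op if the container lacks dirty page support.
 */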
1372 static int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
1373 {
1374     int ret;
1375     struct vfio_iommu_type1_dirty_bitmap dirty = {
1376         .argsz = sizeof(dirty),
1377     };
1378 
1379     if (!container->dirty_pages_supported) {
1380         return 0;
1381     }
1382 
1383     if (start) {
1384         dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
1385     } else {
1386         dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
1387     }
1388 
1389     ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
1390     if (ret) {
1391         ret = -errno;
1392         error_report("Failed to set dirty tracking flag 0x%x errno: %d",
1393                      dirty.flags, errno);
1394     }
1395 
1396     return ret;
1397 }
1398 
1399 typedef struct VFIODirtyRanges {
1400     hwaddr min32;
1401     hwaddr max32;
1402     hwaddr min64;
1403     hwaddr max64;
1404     hwaddr minpci64;
1405     hwaddr maxpci64;
1406 } VFIODirtyRanges;
1407 
1408 typedef struct VFIODirtyRangesListener {
1409     VFIOContainer *container;
1410     VFIODirtyRanges ranges;
1411     MemoryListener listener;
1412 } VFIODirtyRangesListener;
1413 
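/*
 * Return true if the memory region backing @section is owned by a VFIO PCI
 * device attached to this container, i.e. it is a device BAR rather than
 * guest RAM.
 */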
1414 static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
1415                                      VFIOContainer *container)
1416 {
1417     VFIOPCIDevice *pcidev;
1418     VFIODevice *vbasedev;
1419     VFIOGroup *group;
1420     Object *owner;
1421 
1422     owner = memory_region_owner(section->mr);
1423 
1424     QLIST_FOREACH(group, &container->group_list, container_next) {
1425         QLIST_FOREACH(vbasedev, &group->device_list, next) {
1426             if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
1427                 continue;
1428             }
1429             pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
1430             if (OBJECT(pcidev) == owner) {
1431                 return true;
1432             }
1433         }
1434     }
1435 
1436     return false;
1437 }
1438 
1439 static void vfio_dirty_tracking_update(MemoryListener *listener,
1440                                        MemoryRegionSection *section)
1441 {
1442     VFIODirtyRangesListener *dirty = container_of(listener,
1443                                                   VFIODirtyRangesListener,
1444                                                   listener);
1445     VFIODirtyRanges *range = &dirty->ranges;
1446     hwaddr iova, end, *min, *max;
1447 
1448     if (!vfio_listener_valid_section(section, "tracking_update") ||
1449         !vfio_get_section_iova_range(dirty->container, section,
1450                                      &iova, &end, NULL)) {
1451         return;
1452     }
1453 
1454     /*
1455      * The address space passed to the dirty tracker is reduced to three ranges:
1456      * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
1457      * PCI 64-bit hole.
1458      *
1459      * The underlying reports of dirty will query a sub-interval of each of
1460      * these ranges.
1461      *
1462      * The purpose of the three range handling is to handle known cases of big
1463      * holes in the address space, like the x86 AMD 1T hole, and firmware (like
1464      * OVMF) which may relocate the pci-hole64 to the end of the address space.
1465      * The latter would otherwise generate large ranges for tracking, stressing
1466      * the limits of supported hardware. The pci-hole32 will always be below 4G
1467      * (overlapping or not) so it doesn't need special handling and is part of
1468      * the 32-bit range.
1469      *
1470      * The alternative would be an IOVATree but that has a much bigger runtime
1471      * overhead and unnecessary complexity.
1472      */
1473     if (vfio_section_is_vfio_pci(section, dirty->container) &&
1474         iova >= UINT32_MAX) {
1475         min = &range->minpci64;
1476         max = &range->maxpci64;
1477     } else {
1478         min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
1479         max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
1480     }
1481     if (*min > iova) {
1482         *min = iova;
1483     }
1484     if (*max < end) {
1485         *max = end;
1486     }
1487 
1488     trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
1489     return;
1490 }
1491 
1492 static const MemoryListener vfio_dirty_tracking_listener = {
1493     .name = "vfio-tracking",
1494     .region_add = vfio_dirty_tracking_update,
1495 };
1496 
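/*
 * Walk the container's address space with a temporary, synchronous memory
 * listener to compute the IOVA ranges (32-bit, 64-bit and PCI 64-bit hole)
 * that device dirty tracking needs to cover.
 */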
1497 static void vfio_dirty_tracking_init(VFIOContainer *container,
1498                                      VFIODirtyRanges *ranges)
1499 {
1500     VFIODirtyRangesListener dirty;
1501 
1502     memset(&dirty, 0, sizeof(dirty));
1503     dirty.ranges.min32 = UINT32_MAX;
1504     dirty.ranges.min64 = UINT64_MAX;
1505     dirty.ranges.minpci64 = UINT64_MAX;
1506     dirty.listener = vfio_dirty_tracking_listener;
1507     dirty.container = container;
1508 
1509     memory_listener_register(&dirty.listener,
1510                              container->space->as);
1511 
1512     *ranges = dirty.ranges;
1513 
1514     /*
1515      * The memory listener is synchronous and is only used to calculate the
1516      * ranges to apply dirty tracking to. Unregister it once we are done, as
1517      * we are not interested in any follow-up updates.
1518      */
1519     memory_listener_unregister(&dirty.listener);
1520 }
1521 
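/*
 * Issue VFIO_DEVICE_FEATURE(DMA_LOGGING_STOP) on every device in the
 * container that currently has dirty tracking enabled, and clear its
 * dirty_tracking flag.
 */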
1522 static void vfio_devices_dma_logging_stop(VFIOContainer *container)
1523 {
1524     uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
1525                               sizeof(uint64_t))] = {};
1526     struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
1527     VFIODevice *vbasedev;
1528     VFIOGroup *group;
1529 
1530     feature->argsz = sizeof(buf);
1531     feature->flags = VFIO_DEVICE_FEATURE_SET |
1532                      VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
1533 
1534     QLIST_FOREACH(group, &container->group_list, container_next) {
1535         QLIST_FOREACH(vbasedev, &group->device_list, next) {
1536             if (!vbasedev->dirty_tracking) {
1537                 continue;
1538             }
1539 
1540             if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
1541                 warn_report("%s: Failed to stop DMA logging, err %d (%s)",
1542                              vbasedev->name, -errno, strerror(errno));
1543             }
1544             vbasedev->dirty_tracking = false;
1545         }
1546     }
1547 }
1548 
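/*
 * Build a VFIO_DEVICE_FEATURE(DMA_LOGGING_START) buffer covering up to
 * three ranges (32-bit, 64-bit and pci-hole64):
 *
 *   struct vfio_device_feature
 *     + struct vfio_device_feature_dma_logging_control
 *         .ranges -> array of struct vfio_device_feature_dma_logging_range
 *
 * The returned buffer and its range array must be released with
 * vfio_device_feature_dma_logging_start_destroy().
 */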
1549 static struct vfio_device_feature *
1550 vfio_device_feature_dma_logging_start_create(VFIOContainer *container,
1551                                              VFIODirtyRanges *tracking)
1552 {
1553     struct vfio_device_feature *feature;
1554     size_t feature_size;
1555     struct vfio_device_feature_dma_logging_control *control;
1556     struct vfio_device_feature_dma_logging_range *ranges;
1557 
1558     feature_size = sizeof(struct vfio_device_feature) +
1559                    sizeof(struct vfio_device_feature_dma_logging_control);
1560     feature = g_try_malloc0(feature_size);
1561     if (!feature) {
1562         errno = ENOMEM;
1563         return NULL;
1564     }
1565     feature->argsz = feature_size;
1566     feature->flags = VFIO_DEVICE_FEATURE_SET |
1567                      VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
1568 
1569     control = (struct vfio_device_feature_dma_logging_control *)feature->data;
1570     control->page_size = qemu_real_host_page_size();
1571 
1572     /*
1573      * The DMA logging uAPI guarantees support for at least as many ranges as
1574      * fit into a single host kernel base page.
1575      */
1576     control->num_ranges = !!tracking->max32 + !!tracking->max64 +
1577         !!tracking->maxpci64;
1578     ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
1579                         control->num_ranges);
1580     if (!ranges) {
1581         g_free(feature);
1582         errno = ENOMEM;
1583 
1584         return NULL;
1585     }
1586 
1587     control->ranges = (__u64)(uintptr_t)ranges;
1588     if (tracking->max32) {
1589         ranges->iova = tracking->min32;
1590         ranges->length = (tracking->max32 - tracking->min32) + 1;
1591         ranges++;
1592     }
1593     if (tracking->max64) {
1594         ranges->iova = tracking->min64;
1595         ranges->length = (tracking->max64 - tracking->min64) + 1;
1596         ranges++;
1597     }
1598     if (tracking->maxpci64) {
1599         ranges->iova = tracking->minpci64;
1600         ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
1601     }
1602 
1603     trace_vfio_device_dirty_tracking_start(control->num_ranges,
1604                                            tracking->min32, tracking->max32,
1605                                            tracking->min64, tracking->max64,
1606                                            tracking->minpci64, tracking->maxpci64);
1607 
1608     return feature;
1609 }
1610 
1611 static void vfio_device_feature_dma_logging_start_destroy(
1612     struct vfio_device_feature *feature)
1613 {
1614     struct vfio_device_feature_dma_logging_control *control =
1615         (struct vfio_device_feature_dma_logging_control *)feature->data;
1616     struct vfio_device_feature_dma_logging_range *ranges =
1617         (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;
1618 
1619     g_free(ranges);
1620     g_free(feature);
1621 }
1622 
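/*
 * Enable device dirty tracking: compute the IOVA ranges to track, then
 * start DMA logging on every device in the container.  On failure, stop
 * logging again on any device that had already been started.
 */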
1623 static int vfio_devices_dma_logging_start(VFIOContainer *container)
1624 {
1625     struct vfio_device_feature *feature;
1626     VFIODirtyRanges ranges;
1627     VFIODevice *vbasedev;
1628     VFIOGroup *group;
1629     int ret = 0;
1630 
1631     vfio_dirty_tracking_init(container, &ranges);
1632     feature = vfio_device_feature_dma_logging_start_create(container,
1633                                                            &ranges);
1634     if (!feature) {
1635         return -errno;
1636     }
1637 
1638     QLIST_FOREACH(group, &container->group_list, container_next) {
1639         QLIST_FOREACH(vbasedev, &group->device_list, next) {
1640             if (vbasedev->dirty_tracking) {
1641                 continue;
1642             }
1643 
1644             ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
1645             if (ret) {
1646                 ret = -errno;
1647                 error_report("%s: Failed to start DMA logging, err %d (%s)",
1648                              vbasedev->name, ret, strerror(errno));
1649                 goto out;
1650             }
1651             vbasedev->dirty_tracking = true;
1652         }
1653     }
1654 
1655 out:
1656     if (ret) {
1657         vfio_devices_dma_logging_stop(container);
1658     }
1659 
1660     vfio_device_feature_dma_logging_start_destroy(feature);
1661 
1662     return ret;
1663 }
1664 
1665 static void vfio_listener_log_global_start(MemoryListener *listener)
1666 {
1667     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1668     int ret;
1669 
1670     if (vfio_devices_all_device_dirty_tracking(container)) {
1671         ret = vfio_devices_dma_logging_start(container);
1672     } else {
1673         ret = vfio_set_dirty_page_tracking(container, true);
1674     }
1675 
1676     if (ret) {
1677         error_report("vfio: Could not start dirty page tracking, err: %d (%s)",
1678                      ret, strerror(-ret));
1679         vfio_set_migration_error(ret);
1680     }
1681 }
1682 
1683 static void vfio_listener_log_global_stop(MemoryListener *listener)
1684 {
1685     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1686     int ret = 0;
1687 
1688     if (vfio_devices_all_device_dirty_tracking(container)) {
1689         vfio_devices_dma_logging_stop(container);
1690     } else {
1691         ret = vfio_set_dirty_page_tracking(container, false);
1692     }
1693 
1694     if (ret) {
1695         error_report("vfio: Could not stop dirty page tracking, err: %d (%s)",
1696                      ret, strerror(-ret));
1697         vfio_set_migration_error(ret);
1698     }
1699 }
1700 
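/*
 * Fetch a single device's dirty bitmap for [iova, iova + size) via
 * VFIO_DEVICE_FEATURE(DMA_LOGGING_REPORT), at qemu_real_host_page_size()
 * granularity.
 */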
1701 static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
1702                                           hwaddr size, void *bitmap)
1703 {
1704     uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
1705                         sizeof(struct vfio_device_feature_dma_logging_report),
1706                         sizeof(__u64))] = {};
1707     struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
1708     struct vfio_device_feature_dma_logging_report *report =
1709         (struct vfio_device_feature_dma_logging_report *)feature->data;
1710 
1711     report->iova = iova;
1712     report->length = size;
1713     report->page_size = qemu_real_host_page_size();
1714     report->bitmap = (__u64)(uintptr_t)bitmap;
1715 
1716     feature->argsz = sizeof(buf);
1717     feature->flags = VFIO_DEVICE_FEATURE_GET |
1718                      VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
1719 
1720     if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
1721         return -errno;
1722     }
1723 
1724     return 0;
1725 }
1726 
1727 static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
1728                                            VFIOBitmap *vbmap, hwaddr iova,
1729                                            hwaddr size)
1730 {
1731     VFIODevice *vbasedev;
1732     VFIOGroup *group;
1733     int ret;
1734 
1735     QLIST_FOREACH(group, &container->group_list, container_next) {
1736         QLIST_FOREACH(vbasedev, &group->device_list, next) {
1737             ret = vfio_device_dma_logging_report(vbasedev, iova, size,
1738                                                  vbmap->bitmap);
1739             if (ret) {
1740                 error_report("%s: Failed to get DMA logging report, iova: "
1741                              "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
1742                              ", err: %d (%s)",
1743                              vbasedev->name, iova, size, ret, strerror(-ret));
1744 
1745                 return ret;
1746             }
1747         }
1748     }
1749 
1750     return 0;
1751 }
1752 
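/*
 * Query the dirty bitmap from the IOMMU backend via VFIO_IOMMU_DIRTY_PAGES,
 * used when per-device DMA logging is not in use.
 */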
1753 static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
1754                                    hwaddr iova, hwaddr size)
1755 {
1756     struct vfio_iommu_type1_dirty_bitmap *dbitmap;
1757     struct vfio_iommu_type1_dirty_bitmap_get *range;
1758     int ret;
1759 
1760     dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
1761 
1762     dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
1763     dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
1764     range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
1765     range->iova = iova;
1766     range->size = size;
1767 
1768     /*
1769      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be in
1770      * units of qemu_real_host_page_size when marking pages dirty, so set the
1771      * bitmap's pgsize accordingly.
1772      */
1773     range->bitmap.pgsize = qemu_real_host_page_size();
1774     range->bitmap.size = vbmap->size;
1775     range->bitmap.data = (__u64 *)vbmap->bitmap;
1776 
1777     ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
1778     if (ret) {
1779         ret = -errno;
1780         error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
1781                 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
1782                 (uint64_t)range->size, errno);
1783     }
1784 
1785     g_free(dbitmap);
1786 
1787     return ret;
1788 }
1789 
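/*
 * Retrieve the dirty bitmap for [iova, iova + size) and mark the
 * corresponding guest RAM at ram_addr dirty.  If neither the container nor
 * all devices support dirty page tracking, conservatively mark the whole
 * range dirty instead.
 */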
1790 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
1791                                  uint64_t size, ram_addr_t ram_addr)
1792 {
1793     bool all_device_dirty_tracking =
1794         vfio_devices_all_device_dirty_tracking(container);
1795     uint64_t dirty_pages;
1796     VFIOBitmap vbmap;
1797     int ret;
1798 
1799     if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
1800         cpu_physical_memory_set_dirty_range(ram_addr, size,
1801                                             tcg_enabled() ? DIRTY_CLIENTS_ALL :
1802                                             DIRTY_CLIENTS_NOCODE);
1803         return 0;
1804     }
1805 
1806     ret = vfio_bitmap_alloc(&vbmap, size);
1807     if (ret) {
1808         return ret;
1809     }
1810 
1811     if (all_device_dirty_tracking) {
1812         ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
1813     } else {
1814         ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
1815     }
1816 
1817     if (ret) {
1818         goto out;
1819     }
1820 
1821     dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
1822                                                          vbmap.pages);
1823 
1824     trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
1825                                 ram_addr, dirty_pages);
1826 out:
1827     g_free(vbmap.bitmap);
1828 
1829     return ret;
1830 }
1831 
1832 typedef struct {
1833     IOMMUNotifier n;
1834     VFIOGuestIOMMU *giommu;
1835 } vfio_giommu_dirty_notifier;
1836 
1837 static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
1838 {
1839     vfio_giommu_dirty_notifier *gdn = container_of(n,
1840                                                 vfio_giommu_dirty_notifier, n);
1841     VFIOGuestIOMMU *giommu = gdn->giommu;
1842     VFIOContainer *container = giommu->container;
1843     hwaddr iova = iotlb->iova + giommu->iommu_offset;
1844     ram_addr_t translated_addr;
1845     int ret = -EINVAL;
1846 
1847     trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
1848 
1849     if (iotlb->target_as != &address_space_memory) {
1850         error_report("Wrong target AS \"%s\", only system memory is allowed",
1851                      iotlb->target_as->name ? iotlb->target_as->name : "none");
1852         goto out;
1853     }
1854 
1855     rcu_read_lock();
1856     if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
1857         ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
1858                                     translated_addr);
1859         if (ret) {
1860             error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
1861                          "0x%"HWADDR_PRIx") = %d (%s)",
1862                          container, iova, iotlb->addr_mask + 1, ret,
1863                          strerror(-ret));
1864         }
1865     }
1866     rcu_read_unlock();
1867 
1868 out:
1869     if (ret) {
1870         vfio_set_migration_error(ret);
1871     }
1872 }
1873 
1874 static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
1875                                              void *opaque)
1876 {
1877     const hwaddr size = int128_get64(section->size);
1878     const hwaddr iova = section->offset_within_address_space;
1879     const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
1880                                 section->offset_within_region;
1881     VFIORamDiscardListener *vrdl = opaque;
1882 
1883     /*
1884      * Sync the whole mapped region (spanning multiple individual mappings)
1885      * in one go.
1886      */
1887     return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr);
1888 }
1889 
1890 static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
1891                                                    MemoryRegionSection *section)
1892 {
1893     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
1894     VFIORamDiscardListener *vrdl = NULL;
1895 
1896     QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
1897         if (vrdl->mr == section->mr &&
1898             vrdl->offset_within_address_space ==
1899             section->offset_within_address_space) {
1900             break;
1901         }
1902     }
1903 
1904     if (!vrdl) {
1905         hw_error("vfio: Trying to sync missing RAM discard listener");
1906     }
1907 
1908     /*
1909      * We can only (and only want to) synchronize the bitmap for parts that
1910      * are actually mapped, i.e. the populated parts. Replay all of them.
1911      */
1912     return ram_discard_manager_replay_populated(rdm, section,
1913                                               vfio_ram_discard_get_dirty_bitmap,
1914                                                 vrdl);
1915 }
1916 
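/*
 * Synchronize the dirty bitmap for one section: replay mappings through a
 * dirty notifier for vIOMMU sections, replay only the populated parts for
 * RamDiscardManager-backed sections, and query the RAM range directly
 * otherwise.
 */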
1917 static int vfio_sync_dirty_bitmap(VFIOContainer *container,
1918                                   MemoryRegionSection *section)
1919 {
1920     ram_addr_t ram_addr;
1921 
1922     if (memory_region_is_iommu(section->mr)) {
1923         VFIOGuestIOMMU *giommu;
1924 
1925         QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
1926             if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
1927                 giommu->n.start == section->offset_within_region) {
1928                 Int128 llend;
1929                 vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
1930                 int idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr,
1931                                                        MEMTXATTRS_UNSPECIFIED);
1932 
1933                 llend = int128_add(int128_make64(section->offset_within_region),
1934                                    section->size);
1935                 llend = int128_sub(llend, int128_one());
1936 
1937                 iommu_notifier_init(&gdn.n,
1938                                     vfio_iommu_map_dirty_notify,
1939                                     IOMMU_NOTIFIER_MAP,
1940                                     section->offset_within_region,
1941                                     int128_get64(llend),
1942                                     idx);
1943                 memory_region_iommu_replay(giommu->iommu_mr, &gdn.n);
1944                 break;
1945             }
1946         }
1947         return 0;
1948     } else if (memory_region_has_ram_discard_manager(section->mr)) {
1949         return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
1950     }
1951 
1952     ram_addr = memory_region_get_ram_addr(section->mr) +
1953                section->offset_within_region;
1954 
1955     return vfio_get_dirty_bitmap(container,
1956                    REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
1957                    int128_get64(section->size), ram_addr);
1958 }
1959 
1960 static void vfio_listener_log_sync(MemoryListener *listener,
1961         MemoryRegionSection *section)
1962 {
1963     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1964     int ret;
1965 
1966     if (vfio_listener_skipped_section(section)) {
1967         return;
1968     }
1969 
1970     if (vfio_devices_all_dirty_tracking(container)) {
1971         ret = vfio_sync_dirty_bitmap(container, section);
1972         if (ret) {
1973             error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
1974                          strerror(-ret));
1975             vfio_set_migration_error(ret);
1976         }
1977     }
1978 }
1979 
1980 static const MemoryListener vfio_memory_listener = {
1981     .name = "vfio",
1982     .region_add = vfio_listener_region_add,
1983     .region_del = vfio_listener_region_del,
1984     .log_global_start = vfio_listener_log_global_start,
1985     .log_global_stop = vfio_listener_log_global_stop,
1986     .log_sync = vfio_listener_log_sync,
1987 };
1988 
1989 static void vfio_listener_release(VFIOContainer *container)
1990 {
1991     memory_listener_unregister(&container->listener);
1992     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1993         memory_listener_unregister(&container->prereg_listener);
1994     }
1995 }
1996 
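/*
 * Walk a vfio info capability chain starting at cap_offset and return the
 * header matching @id, or NULL if it is not present.
 */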
1997 static struct vfio_info_cap_header *
1998 vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
1999 {
2000     struct vfio_info_cap_header *hdr;
2001 
2002     for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
2003         if (hdr->id == id) {
2004             return hdr;
2005         }
2006     }
2007 
2008     return NULL;
2009 }
2010 
2011 struct vfio_info_cap_header *
2012 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
2013 {
2014     if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
2015         return NULL;
2016     }
2017 
2018     return vfio_get_cap((void *)info, info->cap_offset, id);
2019 }
2020 
2021 static struct vfio_info_cap_header *
2022 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
2023 {
2024     if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
2025         return NULL;
2026     }
2027 
2028     return vfio_get_cap((void *)info, info->cap_offset, id);
2029 }
2030 
2031 struct vfio_info_cap_header *
2032 vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
2033 {
2034     if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
2035         return NULL;
2036     }
2037 
2038     return vfio_get_cap((void *)info, info->cap_offset, id);
2039 }
2040 
2041 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
2042                              unsigned int *avail)
2043 {
2044     struct vfio_info_cap_header *hdr;
2045     struct vfio_iommu_type1_info_dma_avail *cap;
2046 
2047     /* If the capability cannot be found, assume no DMA limiting */
2048     hdr = vfio_get_iommu_type1_info_cap(info,
2049                                         VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
2050     if (hdr == NULL) {
2051         return false;
2052     }
2053 
2054     if (avail != NULL) {
2055         cap = (void *) hdr;
2056         *avail = cap->avail;
2057     }
2058 
2059     return true;
2060 }
2061 
2062 static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
2063                                           struct vfio_region_info *info)
2064 {
2065     struct vfio_info_cap_header *hdr;
2066     struct vfio_region_info_cap_sparse_mmap *sparse;
2067     int i, j;
2068 
2069     hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
2070     if (!hdr) {
2071         return -ENODEV;
2072     }
2073 
2074     sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
2075 
2076     trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
2077                                          region->nr, sparse->nr_areas);
2078 
2079     region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
2080 
2081     for (i = 0, j = 0; i < sparse->nr_areas; i++) {
2082         if (sparse->areas[i].size) {
2083             trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
2084                                             sparse->areas[i].offset +
2085                                             sparse->areas[i].size - 1);
2086             region->mmaps[j].offset = sparse->areas[i].offset;
2087             region->mmaps[j].size = sparse->areas[i].size;
2088             j++;
2089         }
2090     }
2091 
2092     region->nr_mmaps = j;
2093     region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
2094 
2095     return 0;
2096 }
2097 
2098 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
2099                       int index, const char *name)
2100 {
2101     struct vfio_region_info *info;
2102     int ret;
2103 
2104     ret = vfio_get_region_info(vbasedev, index, &info);
2105     if (ret) {
2106         return ret;
2107     }
2108 
2109     region->vbasedev = vbasedev;
2110     region->flags = info->flags;
2111     region->size = info->size;
2112     region->fd_offset = info->offset;
2113     region->nr = index;
2114 
2115     if (region->size) {
2116         region->mem = g_new0(MemoryRegion, 1);
2117         memory_region_init_io(region->mem, obj, &vfio_region_ops,
2118                               region, name, region->size);
2119 
2120         if (!vbasedev->no_mmap &&
2121             region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
2122 
2123             ret = vfio_setup_region_sparse_mmaps(region, info);
2124 
2125             if (ret) {
2126                 region->nr_mmaps = 1;
2127                 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
2128                 region->mmaps[0].offset = 0;
2129                 region->mmaps[0].size = region->size;
2130             }
2131         }
2132     }
2133 
2134     g_free(info);
2135 
2136     trace_vfio_region_setup(vbasedev->name, index, name,
2137                             region->flags, region->fd_offset, region->size);
2138     return 0;
2139 }
2140 
2141 static void vfio_subregion_unmap(VFIORegion *region, int index)
2142 {
2143     trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
2144                             region->mmaps[index].offset,
2145                             region->mmaps[index].offset +
2146                             region->mmaps[index].size - 1);
2147     memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
2148     munmap(region->mmaps[index].mmap, region->mmaps[index].size);
2149     object_unparent(OBJECT(&region->mmaps[index].mem));
2150     region->mmaps[index].mmap = NULL;
2151 }
2152 
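/*
 * mmap() each (sparse) mmap area of the region with the protection implied
 * by the region flags and add it as a RAM device subregion; on failure,
 * unmap any areas that were already mapped and return -errno.
 */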
2153 int vfio_region_mmap(VFIORegion *region)
2154 {
2155     int i, prot = 0;
2156     char *name;
2157 
2158     if (!region->mem) {
2159         return 0;
2160     }
2161 
2162     prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
2163     prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
2164 
2165     for (i = 0; i < region->nr_mmaps; i++) {
2166         region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
2167                                      MAP_SHARED, region->vbasedev->fd,
2168                                      region->fd_offset +
2169                                      region->mmaps[i].offset);
2170         if (region->mmaps[i].mmap == MAP_FAILED) {
2171             int ret = -errno;
2172 
2173             trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
2174                                          region->fd_offset +
2175                                          region->mmaps[i].offset,
2176                                          region->fd_offset +
2177                                          region->mmaps[i].offset +
2178                                          region->mmaps[i].size - 1, ret);
2179 
2180             region->mmaps[i].mmap = NULL;
2181 
2182             for (i--; i >= 0; i--) {
2183                 vfio_subregion_unmap(region, i);
2184             }
2185 
2186             return ret;
2187         }
2188 
2189         name = g_strdup_printf("%s mmaps[%d]",
2190                                memory_region_name(region->mem), i);
2191         memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
2192                                           memory_region_owner(region->mem),
2193                                           name, region->mmaps[i].size,
2194                                           region->mmaps[i].mmap);
2195         g_free(name);
2196         memory_region_add_subregion(region->mem, region->mmaps[i].offset,
2197                                     &region->mmaps[i].mem);
2198 
2199         trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
2200                                region->mmaps[i].offset,
2201                                region->mmaps[i].offset +
2202                                region->mmaps[i].size - 1);
2203     }
2204 
2205     return 0;
2206 }
2207 
2208 void vfio_region_unmap(VFIORegion *region)
2209 {
2210     int i;
2211 
2212     if (!region->mem) {
2213         return;
2214     }
2215 
2216     for (i = 0; i < region->nr_mmaps; i++) {
2217         if (region->mmaps[i].mmap) {
2218             vfio_subregion_unmap(region, i);
2219         }
2220     }
2221 }
2222 
2223 void vfio_region_exit(VFIORegion *region)
2224 {
2225     int i;
2226 
2227     if (!region->mem) {
2228         return;
2229     }
2230 
2231     for (i = 0; i < region->nr_mmaps; i++) {
2232         if (region->mmaps[i].mmap) {
2233             memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
2234         }
2235     }
2236 
2237     trace_vfio_region_exit(region->vbasedev->name, region->nr);
2238 }
2239 
2240 void vfio_region_finalize(VFIORegion *region)
2241 {
2242     int i;
2243 
2244     if (!region->mem) {
2245         return;
2246     }
2247 
2248     for (i = 0; i < region->nr_mmaps; i++) {
2249         if (region->mmaps[i].mmap) {
2250             munmap(region->mmaps[i].mmap, region->mmaps[i].size);
2251             object_unparent(OBJECT(&region->mmaps[i].mem));
2252         }
2253     }
2254 
2255     object_unparent(OBJECT(region->mem));
2256 
2257     g_free(region->mem);
2258     g_free(region->mmaps);
2259 
2260     trace_vfio_region_finalize(region->vbasedev->name, region->nr);
2261 
2262     region->mem = NULL;
2263     region->mmaps = NULL;
2264     region->nr_mmaps = 0;
2265     region->size = 0;
2266     region->flags = 0;
2267     region->nr = 0;
2268 }
2269 
2270 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
2271 {
2272     int i;
2273 
2274     if (!region->mem) {
2275         return;
2276     }
2277 
2278     for (i = 0; i < region->nr_mmaps; i++) {
2279         if (region->mmaps[i].mmap) {
2280             memory_region_set_enabled(&region->mmaps[i].mem, enabled);
2281         }
2282     }
2283 
2284     trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
2285                                         enabled);
2286 }
2287 
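/*
 * System reset handler: first let every realized device compute whether it
 * needs a reset, then perform the multi-device hot resets in a second pass.
 */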
2288 void vfio_reset_handler(void *opaque)
2289 {
2290     VFIOGroup *group;
2291     VFIODevice *vbasedev;
2292 
2293     QLIST_FOREACH(group, &vfio_group_list, next) {
2294         QLIST_FOREACH(vbasedev, &group->device_list, next) {
2295             if (vbasedev->dev->realized) {
2296                 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
2297             }
2298         }
2299     }
2300 
2301     QLIST_FOREACH(group, &vfio_group_list, next) {
2302         QLIST_FOREACH(vbasedev, &group->device_list, next) {
2303             if (vbasedev->dev->realized && vbasedev->needs_reset) {
2304                 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
2305             }
2306         }
2307     }
2308 }
2309 
2310 static void vfio_kvm_device_add_group(VFIOGroup *group)
2311 {
2312 #ifdef CONFIG_KVM
2313     struct kvm_device_attr attr = {
2314         .group = KVM_DEV_VFIO_GROUP,
2315         .attr = KVM_DEV_VFIO_GROUP_ADD,
2316         .addr = (uint64_t)(unsigned long)&group->fd,
2317     };
2318 
2319     if (!kvm_enabled()) {
2320         return;
2321     }
2322 
2323     if (vfio_kvm_device_fd < 0) {
2324         struct kvm_create_device cd = {
2325             .type = KVM_DEV_TYPE_VFIO,
2326         };
2327 
2328         if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
2329             error_report("Failed to create KVM VFIO device: %m");
2330             return;
2331         }
2332 
2333         vfio_kvm_device_fd = cd.fd;
2334     }
2335 
2336     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
2337         error_report("Failed to add group %d to KVM VFIO device: %m",
2338                      group->groupid);
2339     }
2340 #endif
2341 }
2342 
2343 static void vfio_kvm_device_del_group(VFIOGroup *group)
2344 {
2345 #ifdef CONFIG_KVM
2346     struct kvm_device_attr attr = {
2347         .group = KVM_DEV_VFIO_GROUP,
2348         .attr = KVM_DEV_VFIO_GROUP_DEL,
2349         .addr = (uint64_t)(unsigned long)&group->fd,
2350     };
2351 
2352     if (vfio_kvm_device_fd < 0) {
2353         return;
2354     }
2355 
2356     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
2357         error_report("Failed to remove group %d from KVM VFIO device: %m",
2358                      group->groupid);
2359     }
2360 #endif
2361 }
2362 
2363 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
2364 {
2365     VFIOAddressSpace *space;
2366 
2367     QLIST_FOREACH(space, &vfio_address_spaces, list) {
2368         if (space->as == as) {
2369             return space;
2370         }
2371     }
2372 
2373     /* No suitable VFIOAddressSpace, create a new one */
2374     space = g_malloc0(sizeof(*space));
2375     space->as = as;
2376     QLIST_INIT(&space->containers);
2377 
2378     QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
2379 
2380     return space;
2381 }
2382 
2383 static void vfio_put_address_space(VFIOAddressSpace *space)
2384 {
2385     if (QLIST_EMPTY(&space->containers)) {
2386         QLIST_REMOVE(space, list);
2387         g_free(space);
2388     }
2389 }
2390 
2391 /*
2392  * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
2393  */
2394 static int vfio_get_iommu_type(VFIOContainer *container,
2395                                Error **errp)
2396 {
2397     int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
2398                           VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
2399     int i;
2400 
2401     for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
2402         if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
2403             return iommu_types[i];
2404         }
2405     }
2406     error_setg(errp, "No available IOMMU models");
2407     return -EINVAL;
2408 }
2409 
2410 static int vfio_init_container(VFIOContainer *container, int group_fd,
2411                                Error **errp)
2412 {
2413     int iommu_type, ret;
2414 
2415     iommu_type = vfio_get_iommu_type(container, errp);
2416     if (iommu_type < 0) {
2417         return iommu_type;
2418     }
2419 
2420     ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
2421     if (ret) {
2422         error_setg_errno(errp, errno, "Failed to set group container");
2423         return -errno;
2424     }
2425 
2426     while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
2427         if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
2428             /*
2429              * On sPAPR, although the IOMMU subdriver always advertises v1
2430              * and v2, the running platform may not support v2 and there is
2431              * no way to tell until an IOMMU group gets added to the
2432              * container.  So if setting v2 fails, fall back to v1.
2433              */
2434             iommu_type = VFIO_SPAPR_TCE_IOMMU;
2435             continue;
2436         }
2437         error_setg_errno(errp, errno, "Failed to set iommu for container");
2438         return -errno;
2439     }
2440 
2441     container->iommu_type = iommu_type;
2442     return 0;
2443 }
2444 
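/*
 * Retrieve VFIO_IOMMU_GET_INFO, growing the buffer until the argsz reported
 * by the kernel fits.
 */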
2445 static int vfio_get_iommu_info(VFIOContainer *container,
2446                                struct vfio_iommu_type1_info **info)
2447 {
2448 
2449     size_t argsz = sizeof(struct vfio_iommu_type1_info);
2450 
2451     *info = g_new0(struct vfio_iommu_type1_info, 1);
2452 again:
2453     (*info)->argsz = argsz;
2454 
2455     if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
2456         g_free(*info);
2457         *info = NULL;
2458         return -errno;
2459     }
2460 
2461     if ((*info)->argsz > argsz) {
2462         argsz = (*info)->argsz;
2463         *info = g_realloc(*info, argsz);
2464         goto again;
2465     }
2466 
2467     return 0;
2468 }
2469 
2470 static struct vfio_info_cap_header *
2471 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
2472 {
2473     struct vfio_info_cap_header *hdr;
2474     void *ptr = info;
2475 
2476     if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
2477         return NULL;
2478     }
2479 
2480     for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
2481         if (hdr->id == id) {
2482             return hdr;
2483         }
2484     }
2485 
2486     return NULL;
2487 }
2488 
2489 static void vfio_get_iommu_info_migration(VFIOContainer *container,
2490                                          struct vfio_iommu_type1_info *info)
2491 {
2492     struct vfio_info_cap_header *hdr;
2493     struct vfio_iommu_type1_info_cap_migration *cap_mig;
2494 
2495     hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
2496     if (!hdr) {
2497         return;
2498     }
2499 
2500     cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
2501                             header);
2502 
2503     /*
2504      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be in
2505      * units of qemu_real_host_page_size when marking pages dirty.
2506      */
2507     if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
2508         container->dirty_pages_supported = true;
2509         container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
2510         container->dirty_pgsizes = cap_mig->pgsize_bitmap;
2511     }
2512 }
2513 
2514 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
2515                                   Error **errp)
2516 {
2517     VFIOContainer *container;
2518     int ret, fd;
2519     VFIOAddressSpace *space;
2520 
2521     space = vfio_get_address_space(as);
2522 
2523     /*
2524      * VFIO is currently incompatible with discarding of RAM insofar as the
2525      * madvise to purge (zap) the page from QEMU's address space does not
2526      * interact with the memory API and therefore leaves stale virtual to
2527      * physical mappings in the IOMMU if the page was previously pinned.  We
2528      * therefore set discarding broken for each group added to a container,
2529      * whether the container is used individually or shared.  This provides
2530      * us with options to allow devices within a group to opt-in and allow
2531      * discarding, so long as it is done consistently for a group (for instance
2532      * if the device is an mdev device where it is known that the host vendor
2533      * driver will never pin pages outside of the working set of the guest
2534      * driver, which would thus not be discarding candidates).
2535      *
2536      * The first opportunity to induce pinning occurs here where we attempt to
2537      * attach the group to existing containers within the AddressSpace.  If any
2538      * pages are already zapped from the virtual address space, such as from
2539      * previous discards, new pinning will cause valid mappings to be
2540      * re-established.  Likewise, when the overall MemoryListener for a new
2541      * container is registered, a replay of mappings within the AddressSpace
2542      * will occur, re-establishing any previously zapped pages as well.
2543      *
2544      * In particular, virtio-balloon is currently only prevented from
2545      * discarding new memory; it does not yet set
2546      * ram_block_discard_set_required() and therefore neither stops us here
2547      * nor deals with the sudden memory consumption of inflated memory.
2548      *
2549      * We do support discarding of memory coordinated via the RamDiscardManager
2550      * with some IOMMU types. vfio_ram_block_discard_disable() handles the
2551      * details once we know which type of IOMMU we are using.
2552      */
2553 
2554     QLIST_FOREACH(container, &space->containers, next) {
2555         if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
2556             ret = vfio_ram_block_discard_disable(container, true);
2557             if (ret) {
2558                 error_setg_errno(errp, -ret,
2559                                  "Cannot set discarding of RAM broken");
2560                 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
2561                           &container->fd)) {
2562                     error_report("vfio: error disconnecting group %d from"
2563                                  " container", group->groupid);
2564                 }
2565                 return ret;
2566             }
2567             group->container = container;
2568             QLIST_INSERT_HEAD(&container->group_list, group, container_next);
2569             vfio_kvm_device_add_group(group);
2570             return 0;
2571         }
2572     }
2573 
2574     fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
2575     if (fd < 0) {
2576         error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
2577         ret = -errno;
2578         goto put_space_exit;
2579     }
2580 
2581     ret = ioctl(fd, VFIO_GET_API_VERSION);
2582     if (ret != VFIO_API_VERSION) {
2583         error_setg(errp, "supported vfio version: %d, "
2584                    "reported version: %d", VFIO_API_VERSION, ret);
2585         ret = -EINVAL;
2586         goto close_fd_exit;
2587     }
2588 
2589     container = g_malloc0(sizeof(*container));
2590     container->space = space;
2591     container->fd = fd;
2592     container->error = NULL;
2593     container->dirty_pages_supported = false;
2594     container->dma_max_mappings = 0;
2595     QLIST_INIT(&container->giommu_list);
2596     QLIST_INIT(&container->hostwin_list);
2597     QLIST_INIT(&container->vrdl_list);
2598 
2599     ret = vfio_init_container(container, group->fd, errp);
2600     if (ret) {
2601         goto free_container_exit;
2602     }
2603 
2604     ret = vfio_ram_block_discard_disable(container, true);
2605     if (ret) {
2606         error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
2607         goto free_container_exit;
2608     }
2609 
2610     switch (container->iommu_type) {
2611     case VFIO_TYPE1v2_IOMMU:
2612     case VFIO_TYPE1_IOMMU:
2613     {
2614         struct vfio_iommu_type1_info *info;
2615 
2616         ret = vfio_get_iommu_info(container, &info);
2617         if (ret) {
2618             error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
2619             goto enable_discards_exit;
2620         }
2621 
2622         if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
2623             container->pgsizes = info->iova_pgsizes;
2624         } else {
2625             container->pgsizes = qemu_real_host_page_size();
2626         }
2627 
2628         if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
2629             container->dma_max_mappings = 65535;
2630         }
2631         vfio_get_iommu_info_migration(container, info);
2632         g_free(info);
2633 
2634         /*
2635          * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
2636          * information to get the actual window extent rather than assume
2637          * a 64-bit IOVA address space.
2638          */
2639         vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);
2640 
2641         break;
2642     }
2643     case VFIO_SPAPR_TCE_v2_IOMMU:
2644     case VFIO_SPAPR_TCE_IOMMU:
2645     {
2646         struct vfio_iommu_spapr_tce_info info;
2647         bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
2648 
2649         /*
2650          * The host kernel code implementing VFIO_IOMMU_DISABLE is called
2651          * when the container fd is closed, so we do not call it explicitly
2652          * in this file.
2653          */
2654         if (!v2) {
2655             ret = ioctl(fd, VFIO_IOMMU_ENABLE);
2656             if (ret) {
2657                 error_setg_errno(errp, errno, "failed to enable container");
2658                 ret = -errno;
2659                 goto enable_discards_exit;
2660             }
2661         } else {
2662             container->prereg_listener = vfio_prereg_listener;
2663 
2664             memory_listener_register(&container->prereg_listener,
2665                                      &address_space_memory);
2666             if (container->error) {
2667                 memory_listener_unregister(&container->prereg_listener);
2668                 ret = -1;
2669                 error_propagate_prepend(errp, container->error,
2670                     "RAM memory listener initialization failed: ");
2671                 goto enable_discards_exit;
2672             }
2673         }
2674 
2675         info.argsz = sizeof(info);
2676         ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
2677         if (ret) {
2678             error_setg_errno(errp, errno,
2679                              "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
2680             ret = -errno;
2681             if (v2) {
2682                 memory_listener_unregister(&container->prereg_listener);
2683             }
2684             goto enable_discards_exit;
2685         }
2686 
2687         if (v2) {
2688             container->pgsizes = info.ddw.pgsizes;
2689             /*
2690              * A newly created container comes with a default window.
2691              * To make region_add/del simpler, remove this window now
2692              * and let the iommu_listener callbacks create/remove
2693              * windows as needed.
2694              */
2695             ret = vfio_spapr_remove_window(container, info.dma32_window_start);
2696             if (ret) {
2697                 error_setg_errno(errp, -ret,
2698                                  "failed to remove existing window");
2699                 goto enable_discards_exit;
2700             }
2701         } else {
2702             /* The default table uses 4K pages */
2703             container->pgsizes = 0x1000;
2704             vfio_host_win_add(container, info.dma32_window_start,
2705                               info.dma32_window_start +
2706                               info.dma32_window_size - 1,
2707                               0x1000);
2708         }
2709     }
2710     }
2711 
2712     vfio_kvm_device_add_group(group);
2713 
2714     QLIST_INIT(&container->group_list);
2715     QLIST_INSERT_HEAD(&space->containers, container, next);
2716 
2717     group->container = container;
2718     QLIST_INSERT_HEAD(&container->group_list, group, container_next);
2719 
2720     container->listener = vfio_memory_listener;
2721 
2722     memory_listener_register(&container->listener, container->space->as);
2723 
2724     if (container->error) {
2725         ret = -1;
2726         error_propagate_prepend(errp, container->error,
2727             "memory listener initialization failed: ");
2728         goto listener_release_exit;
2729     }
2730 
2731     container->initialized = true;
2732 
2733     return 0;
2734 listener_release_exit:
2735     QLIST_REMOVE(group, container_next);
2736     QLIST_REMOVE(container, next);
2737     vfio_kvm_device_del_group(group);
2738     vfio_listener_release(container);
2739 
2740 enable_discards_exit:
2741     vfio_ram_block_discard_disable(container, false);
2742 
2743 free_container_exit:
2744     g_free(container);
2745 
2746 close_fd_exit:
2747     close(fd);
2748 
2749 put_space_exit:
2750     vfio_put_address_space(space);
2751 
2752     return ret;
2753 }
2754 
2755 static void vfio_disconnect_container(VFIOGroup *group)
2756 {
2757     VFIOContainer *container = group->container;
2758 
2759     QLIST_REMOVE(group, container_next);
2760     group->container = NULL;
2761 
2762     /*
2763      * Explicitly release the listener before unsetting the container,
2764      * since unsetting may destroy the backend container if this is the
2765      * last group.
2766      */
2767     if (QLIST_EMPTY(&container->group_list)) {
2768         vfio_listener_release(container);
2769     }
2770 
2771     if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
2772         error_report("vfio: error disconnecting group %d from container",
2773                      group->groupid);
2774     }
2775 
2776     if (QLIST_EMPTY(&container->group_list)) {
2777         VFIOAddressSpace *space = container->space;
2778         VFIOGuestIOMMU *giommu, *tmp;
2779         VFIOHostDMAWindow *hostwin, *next;
2780 
2781         QLIST_REMOVE(container, next);
2782 
2783         QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
2784             memory_region_unregister_iommu_notifier(
2785                     MEMORY_REGION(giommu->iommu_mr), &giommu->n);
2786             QLIST_REMOVE(giommu, giommu_next);
2787             g_free(giommu);
2788         }
2789 
2790         QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
2791                            next) {
2792             QLIST_REMOVE(hostwin, hostwin_next);
2793             g_free(hostwin);
2794         }
2795 
2796         trace_vfio_disconnect_container(container->fd);
2797         close(container->fd);
2798         g_free(container);
2799 
2800         vfio_put_address_space(space);
2801     }
2802 }
2803 
2804 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
2805 {
2806     VFIOGroup *group;
2807     char path[32];
2808     struct vfio_group_status status = { .argsz = sizeof(status) };
2809 
2810     QLIST_FOREACH(group, &vfio_group_list, next) {
2811         if (group->groupid == groupid) {
2812             /* Found it.  Now is it already in the right context? */
2813             if (group->container->space->as == as) {
2814                 return group;
2815             } else {
2816                 error_setg(errp, "group %d used in multiple address spaces",
2817                            group->groupid);
2818                 return NULL;
2819             }
2820         }
2821     }
2822 
2823     group = g_malloc0(sizeof(*group));
2824 
2825     snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
2826     group->fd = qemu_open_old(path, O_RDWR);
2827     if (group->fd < 0) {
2828         error_setg_errno(errp, errno, "failed to open %s", path);
2829         goto free_group_exit;
2830     }
2831 
2832     if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
2833         error_setg_errno(errp, errno, "failed to get group %d status", groupid);
2834         goto close_fd_exit;
2835     }
2836 
2837     if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
2838         error_setg(errp, "group %d is not viable", groupid);
2839         error_append_hint(errp,
2840                           "Please ensure all devices within the iommu_group "
2841                           "are bound to their vfio bus driver.\n");
2842         goto close_fd_exit;
2843     }
2844 
2845     group->groupid = groupid;
2846     QLIST_INIT(&group->device_list);
2847 
2848     if (vfio_connect_container(group, as, errp)) {
2849         error_prepend(errp, "failed to setup container for group %d: ",
2850                       groupid);
2851         goto close_fd_exit;
2852     }
2853 
2854     if (QLIST_EMPTY(&vfio_group_list)) {
2855         qemu_register_reset(vfio_reset_handler, NULL);
2856     }
2857 
2858     QLIST_INSERT_HEAD(&vfio_group_list, group, next);
2859 
2860     return group;
2861 
2862 close_fd_exit:
2863     close(group->fd);
2864 
2865 free_group_exit:
2866     g_free(group);
2867 
2868     return NULL;
2869 }
2870 
2871 void vfio_put_group(VFIOGroup *group)
2872 {
2873     if (!group || !QLIST_EMPTY(&group->device_list)) {
2874         return;
2875     }
2876 
2877     if (!group->ram_block_discard_allowed) {
2878         vfio_ram_block_discard_disable(group->container, false);
2879     }
2880     vfio_kvm_device_del_group(group);
2881     vfio_disconnect_container(group);
2882     QLIST_REMOVE(group, next);
2883     trace_vfio_put_group(group->fd);
2884     close(group->fd);
2885     g_free(group);
2886 
2887     if (QLIST_EMPTY(&vfio_group_list)) {
2888         qemu_unregister_reset(vfio_reset_handler, NULL);
2889     }
2890 }
2891 
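/*
 * Retrieve VFIO_DEVICE_GET_INFO, growing the buffer until the argsz
 * reported by the kernel fits.  Returns NULL on failure; the caller must
 * g_free() the result.
 */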
2892 struct vfio_device_info *vfio_get_device_info(int fd)
2893 {
2894     struct vfio_device_info *info;
2895     uint32_t argsz = sizeof(*info);
2896 
2897     info = g_malloc0(argsz);
2898 
2899 retry:
2900     info->argsz = argsz;
2901 
2902     if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
2903         g_free(info);
2904         return NULL;
2905     }
2906 
2907     if (info->argsz > argsz) {
2908         argsz = info->argsz;
2909         info = g_realloc(info, argsz);
2910         goto retry;
2911     }
2912 
2913     return info;
2914 }
2915 
2916 int vfio_get_device(VFIOGroup *group, const char *name,
2917                     VFIODevice *vbasedev, Error **errp)
2918 {
2919     g_autofree struct vfio_device_info *info = NULL;
2920     int fd;
2921 
2922     fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
2923     if (fd < 0) {
2924         error_setg_errno(errp, errno, "error getting device from group %d",
2925                          group->groupid);
2926         error_append_hint(errp,
2927                       "Verify all devices in group %d are bound to vfio-<bus> "
2928                       "or pci-stub and not already in use\n", group->groupid);
2929         return fd;
2930     }
2931 
2932     info = vfio_get_device_info(fd);
2933     if (!info) {
2934         error_setg_errno(errp, errno, "error getting device info");
2935         close(fd);
2936         return -1;
2937     }
2938 
2939     /*
2940      * Set discarding of RAM as not broken for this group if the driver knows
2941      * the device operates compatibly with discarding.  Setting must be
2942      * consistent per group, but since compatibility is really only possible
2943      * with mdev currently, we expect singleton groups.
2944      */
2945     if (vbasedev->ram_block_discard_allowed !=
2946         group->ram_block_discard_allowed) {
2947         if (!QLIST_EMPTY(&group->device_list)) {
2948             error_setg(errp, "Inconsistent setting of support for discarding "
2949                        "RAM (e.g., balloon) within group");
2950             close(fd);
2951             return -1;
2952         }
2953 
2954         if (!group->ram_block_discard_allowed) {
2955             group->ram_block_discard_allowed = true;
2956             vfio_ram_block_discard_disable(group->container, false);
2957         }
2958     }
2959 
2960     vbasedev->fd = fd;
2961     vbasedev->group = group;
2962     QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
2963 
2964     vbasedev->num_irqs = info->num_irqs;
2965     vbasedev->num_regions = info->num_regions;
2966     vbasedev->flags = info->flags;
2967 
2968     trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);
2969 
2970     vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);
2971 
2972     return 0;
2973 }
2974 
2975 void vfio_put_base_device(VFIODevice *vbasedev)
2976 {
2977     if (!vbasedev->group) {
2978         return;
2979     }
2980     QLIST_REMOVE(vbasedev, next);
2981     vbasedev->group = NULL;
2982     trace_vfio_put_base_device(vbasedev->fd);
2983     close(vbasedev->fd);
2984 }
2985 
2986 int vfio_get_region_info(VFIODevice *vbasedev, int index,
2987                          struct vfio_region_info **info)
2988 {
2989     size_t argsz = sizeof(struct vfio_region_info);
2990 
2991     *info = g_malloc0(argsz);
2992 
2993     (*info)->index = index;
2994 retry:
2995     (*info)->argsz = argsz;
2996 
2997     if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
2998         g_free(*info);
2999         *info = NULL;
3000         return -errno;
3001     }
3002 
3003     if ((*info)->argsz > argsz) {
3004         argsz = (*info)->argsz;
3005         *info = g_realloc(*info, argsz);
3006 
3007         goto retry;
3008     }
3009 
3010     return 0;
3011 }
3012 
3013 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
3014                              uint32_t subtype, struct vfio_region_info **info)
3015 {
3016     int i;
3017 
3018     for (i = 0; i < vbasedev->num_regions; i++) {
3019         struct vfio_info_cap_header *hdr;
3020         struct vfio_region_info_cap_type *cap_type;
3021 
3022         if (vfio_get_region_info(vbasedev, i, info)) {
3023             continue;
3024         }
3025 
3026         hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
3027         if (!hdr) {
3028             g_free(*info);
3029             continue;
3030         }
3031 
3032         cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
3033 
3034         trace_vfio_get_dev_region(vbasedev->name, i,
3035                                   cap_type->type, cap_type->subtype);
3036 
3037         if (cap_type->type == type && cap_type->subtype == subtype) {
3038             return 0;
3039         }
3040 
3041         g_free(*info);
3042     }
3043 
3044     *info = NULL;
3045     return -ENODEV;
3046 }
3047 
3048 bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
3049 {
3050     struct vfio_region_info *info = NULL;
3051     bool ret = false;
3052 
3053     if (!vfio_get_region_info(vbasedev, region, &info)) {
3054         if (vfio_get_region_info_cap(info, cap_type)) {
3055             ret = true;
3056         }
3057         g_free(info);
3058     }
3059 
3060     return ret;
3061 }
3062 
3063 /*
3064  * Interfaces for IBM EEH (Enhanced Error Handling)
3065  */
3066 static bool vfio_eeh_container_ok(VFIOContainer *container)
3067 {
3068     /*
3069      * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
3070      * implementation is broken if there are multiple groups in a
3071      * container.  The hardware works in units of Partitionable
3072      * Endpoints (== IOMMU groups) and the EEH operations naively
3073      * iterate across all groups in the container, without any logic
3074      * to make sure the groups have their state synchronized.  For
3075      * certain operations (ENABLE) that might be ok, until an error
3076      * occurs, but for others (GET_STATE) it's clearly broken.
3077      */
3078 
3079     /*
3080      * XXX Once fixed kernels exist, test for them here
3081      */
3082 
3083     if (QLIST_EMPTY(&container->group_list)) {
3084         return false;
3085     }
3086 
3087     if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
3088         return false;
3089     }
3090 
3091     return true;
3092 }
3093 
3094 static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
3095 {
3096     struct vfio_eeh_pe_op pe_op = {
3097         .argsz = sizeof(pe_op),
3098         .op = op,
3099     };
3100     int ret;
3101 
3102     if (!vfio_eeh_container_ok(container)) {
3103         error_report("vfio/eeh: EEH_PE_OP 0x%x: "
3104                      "kernel requires a container with exactly one group", op);
3105         return -EPERM;
3106     }
3107 
3108     ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
3109     if (ret < 0) {
3110         error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
3111         return -errno;
3112     }
3113 
3114     return ret;
3115 }
3116 
3117 static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
3118 {
3119     VFIOAddressSpace *space = vfio_get_address_space(as);
3120     VFIOContainer *container = NULL;
3121 
3122     if (QLIST_EMPTY(&space->containers)) {
3123         /* No containers to act on */
3124         goto out;
3125     }
3126 
3127     container = QLIST_FIRST(&space->containers);
3128 
3129     if (QLIST_NEXT(container, next)) {
3130         /* We don't yet have logic to synchronize EEH state across
3131          * multiple containers */
3132         container = NULL;
3133         goto out;
3134     }
3135 
3136 out:
3137     vfio_put_address_space(space);
3138     return container;
3139 }
3140 
3141 bool vfio_eeh_as_ok(AddressSpace *as)
3142 {
3143     VFIOContainer *container = vfio_eeh_as_container(as);
3144 
3145     return (container != NULL) && vfio_eeh_container_ok(container);
3146 }
3147 
3148 int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
3149 {
3150     VFIOContainer *container = vfio_eeh_as_container(as);
3151 
3152     if (!container) {
3153         return -ENODEV;
3154     }
3155     return vfio_eeh_container_op(container, op);
3156 }
3157