xref: /openbmc/qemu/hw/vfio/common.c (revision daa76aa416b1e18ab1fac650ff53d966d8f21f68)
1 /*
2  * generic functions used by VFIO devices
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23 #include <linux/vfio.h>
24 
25 #include "hw/vfio/vfio-common.h"
26 #include "hw/vfio/vfio.h"
27 #include "exec/address-spaces.h"
28 #include "exec/memory.h"
29 #include "hw/hw.h"
30 #include "qemu/error-report.h"
31 #include "sysemu/kvm.h"
32 #ifdef CONFIG_KVM
33 #include "linux/kvm.h"
34 #endif
35 #include "trace.h"
36 
37 struct vfio_group_head vfio_group_list =
38     QLIST_HEAD_INITIALIZER(vfio_group_list);
39 struct vfio_as_head vfio_address_spaces =
40     QLIST_HEAD_INITIALIZER(vfio_address_spaces);
41 
42 #ifdef CONFIG_KVM
43 /*
44  * We have a single VFIO pseudo device per KVM VM.  Once created, it lives
45  * for the life of the VM.  Closing the file descriptor only drops our
46  * reference to it and the device's reference to kvm.  Therefore once
47  * initialized, this file descriptor is only released on QEMU exit and
48  * we'll re-use it should another vfio device be attached before then.
49  */
50 static int vfio_kvm_device_fd = -1;
51 #endif
52 
53 /*
54  * Common VFIO interrupt disable
55  */
56 void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
57 {
58     struct vfio_irq_set irq_set = {
59         .argsz = sizeof(irq_set),
60         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
61         .index = index,
62         .start = 0,
63         .count = 0,
64     };
65 
66     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
67 }
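
/*
 * Illustrative call, e.g. tearing down a device's MSI-X triggers
 * (VFIO_PCI_MSIX_IRQ_INDEX comes from linux/vfio.h; vbasedev is whatever
 * device the caller holds):
 *
 *     vfio_disable_irqindex(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
 *
 * DATA_NONE with ACTION_TRIGGER and count = 0 asks the kernel to remove
 * every trigger for the index in a single ioctl.
 */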
68 
69 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
70 {
71     struct vfio_irq_set irq_set = {
72         .argsz = sizeof(irq_set),
73         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
74         .index = index,
75         .start = 0,
76         .count = 1,
77     };
78 
79     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
80 }
81 
82 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
83 {
84     struct vfio_irq_set irq_set = {
85         .argsz = sizeof(irq_set),
86         .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
87         .index = index,
88         .start = 0,
89         .count = 1,
90     };
91 
92     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
93 }
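
/*
 * Sketch of typical use with level-triggered INTx (illustrative, not a
 * real caller): the index is left masked while an interrupt is being
 * serviced and unmasked once the guest's BAR access signals EOI:
 *
 *     vfio_mask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *     ... guest services the interrupt, vfio_eoi fires ...
 *     vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *
 * VFIO_PCI_INTX_IRQ_INDEX is defined in linux/vfio.h.
 */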
94 
95 /*
 96  * IO Port/MMIO - Beware of endianness: VFIO is always little endian
97  */
98 void vfio_region_write(void *opaque, hwaddr addr,
99                        uint64_t data, unsigned size)
100 {
101     VFIORegion *region = opaque;
102     VFIODevice *vbasedev = region->vbasedev;
103     union {
104         uint8_t byte;
105         uint16_t word;
106         uint32_t dword;
107         uint64_t qword;
108     } buf;
109 
110     switch (size) {
111     case 1:
112         buf.byte = data;
113         break;
114     case 2:
115         buf.word = cpu_to_le16(data);
116         break;
117     case 4:
118         buf.dword = cpu_to_le32(data);
119         break;
120     default:
121         hw_error("vfio: unsupported write size, %d bytes", size);
122         break;
123     }
124 
125     if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
126         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
127                      ",%d) failed: %m",
128                      __func__, vbasedev->name, region->nr,
129                      addr, data, size);
130     }
131 
132     trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
133 
134     /*
135      * A read or write to a BAR always signals an INTx EOI.  This will
136      * do nothing if not pending (including not in INTx mode).  We assume
137      * that a BAR access is in response to an interrupt and that BAR
138      * accesses will service the interrupt.  Unfortunately, we don't know
139      * which access will service the interrupt, so we're potentially
140      * getting quite a few host interrupts per guest interrupt.
141      */
142     vbasedev->ops->vfio_eoi(vbasedev);
143 }
144 
145 uint64_t vfio_region_read(void *opaque,
146                           hwaddr addr, unsigned size)
147 {
148     VFIORegion *region = opaque;
149     VFIODevice *vbasedev = region->vbasedev;
150     union {
151         uint8_t byte;
152         uint16_t word;
153         uint32_t dword;
154         uint64_t qword;
155     } buf;
156     uint64_t data = 0;
157 
158     if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
159         error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
160                      __func__, vbasedev->name, region->nr,
161                      addr, size);
162         return (uint64_t)-1;
163     }
164     switch (size) {
165     case 1:
166         data = buf.byte;
167         break;
168     case 2:
169         data = le16_to_cpu(buf.word);
170         break;
171     case 4:
172         data = le32_to_cpu(buf.dword);
173         break;
174     default:
175         hw_error("vfio: unsupported read size, %d bytes", size);
176         break;
177     }
178 
179     trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
180 
181     /* Same as write above */
182     vbasedev->ops->vfio_eoi(vbasedev);
183 
184     return data;
185 }
186 
187 const MemoryRegionOps vfio_region_ops = {
188     .read = vfio_region_read,
189     .write = vfio_region_write,
190     .endianness = DEVICE_LITTLE_ENDIAN,
191 };
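
/*
 * Illustrative only: vfio_region_setup() below wires these ops to a
 * MemoryRegion, roughly as
 *
 *     memory_region_init_io(region->mem, owner, &vfio_region_ops,
 *                           region, name, region->size);
 *
 * so any slow-path guest access to the region funnels through
 * vfio_region_read()/vfio_region_write() and the device fd.
 */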
192 
193 /*
194  * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
195  */
196 static int vfio_dma_unmap(VFIOContainer *container,
197                           hwaddr iova, ram_addr_t size)
198 {
199     struct vfio_iommu_type1_dma_unmap unmap = {
200         .argsz = sizeof(unmap),
201         .flags = 0,
202         .iova = iova,
203         .size = size,
204     };
205 
206     if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
207         error_report("VFIO_UNMAP_DMA: %d", -errno);
208         return -errno;
209     }
210 
211     return 0;
212 }
213 
214 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
215                         ram_addr_t size, void *vaddr, bool readonly)
216 {
217     struct vfio_iommu_type1_dma_map map = {
218         .argsz = sizeof(map),
219         .flags = VFIO_DMA_MAP_FLAG_READ,
220         .vaddr = (__u64)(uintptr_t)vaddr,
221         .iova = iova,
222         .size = size,
223     };
224 
225     if (!readonly) {
226         map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
227     }
228 
229     /*
230      * Try the mapping; if it fails with EBUSY, unmap the region and try
231      * again.  This shouldn't be necessary, but we sometimes see it in
232      * the VGA ROM space.
233      */
234     if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
235         (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
236          ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
237         return 0;
238     }
239 
240     error_report("VFIO_MAP_DMA: %d", -errno);
241     return -errno;
242 }
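
/*
 * Minimal illustrative pairing, assuming a page of guest RAM at vaddr
 * that the device should see at iova:
 *
 *     if (vfio_dma_map(container, iova, 4096, vaddr, false) == 0) {
 *         ... the device may now DMA to [iova, iova + 4096) ...
 *         vfio_dma_unmap(container, iova, 4096);
 *     }
 */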
243 
244 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
245 {
246     return (!memory_region_is_ram(section->mr) &&
247             !memory_region_is_iommu(section->mr)) ||
248            /*
249             * Sizing an enabled 64-bit BAR can cause spurious mappings to
250             * addresses in the upper part of the 64-bit address space.  These
251             * are never accessed by the CPU and are beyond the address width of
252             * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
253             */
254            section->offset_within_address_space & (1ULL << 63);
255 }
256 
257 static void vfio_iommu_map_notify(Notifier *n, void *data)
258 {
259     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
260     VFIOContainer *container = giommu->container;
261     IOMMUTLBEntry *iotlb = data;
262     hwaddr iova = iotlb->iova + giommu->iommu_offset;
263     MemoryRegion *mr;
264     hwaddr xlat;
265     hwaddr len = iotlb->addr_mask + 1;
266     void *vaddr;
267     int ret;
268 
269     trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);
270 
271     if (iotlb->target_as != &address_space_memory) {
272         error_report("Wrong target AS \"%s\", only system memory is allowed",
273                      iotlb->target_as->name ? iotlb->target_as->name : "none");
274         return;
275     }
276 
277     /*
278      * The IOMMU TLB entry we have covers translation only through
279      * this IOMMU to its immediate target.  We need to translate
280      * it the rest of the way through to memory.
281      */
282     rcu_read_lock();
283     mr = address_space_translate(&address_space_memory,
284                                  iotlb->translated_addr,
285                                  &xlat, &len, iotlb->perm & IOMMU_WO);
286     if (!memory_region_is_ram(mr)) {
287         error_report("iommu map to non memory area %"HWADDR_PRIx"",
288                      xlat);
289         goto out;
290     }
291     /*
292      * Translation truncates length to the IOMMU page size;
293      * check that it did not truncate too much.
294      */
295     if (len & iotlb->addr_mask) {
296         error_report("iommu has granularity incompatible with target AS");
297         goto out;
298     }
299 
300     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
301         vaddr = memory_region_get_ram_ptr(mr) + xlat;
302         ret = vfio_dma_map(container, iova,
303                            iotlb->addr_mask + 1, vaddr,
304                            !(iotlb->perm & IOMMU_WO) || mr->readonly);
305         if (ret) {
306             error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
307                          "0x%"HWADDR_PRIx", %p) = %d (%m)",
308                          container, iova,
309                          iotlb->addr_mask + 1, vaddr, ret);
310         }
311     } else {
312         ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
313         if (ret) {
314             error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
315                          "0x%"HWADDR_PRIx") = %d (%m)",
316                          container, iova,
317                          iotlb->addr_mask + 1, ret);
318         }
319     }
320 out:
321     rcu_read_unlock();
322 }
323 
324 static hwaddr vfio_container_granularity(VFIOContainer *container)
325 {
326     return (hwaddr)1 << ctz64(container->iova_pgsizes);
327 }
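
/*
 * Worked example: iova_pgsizes is a bitmap of supported IOVA page sizes,
 * so a host IOMMU offering 4KiB and 2MiB pages reports 0x201000;
 * ctz64(0x201000) == 12, giving a replay granularity of 1 << 12 == 4KiB,
 * the smallest page size the container can map.
 */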
328 
329 static void vfio_listener_region_add(MemoryListener *listener,
330                                      MemoryRegionSection *section)
331 {
332     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
333     hwaddr iova, end;
334     Int128 llend, llsize;
335     void *vaddr;
336     int ret;
337 
338     if (vfio_listener_skipped_section(section)) {
339         trace_vfio_listener_region_add_skip(
340                 section->offset_within_address_space,
341                 section->offset_within_address_space +
342                 int128_get64(int128_sub(section->size, int128_one())));
343         return;
344     }
345 
346     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
347                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
348         error_report("%s received unaligned region", __func__);
349         return;
350     }
351 
352     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
353     llend = int128_make64(section->offset_within_address_space);
354     llend = int128_add(llend, section->size);
355     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
356 
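    /*
     * Worked example, assuming 4KiB target pages: a section at
     * offset_within_address_space 0x100000800 with size 0x800 yields
     * iova = 0x100001000 and llend = 0x100001000, so the check below
     * skips the sub-page remnant instead of mapping it.
     */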
357     if (int128_ge(int128_make64(iova), llend)) {
358         return;
359     }
360     end = int128_get64(int128_sub(llend, int128_one()));
361 
362     if ((iova < container->min_iova) || (end > container->max_iova)) {
363         error_report("vfio: IOMMU container %p can't map guest IOVA region"
364                      " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
365                      container, iova, end);
366         ret = -EFAULT;
367         goto fail;
368     }
369 
370     memory_region_ref(section->mr);
371 
372     if (memory_region_is_iommu(section->mr)) {
373         VFIOGuestIOMMU *giommu;
374 
375         trace_vfio_listener_region_add_iommu(iova, end);
376         /*
377          * FIXME: We should do some checking to see if the
378          * capabilities of the host VFIO IOMMU are adequate to model
379          * the guest IOMMU
380          *
381          * FIXME: For VFIO iommu types which have KVM acceleration to
382          * avoid bouncing all map/unmaps through qemu this way, this
383          * would be the right place to wire that up (tell the KVM
384          * device emulation the VFIO iommu handles to use).
385          */
386         giommu = g_malloc0(sizeof(*giommu));
387         giommu->iommu = section->mr;
388         giommu->iommu_offset = section->offset_within_address_space -
389                                section->offset_within_region;
390         giommu->container = container;
391         giommu->n.notify = vfio_iommu_map_notify;
392         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
393 
394         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
395         memory_region_iommu_replay(giommu->iommu, &giommu->n,
396                                    vfio_container_granularity(container),
397                                    false);
398 
399         return;
400     }
401 
402     /* Here we assume that memory_region_is_ram(section->mr) == true */
403 
404     vaddr = memory_region_get_ram_ptr(section->mr) +
405             section->offset_within_region +
406             (iova - section->offset_within_address_space);
407 
408     trace_vfio_listener_region_add_ram(iova, end, vaddr);
409 
410     llsize = int128_sub(llend, int128_make64(iova));
411 
412     ret = vfio_dma_map(container, iova, int128_get64(llsize),
413                        vaddr, section->readonly);
414     if (ret) {
415         error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
416                      "0x%"HWADDR_PRIx", %p) = %d (%m)",
417                      container, iova, int128_get64(llsize), vaddr, ret);
418         goto fail;
419     }
420 
421     return;
422 
423 fail:
424     /*
425      * On the initfn path, store the first error in the container so we
426      * can gracefully fail.  At runtime, there's not much we can do other
427      * than throw a hardware error.
428      */
429     if (!container->initialized) {
430         if (!container->error) {
431             container->error = ret;
432         }
433     } else {
434         hw_error("vfio: DMA mapping failed, unable to continue");
435     }
436 }
437 
438 static void vfio_listener_region_del(MemoryListener *listener,
439                                      MemoryRegionSection *section)
440 {
441     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
442     hwaddr iova, end;
443     Int128 llend, llsize;
444     int ret;
445 
446     if (vfio_listener_skipped_section(section)) {
447         trace_vfio_listener_region_del_skip(
448                 section->offset_within_address_space,
449                 section->offset_within_address_space +
450                 int128_get64(int128_sub(section->size, int128_one())));
451         return;
452     }
453 
454     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
455                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
456         error_report("%s received unaligned region", __func__);
457         return;
458     }
459 
460     if (memory_region_is_iommu(section->mr)) {
461         VFIOGuestIOMMU *giommu;
462 
463         QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
464             if (giommu->iommu == section->mr) {
465                 memory_region_unregister_iommu_notifier(&giommu->n);
466                 QLIST_REMOVE(giommu, giommu_next);
467                 g_free(giommu);
468                 break;
469             }
470         }
471 
472         /*
473          * FIXME: We assume the one big unmap below is adequate to
474          * remove any individual page mappings in the IOMMU which
475          * might have been copied into VFIO. This works for a page table
476          * based IOMMU where a big unmap flattens a large range of IO-PTEs.
477          * That may not be true for all IOMMU types.
478          */
479     }
480 
481     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
482     llend = int128_make64(section->offset_within_address_space);
483     llend = int128_add(llend, section->size);
484     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
485 
486     if (int128_ge(int128_make64(iova), llend)) {
487         return;
488     }
489     end = int128_get64(int128_sub(llend, int128_one()));
490 
491     llsize = int128_sub(llend, int128_make64(iova));
492 
493     trace_vfio_listener_region_del(iova, end);
494 
495     ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
496     memory_region_unref(section->mr);
497     if (ret) {
498         error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
499                      "0x%"HWADDR_PRIx") = %d (%m)",
500                      container, iova, int128_get64(llsize), ret);
501     }
502 }
503 
504 static const MemoryListener vfio_memory_listener = {
505     .region_add = vfio_listener_region_add,
506     .region_del = vfio_listener_region_del,
507 };
508 
509 static void vfio_listener_release(VFIOContainer *container)
510 {
511     memory_listener_unregister(&container->listener);
512 }
513 
514 static struct vfio_info_cap_header *
515 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
516 {
517     struct vfio_info_cap_header *hdr;
518     void *ptr = info;
519 
520     if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
521         return NULL;
522     }
523 
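    /*
     * The capability chain is a list of byte offsets from the start of
     * info; a hdr->next of 0 makes ptr + hdr->next == ptr, which is what
     * terminates the walk below.
     */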
524     for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
525         if (hdr->id == id) {
526             return hdr;
527         }
528     }
529 
530     return NULL;
531 }
532 
533 static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
534                                            struct vfio_region_info *info)
535 {
536     struct vfio_info_cap_header *hdr;
537     struct vfio_region_info_cap_sparse_mmap *sparse;
538     int i;
539 
540     hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
541     if (!hdr) {
542         return;
543     }
544 
545     sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
546 
547     trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
548                                          region->nr, sparse->nr_areas);
549 
550     region->nr_mmaps = sparse->nr_areas;
551     region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
552 
553     for (i = 0; i < region->nr_mmaps; i++) {
554         region->mmaps[i].offset = sparse->areas[i].offset;
555         region->mmaps[i].size = sparse->areas[i].size;
556         trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
557                                             region->mmaps[i].offset +
558                                             region->mmaps[i].size);
559     }
560 }
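
/*
 * Hypothetical example: a BAR whose MSI-X table must not be mmap'd might
 * advertise two sparse areas, [0x0, 0x2000) and [0x3000, 0x4000).  The
 * loop above records one VFIOMmap per area so that only those windows
 * are mmap'd later, leaving the hole to the slow read/write path.
 */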
561 
562 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
563                       int index, const char *name)
564 {
565     struct vfio_region_info *info;
566     int ret;
567 
568     ret = vfio_get_region_info(vbasedev, index, &info);
569     if (ret) {
570         return ret;
571     }
572 
573     region->vbasedev = vbasedev;
574     region->flags = info->flags;
575     region->size = info->size;
576     region->fd_offset = info->offset;
577     region->nr = index;
578 
579     if (region->size) {
580         region->mem = g_new0(MemoryRegion, 1);
581         memory_region_init_io(region->mem, obj, &vfio_region_ops,
582                               region, name, region->size);
583 
584         if (!vbasedev->no_mmap &&
585             region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
586             !(region->size & ~qemu_real_host_page_mask)) {
587 
588             vfio_setup_region_sparse_mmaps(region, info);
589 
590             if (!region->nr_mmaps) {
591                 region->nr_mmaps = 1;
592                 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
593                 region->mmaps[0].offset = 0;
594                 region->mmaps[0].size = region->size;
595             }
596         }
597     }
598 
599     g_free(info);
600 
601     trace_vfio_region_setup(vbasedev->name, index, name,
602                             region->flags, region->fd_offset, region->size);
603     return 0;
604 }
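
/*
 * Typical call shape, e.g. for a PCI BAR (vdev and its fields are
 * hypothetical; the index constant is from linux/vfio.h and real callers
 * live in the bus-specific VFIO code):
 *
 *     vfio_region_setup(OBJECT(vdev), vbasedev, &vdev->bars[0].region,
 *                       VFIO_PCI_BAR0_REGION_INDEX, "vfio-bar0");
 */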
605 
606 int vfio_region_mmap(VFIORegion *region)
607 {
608     int i, prot = 0;
609     char *name;
610 
611     if (!region->mem) {
612         return 0;
613     }
614 
615     prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
616     prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
617 
618     for (i = 0; i < region->nr_mmaps; i++) {
619         region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
620                                      MAP_SHARED, region->vbasedev->fd,
621                                      region->fd_offset +
622                                      region->mmaps[i].offset);
623         if (region->mmaps[i].mmap == MAP_FAILED) {
624             int ret = -errno;
625 
626             trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
627                                          region->fd_offset +
628                                          region->mmaps[i].offset,
629                                          region->fd_offset +
630                                          region->mmaps[i].offset +
631                                          region->mmaps[i].size - 1, ret);
632 
633             region->mmaps[i].mmap = NULL;
634 
635             for (i--; i >= 0; i--) {
636                 memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
637                 munmap(region->mmaps[i].mmap, region->mmaps[i].size);
638                 object_unparent(OBJECT(&region->mmaps[i].mem));
639                 region->mmaps[i].mmap = NULL;
640             }
641 
642             return ret;
643         }
644 
645         name = g_strdup_printf("%s mmaps[%d]",
646                                memory_region_name(region->mem), i);
647         memory_region_init_ram_ptr(&region->mmaps[i].mem,
648                                    memory_region_owner(region->mem),
649                                    name, region->mmaps[i].size,
650                                    region->mmaps[i].mmap);
651         g_free(name);
652         memory_region_set_skip_dump(&region->mmaps[i].mem);
653         memory_region_add_subregion(region->mem, region->mmaps[i].offset,
654                                     &region->mmaps[i].mem);
655 
656         trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
657                                region->mmaps[i].offset,
658                                region->mmaps[i].offset +
659                                region->mmaps[i].size - 1);
660     }
661 
662     return 0;
663 }
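
/*
 * Layout note: each mmap'd area above is wrapped in a RAM MemoryRegion
 * and added as a subregion of region->mem, so guest accesses that hit an
 * area go straight to the mapping, while anything outside the areas
 * falls back to vfio_region_ops and the pread/pwrite path.
 */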
664 
665 void vfio_region_exit(VFIORegion *region)
666 {
667     int i;
668 
669     if (!region->mem) {
670         return;
671     }
672 
673     for (i = 0; i < region->nr_mmaps; i++) {
674         if (region->mmaps[i].mmap) {
675             memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
676         }
677     }
678 
679     trace_vfio_region_exit(region->vbasedev->name, region->nr);
680 }
681 
682 void vfio_region_finalize(VFIORegion *region)
683 {
684     int i;
685 
686     if (!region->mem) {
687         return;
688     }
689 
690     for (i = 0; i < region->nr_mmaps; i++) {
691         if (region->mmaps[i].mmap) {
692             munmap(region->mmaps[i].mmap, region->mmaps[i].size);
693             object_unparent(OBJECT(&region->mmaps[i].mem));
694         }
695     }
696 
697     object_unparent(OBJECT(region->mem));
698 
699     g_free(region->mem);
700     g_free(region->mmaps);
701 
702     trace_vfio_region_finalize(region->vbasedev->name, region->nr);
703 }
704 
705 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
706 {
707     int i;
708 
709     if (!region->mem) {
710         return;
711     }
712 
713     for (i = 0; i < region->nr_mmaps; i++) {
714         if (region->mmaps[i].mmap) {
715             memory_region_set_enabled(&region->mmaps[i].mem, enabled);
716         }
717     }
718 
719     trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
720                                         enabled);
721 }
722 
723 void vfio_reset_handler(void *opaque)
724 {
725     VFIOGroup *group;
726     VFIODevice *vbasedev;
727 
728     QLIST_FOREACH(group, &vfio_group_list, next) {
729         QLIST_FOREACH(vbasedev, &group->device_list, next) {
730             vbasedev->ops->vfio_compute_needs_reset(vbasedev);
731         }
732     }
733 
734     QLIST_FOREACH(group, &vfio_group_list, next) {
735         QLIST_FOREACH(vbasedev, &group->device_list, next) {
736             if (vbasedev->needs_reset) {
737                 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
738             }
739         }
740     }
741 }
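
/*
 * The two passes above are deliberate: a hot reset can affect every
 * device in the groups sharing a bus, so all devices first record
 * whether they need a reset, and only then are the multi-device resets
 * issued for the devices that flagged themselves.
 */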
742 
743 static void vfio_kvm_device_add_group(VFIOGroup *group)
744 {
745 #ifdef CONFIG_KVM
746     struct kvm_device_attr attr = {
747         .group = KVM_DEV_VFIO_GROUP,
748         .attr = KVM_DEV_VFIO_GROUP_ADD,
749         .addr = (uint64_t)(unsigned long)&group->fd,
750     };
751 
752     if (!kvm_enabled()) {
753         return;
754     }
755 
756     if (vfio_kvm_device_fd < 0) {
757         struct kvm_create_device cd = {
758             .type = KVM_DEV_TYPE_VFIO,
759         };
760 
761         if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
762             error_report("Failed to create KVM VFIO device: %m");
763             return;
764         }
765 
766         vfio_kvm_device_fd = cd.fd;
767     }
768 
769     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
770         error_report("Failed to add group %d to KVM VFIO device: %m",
771                      group->groupid);
772     }
773 #endif
774 }
775 
776 static void vfio_kvm_device_del_group(VFIOGroup *group)
777 {
778 #ifdef CONFIG_KVM
779     struct kvm_device_attr attr = {
780         .group = KVM_DEV_VFIO_GROUP,
781         .attr = KVM_DEV_VFIO_GROUP_DEL,
782         .addr = (uint64_t)(unsigned long)&group->fd,
783     };
784 
785     if (vfio_kvm_device_fd < 0) {
786         return;
787     }
788 
789     if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
790         error_report("Failed to remove group %d from KVM VFIO device: %m",
791                      group->groupid);
792     }
793 #endif
794 }
795 
796 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
797 {
798     VFIOAddressSpace *space;
799 
800     QLIST_FOREACH(space, &vfio_address_spaces, list) {
801         if (space->as == as) {
802             return space;
803         }
804     }
805 
806     /* No suitable VFIOAddressSpace, create a new one */
807     space = g_malloc0(sizeof(*space));
808     space->as = as;
809     QLIST_INIT(&space->containers);
810 
811     QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
812 
813     return space;
814 }
815 
816 static void vfio_put_address_space(VFIOAddressSpace *space)
817 {
818     if (QLIST_EMPTY(&space->containers)) {
819         QLIST_REMOVE(space, list);
820         g_free(space);
821     }
822 }
823 
824 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
825 {
826     VFIOContainer *container;
827     int ret, fd;
828     VFIOAddressSpace *space;
829 
830     space = vfio_get_address_space(as);
831 
832     QLIST_FOREACH(container, &space->containers, next) {
833         if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
834             group->container = container;
835             QLIST_INSERT_HEAD(&container->group_list, group, container_next);
836             return 0;
837         }
838     }
839 
840     fd = qemu_open("/dev/vfio/vfio", O_RDWR);
841     if (fd < 0) {
842         error_report("vfio: failed to open /dev/vfio/vfio: %m");
843         ret = -errno;
844         goto put_space_exit;
845     }
846 
847     ret = ioctl(fd, VFIO_GET_API_VERSION);
848     if (ret != VFIO_API_VERSION) {
849         error_report("vfio: supported vfio version: %d, "
850                      "reported version: %d", VFIO_API_VERSION, ret);
851         ret = -EINVAL;
852         goto close_fd_exit;
853     }
854 
855     container = g_malloc0(sizeof(*container));
856     container->space = space;
857     container->fd = fd;
858     if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
859         ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
860         bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
861         struct vfio_iommu_type1_info info;
862 
863         ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
864         if (ret) {
865             error_report("vfio: failed to set group container: %m");
866             ret = -errno;
867             goto free_container_exit;
868         }
869 
870         ret = ioctl(fd, VFIO_SET_IOMMU,
871                     v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
872         if (ret) {
873             error_report("vfio: failed to set iommu for container: %m");
874             ret = -errno;
875             goto free_container_exit;
876         }
877 
878         /*
879          * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
880          * IOVA whatsoever.  That's not actually true, but the current
881          * kernel interface doesn't tell us what it can map, and the
882          * existing Type1 IOMMUs generally support any IOVA we're
883          * going to actually try in practice.
884          */
885         container->min_iova = 0;
886         container->max_iova = (hwaddr)-1;
887 
888         /* Assume just 4K IOVA page size */
889         container->iova_pgsizes = 0x1000;
890         info.argsz = sizeof(info);
891         ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
892         /* Ignore errors; fall back to the 4K page size assumed above */
893         if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
894             container->iova_pgsizes = info.iova_pgsizes;
895         }
896     } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
897         struct vfio_iommu_spapr_tce_info info;
898 
899         ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
900         if (ret) {
901             error_report("vfio: failed to set group container: %m");
902             ret = -errno;
903             goto free_container_exit;
904         }
905         ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
906         if (ret) {
907             error_report("vfio: failed to set iommu for container: %m");
908             ret = -errno;
909             goto free_container_exit;
910         }
911 
912         /*
913          * The host kernel code implementing VFIO_IOMMU_DISABLE is called
914      * when the container fd is closed, so we do not call it explicitly
915          * in this file.
916          */
917         ret = ioctl(fd, VFIO_IOMMU_ENABLE);
918         if (ret) {
919             error_report("vfio: failed to enable container: %m");
920             ret = -errno;
921             goto free_container_exit;
922         }
923 
924         /*
925          * This only considers the host IOMMU's 32-bit window.  At
926          * some point we need to add support for the optional 64-bit
927      * window and dynamic windows.
928          */
929         info.argsz = sizeof(info);
930         ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
931         if (ret) {
932             error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
933             ret = -errno;
934             goto free_container_exit;
935         }
936         container->min_iova = info.dma32_window_start;
937         container->max_iova = container->min_iova + info.dma32_window_size - 1;
938 
939         /* Assume just 4K IOVA pages for now */
940         container->iova_pgsizes = 0x1000;
941     } else {
942         error_report("vfio: No available IOMMU models");
943         ret = -EINVAL;
944         goto free_container_exit;
945     }
946 
947     container->listener = vfio_memory_listener;
948 
949     memory_listener_register(&container->listener, container->space->as);
950 
951     if (container->error) {
952         ret = container->error;
953         error_report("vfio: memory listener initialization failed for container");
954         goto listener_release_exit;
955     }
956 
957     container->initialized = true;
958 
959     QLIST_INIT(&container->group_list);
960     QLIST_INSERT_HEAD(&space->containers, container, next);
961 
962     group->container = container;
963     QLIST_INSERT_HEAD(&container->group_list, group, container_next);
964 
965     return 0;
966 listener_release_exit:
967     vfio_listener_release(container);
968 
969 free_container_exit:
970     g_free(container);
971 
972 close_fd_exit:
973     close(fd);
974 
975 put_space_exit:
976     vfio_put_address_space(space);
977 
978     return ret;
979 }
980 
981 static void vfio_disconnect_container(VFIOGroup *group)
982 {
983     VFIOContainer *container = group->container;
984 
985     if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
986         error_report("vfio: error disconnecting group %d from container",
987                      group->groupid);
988     }
989 
990     QLIST_REMOVE(group, container_next);
991     group->container = NULL;
992 
993     if (QLIST_EMPTY(&container->group_list)) {
994         VFIOAddressSpace *space = container->space;
995         VFIOGuestIOMMU *giommu, *tmp;
996 
997         vfio_listener_release(container);
998         QLIST_REMOVE(container, next);
999 
1000         QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
1001             memory_region_unregister_iommu_notifier(&giommu->n);
1002             QLIST_REMOVE(giommu, giommu_next);
1003             g_free(giommu);
1004         }
1005 
1006         trace_vfio_disconnect_container(container->fd);
1007         close(container->fd);
1008         g_free(container);
1009 
1010         vfio_put_address_space(space);
1011     }
1012 }
1013 
1014 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
1015 {
1016     VFIOGroup *group;
1017     char path[32];
1018     struct vfio_group_status status = { .argsz = sizeof(status) };
1019 
1020     QLIST_FOREACH(group, &vfio_group_list, next) {
1021         if (group->groupid == groupid) {
1022             /* Found it.  Now is it already in the right context? */
1023             if (group->container->space->as == as) {
1024                 return group;
1025             } else {
1026                 error_report("vfio: group %d used in multiple address spaces",
1027                              group->groupid);
1028                 return NULL;
1029             }
1030         }
1031     }
1032 
1033     group = g_malloc0(sizeof(*group));
1034 
1035     snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
1036     group->fd = qemu_open(path, O_RDWR);
1037     if (group->fd < 0) {
1038         error_report("vfio: error opening %s: %m", path);
1039         goto free_group_exit;
1040     }
1041 
1042     if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
1043         error_report("vfio: error getting group status: %m");
1044         goto close_fd_exit;
1045     }
1046 
1047     if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
1048         error_report("vfio: error, group %d is not viable, please ensure "
1049                      "all devices within the iommu_group are bound to their "
1050                      "vfio bus driver.", groupid);
1051         goto close_fd_exit;
1052     }
1053 
1054     group->groupid = groupid;
1055     QLIST_INIT(&group->device_list);
1056 
1057     if (vfio_connect_container(group, as)) {
1058         error_report("vfio: failed to setup container for group %d", groupid);
1059         goto close_fd_exit;
1060     }
1061 
1062     if (QLIST_EMPTY(&vfio_group_list)) {
1063         qemu_register_reset(vfio_reset_handler, NULL);
1064     }
1065 
1066     QLIST_INSERT_HEAD(&vfio_group_list, group, next);
1067 
1068     vfio_kvm_device_add_group(group);
1069 
1070     return group;
1071 
1072 close_fd_exit:
1073     close(group->fd);
1074 
1075 free_group_exit:
1076     g_free(group);
1077 
1078     return NULL;
1079 }
1080 
1081 void vfio_put_group(VFIOGroup *group)
1082 {
1083     if (!group || !QLIST_EMPTY(&group->device_list)) {
1084         return;
1085     }
1086 
1087     vfio_kvm_device_del_group(group);
1088     vfio_disconnect_container(group);
1089     QLIST_REMOVE(group, next);
1090     trace_vfio_put_group(group->fd);
1091     close(group->fd);
1092     g_free(group);
1093 
1094     if (QLIST_EMPTY(&vfio_group_list)) {
1095         qemu_unregister_reset(vfio_reset_handler, NULL);
1096     }
1097 }
1098 
1099 int vfio_get_device(VFIOGroup *group, const char *name,
1100                     VFIODevice *vbasedev)
1101 {
1102     struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
1103     int ret, fd;
1104 
1105     fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
1106     if (fd < 0) {
1107         error_report("vfio: error getting device %s from group %d: %m",
1108                      name, group->groupid);
1109         error_printf("Verify all devices in group %d are bound to vfio-<bus> "
1110                      "or pci-stub and not already in use\n", group->groupid);
1111         return fd;
1112     }
1113 
1114     ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
1115     if (ret) {
1116         error_report("vfio: error getting device info: %m");
1117         close(fd);
1118         return ret;
1119     }
1120 
1121     vbasedev->fd = fd;
1122     vbasedev->group = group;
1123     QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
1124 
1125     vbasedev->num_irqs = dev_info.num_irqs;
1126     vbasedev->num_regions = dev_info.num_regions;
1127     vbasedev->flags = dev_info.flags;
1128 
1129     trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
1130                           dev_info.num_irqs);
1131 
1132     vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
1133     return 0;
1134 }
1135 
1136 void vfio_put_base_device(VFIODevice *vbasedev)
1137 {
1138     if (!vbasedev->group) {
1139         return;
1140     }
1141     QLIST_REMOVE(vbasedev, next);
1142     vbasedev->group = NULL;
1143     trace_vfio_put_base_device(vbasedev->fd);
1144     close(vbasedev->fd);
1145 }
1146 
1147 int vfio_get_region_info(VFIODevice *vbasedev, int index,
1148                          struct vfio_region_info **info)
1149 {
1150     size_t argsz = sizeof(struct vfio_region_info);
1151 
1152     *info = g_malloc0(argsz);
1153 
1154     (*info)->index = index;
1155 retry:
1156     (*info)->argsz = argsz;
1157 
1158     if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
1159         g_free(*info);
1160         *info = NULL;
1161         return -errno;
1162     }
1163 
1164     if ((*info)->argsz > argsz) {
1165         argsz = (*info)->argsz;
1166         *info = g_realloc(*info, argsz);
1167 
1168         goto retry;
1169     }
1170 
1171     return 0;
1172 }
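
/*
 * The retry loop implements the usual vfio argsz handshake.  For
 * example, a region that carries capabilities succeeds on the first
 * ioctl but reports an argsz larger than sizeof(struct
 * vfio_region_info); the buffer is then grown to that size and the
 * ioctl repeated, after which the capability chain can be found at
 * (*info)->cap_offset.
 */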
1173 
1174 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
1175                              uint32_t subtype, struct vfio_region_info **info)
1176 {
1177     int i;
1178 
1179     for (i = 0; i < vbasedev->num_regions; i++) {
1180         struct vfio_info_cap_header *hdr;
1181         struct vfio_region_info_cap_type *cap_type;
1182 
1183         if (vfio_get_region_info(vbasedev, i, info)) {
1184             continue;
1185         }
1186 
1187         hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
1188         if (!hdr) {
1189             g_free(*info);
1190             continue;
1191         }
1192 
1193         cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
1194 
1195         trace_vfio_get_dev_region(vbasedev->name, i,
1196                                   cap_type->type, cap_type->subtype);
1197 
1198         if (cap_type->type == type && cap_type->subtype == subtype) {
1199             return 0;
1200         }
1201 
1202         g_free(*info);
1203     }
1204 
1205     *info = NULL;
1206     return -ENODEV;
1207 }
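
/*
 * Illustrative lookup, modeled on the PCI quirk code (constants are from
 * linux/vfio.h and standard PCI IDs; the caller must g_free() the
 * result):
 *
 *     struct vfio_region_info *opregion;
 *
 *     if (!vfio_get_dev_region_info(vbasedev,
 *             VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
 *             VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion)) {
 *         ... use opregion->offset and opregion->size ...
 *         g_free(opregion);
 *     }
 */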
1208 
1209 /*
1210  * Interfaces for IBM EEH (Enhanced Error Handling)
1211  */
1212 static bool vfio_eeh_container_ok(VFIOContainer *container)
1213 {
1214     /*
1215      * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
1216      * implementation is broken if there are multiple groups in a
1217      * container.  The hardware works in units of Partitionable
1218      * Endpoints (== IOMMU groups) and the EEH operations naively
1219      * iterate across all groups in the container, without any logic
1220      * to make sure the groups have their state synchronized.  For
1221      * certain operations (ENABLE) that might be ok, until an error
1222      * occurs, but for others (GET_STATE) it's clearly broken.
1223      */
1224 
1225     /*
1226      * XXX Once fixed kernels exist, test for them here
1227      */
1228 
1229     if (QLIST_EMPTY(&container->group_list)) {
1230         return false;
1231     }
1232 
1233     if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
1234         return false;
1235     }
1236 
1237     return true;
1238 }
1239 
1240 static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
1241 {
1242     struct vfio_eeh_pe_op pe_op = {
1243         .argsz = sizeof(pe_op),
1244         .op = op,
1245     };
1246     int ret;
1247 
1248     if (!vfio_eeh_container_ok(container)) {
1249         error_report("vfio/eeh: EEH_PE_OP 0x%x: "
1250                      "kernel requires a container with exactly one group", op);
1251         return -EPERM;
1252     }
1253 
1254     ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
1255     if (ret < 0) {
1256         error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
1257         return -errno;
1258     }
1259 
1260     return ret;
1261 }
1262 
1263 static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
1264 {
1265     VFIOAddressSpace *space = vfio_get_address_space(as);
1266     VFIOContainer *container = NULL;
1267 
1268     if (QLIST_EMPTY(&space->containers)) {
1269         /* No containers to act on */
1270         goto out;
1271     }
1272 
1273     container = QLIST_FIRST(&space->containers);
1274 
1275     if (QLIST_NEXT(container, next)) {
1276         /* We don't yet have logic to synchronize EEH state across
1277          * multiple containers */
1278         container = NULL;
1279         goto out;
1280     }
1281 
1282 out:
1283     vfio_put_address_space(space);
1284     return container;
1285 }
1286 
1287 bool vfio_eeh_as_ok(AddressSpace *as)
1288 {
1289     VFIOContainer *container = vfio_eeh_as_container(as);
1290 
1291     return (container != NULL) && vfio_eeh_container_ok(container);
1292 }
1293 
1294 int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
1295 {
1296     VFIOContainer *container = vfio_eeh_as_container(as);
1297 
1298     if (!container) {
1299         return -ENODEV;
1300     }
1301     return vfio_eeh_container_op(container, op);
1302 }
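
/*
 * Illustrative caller, enabling EEH on the system address space (the op
 * constants come from linux/vfio.h):
 *
 *     if (vfio_eeh_as_ok(&address_space_memory)) {
 *         vfio_eeh_as_op(&address_space_memory, VFIO_EEH_PE_ENABLE);
 *     }
 */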
1303