/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "trace.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

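/*
 * Illustrative usage sketch (not part of this file's call flow): a bus
 * driver servicing level-triggered INTx typically masks the index while
 * the guest runs its handler, then unmasks on EOI.  Assuming a
 * VFIODevice already opened via vfio_get_device():
 *
 *     vfio_mask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *     ... guest services the interrupt ...
 *     vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 */
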
/*
 * IO Port/MMIO - Beware of endianness; VFIO is always little endian.
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

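/*
 * Sketch of the slow path these ops implement: a guest access to a
 * non-mmap'd region is dispatched through vfio_region_ops and turned
 * into a pread()/pwrite() on the device fd at the region's file
 * offset.  For example (illustrative only), a 4-byte read at BAR
 * offset 0x10 is roughly:
 *
 *     uint32_t val = vfio_region_read(region, 0x10, 4);
 *
 * with the byte swapping above returning the little endian value from
 * the kernel in host endianness.
 */
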
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping; if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

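/*
 * Illustrative only: mapping one page of guest RAM at IOVA 0x100000,
 * assuming vaddr already points at the corresponding host virtual
 * address:
 *
 *     if (vfio_dma_map(container, 0x100000, 4096, vaddr, false)) {
 *         ... handle the errno-style negative return ...
 *     }
 *
 * In practice these calls come from the MemoryListener below rather
 * than from device code directly.
 */
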
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->iova,
                                iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have covers translation only through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size;
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static hwaddr vfio_container_granularity(VFIOContainer *container)
{
    return (hwaddr)1 << ctz64(container->iova_pgsizes);
}

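/*
 * Example: a Type1 container that reports only 4 KiB support has
 * iova_pgsizes == 0x1000, so ctz64() yields 12 and the replay
 * granularity above is 1 << 12 == 4096 bytes, i.e. the smallest IOVA
 * page size the host IOMMU can map.
 */
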
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if ((iova < container->min_iova) || (end > container->max_iova)) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                   vfio_container_granularity(container),
                                   false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    trace_vfio_listener_region_del(iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
            !(region->size & ~qemu_real_host_page_mask)) {

            region->nr_mmaps = 1;
            region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);

            region->mmaps[0].offset = 0;
            region->mmaps[0].size = region->size;
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

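/*
 * Illustrative usage (the owning object and the "bar0" name are
 * hypothetical): vfio_region_setup() only sizes and describes the
 * region; the actual mmap() happens in vfio_region_mmap() below.
 *
 *     if (!vfio_region_setup(OBJECT(dev), vbasedev, region, 0, "bar0")) {
 *         vfio_region_mmap(region);
 *     }
 */
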
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_ptr(&region->mmaps[i].mem,
                                   memory_region_owner(region->mem),
                                   name, region->mmaps[i].size,
                                   region->mmaps[i].mmap);
        g_free(name);
        memory_region_set_skip_dump(&region->mmaps[i].mem);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

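/*
 * Illustrative only: a caller can temporarily fall back to the slow
 * read/write path, e.g. around a device reset, by disabling the mmap
 * subregions and re-enabling them afterwards:
 *
 *     vfio_region_mmaps_set_enabled(region, false);
 *     ... reset the device ...
 *     vfio_region_mmaps_set_enabled(region, true);
 */
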
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

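/*
 * Object topology, for reference (types in hw/vfio/vfio-common.h):
 *
 *     AddressSpace -> VFIOAddressSpace -> VFIOContainer(s)
 *                                               |
 *                                          VFIOGroup(s) -> VFIODevice(s)
 *
 * vfio_connect_container() below either attaches the group to an
 * existing container in the address space or creates a new one.
 */
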
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        container->min_iova = 0;
        container->max_iova = (hwaddr)-1;

        /* Assume just 4K IOVA page size */
        container->iova_pgsizes = 0x1000;
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            container->iova_pgsizes = info.iova_pgsizes;
        }
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }
        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * This only considers the host IOMMU's 32-bit window.  At
         * some point we need to add support for the optional 64-bit
         * window and dynamic windows.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
            ret = -errno;
            goto free_container_exit;
        }
        container->min_iova = info.dma32_window_start;
        container->max_iova = container->min_iova + info.dma32_window_size - 1;

        /* Assume just 4K IOVA pages for now */
        container->iova_pgsizes = 0x1000;
    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_report("vfio: memory listener initialization failed"
                     " for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(&giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

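/*
 * Illustrative only: a typical caller resolves the device's IOMMU group
 * number from sysfs (the device's iommu_group symlink), then does
 * something like the following; the PCI address is a placeholder:
 *
 *     VFIOGroup *group = vfio_get_group(groupid, &address_space_memory);
 *     if (group && !vfio_get_device(group, "0000:06:0d.0", vbasedev)) {
 *         ... device is ready; regions and irqs can be queried ...
 *     }
 */
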
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        return -errno;
    }

    return 0;
}

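/*
 * Illustrative only: querying the config space region of a PCI device.
 * The caller owns the returned buffer and must g_free() it:
 *
 *     struct vfio_region_info *info;
 *     if (!vfio_get_region_info(vbasedev, VFIO_PCI_CONFIG_REGION_INDEX,
 *                               &info)) {
 *         ... use info->size, info->offset, info->flags ...
 *         g_free(info);
 *     }
 */
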
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return 0;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
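
/*
 * Illustrative only: an sPAPR caller checks vfio_eeh_as_ok() once and
 * then issues EEH operations against the address space, e.g. enabling
 * EEH for the (single) container:
 *
 *     if (vfio_eeh_as_ok(&address_space_memory)) {
 *         vfio_eeh_as_op(&address_space_memory, VFIO_EEH_PE_ENABLE);
 *     }
 */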
1193