xref: /openbmc/qemu/hw/vfio/common.c (revision 87776ab7)
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#ifdef CONFIG_KVM
#include "linux/kvm.h"
#endif
#include "trace.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
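
/*
 * Illustrative sketch (not part of the original file): the enable side of
 * VFIO_DEVICE_SET_IRQS uses a variable-length vfio_irq_set whose data
 * payload carries an eventfd for the host to signal.  Assuming 'fd' holds
 * a valid eventfd, device code would do roughly:
 *
 *     struct vfio_irq_set *irq_set;
 *     int argsz = sizeof(*irq_set) + sizeof(int32_t);
 *
 *     irq_set = g_malloc0(argsz);
 *     irq_set->argsz = argsz;
 *     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *                      VFIO_IRQ_SET_ACTION_TRIGGER;
 *     irq_set->index = index;
 *     irq_set->start = 0;
 *     irq_set->count = 1;
 *     *(int32_t *)&irq_set->data = fd;
 *     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *     g_free(irq_set);
 *
 * The helpers above only need the fixed-size DATA_NONE form, where a
 * count of 0 disables the whole index and a count of 1 acts on a single
 * vector.
 */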

/*
 * IO Port/MMIO - Beware of endianness: the VFIO kernel interface is
 * always little endian.
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
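
/*
 * Worked example (illustrative): for a 4-byte guest write of 0x12345678,
 *
 *     buf.dword = cpu_to_le32(0x12345678);  // bytes in memory: 78 56 34 12
 *
 * On a little-endian host cpu_to_le32() is a no-op; on a big-endian host
 * it swaps the bytes.  Either way pwrite() hands the kernel the
 * little-endian layout that VFIO expects.
 */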

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
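
/*
 * Illustrative call (assumptions: 'container' is a live container and
 * 'buf' is page-aligned process memory backing guest RAM): map one 4KiB
 * page read-write at IOVA 0x100000:
 *
 *     ret = vfio_dma_map(container, 0x100000, 0x1000, buf, false);
 *
 * On success the kernel pins the backing pages and installs the IOMMU
 * translation, which stays in place until the matching
 * vfio_dma_unmap(container, 0x100000, 0x1000).
 */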

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->iova,
                                iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have covers translation only through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
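
/*
 * Worked example (illustrative): a notifier call with an IOMMUTLBEntry of
 * iova = 0x2000, addr_mask = 0xfff, perm = IOMMU_RW translates into
 * vfio_dma_map(container, 0x2000, 0x1000, vaddr, false), while the same
 * entry with perm = IOMMU_NONE becomes
 * vfio_dma_unmap(container, 0x2000, 0x1000).
 */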

static hwaddr vfio_container_granularity(VFIOContainer *container)
{
    return (hwaddr)1 << ctz64(container->iova_pgsizes);
}
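
/*
 * Example (illustrative): if the kernel reported iova_pgsizes ==
 * 0x10001000 (4KiB and 256MiB page sizes supported), ctz64() finds bit 12
 * and the replay granularity is the smallest supported size, 4KiB.
 */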

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if ((iova < container->min_iova) || (end > container->max_iova)) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                   vfio_container_granularity(container),
                                   false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
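
/*
 * Worked example (illustrative, assuming 4KiB target pages and matching
 * sub-page offsets): a section at offset_within_address_space = 0x1234
 * with size = 0x3000 yields iova = TARGET_PAGE_ALIGN(0x1234) = 0x2000 and
 * llend = (0x1234 + 0x3000) & TARGET_PAGE_MASK = 0x4000, so only the
 * fully covered pages [0x2000, 0x4000) are mapped.
 */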

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    trace_vfio_listener_region_del(iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
            !(region->size & ~qemu_real_host_page_mask)) {

            region->nr_mmaps = 1;
            region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);

            region->mmaps[0].offset = 0;
            region->mmaps[0].size = region->size;
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
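
/*
 * Usage sketch (names hypothetical): a device backend typically pairs
 * vfio_region_setup() with vfio_region_mmap() below to overlay fast
 * mappings on the slow read/write path:
 *
 *     if (!vfio_region_setup(OBJECT(vdev), &vdev->vbasedev, region,
 *                            0, "vfio-bar0")) {
 *         vfio_region_mmap(region);
 *     }
 *
 * When the mmap succeeds, guest accesses hit the mapped pages directly;
 * otherwise they trap through vfio_region_ops above.
 */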

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            /* Roll back any mappings that already succeeded */
            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_ptr(&region->mmaps[i].mem,
                                   memory_region_owner(region->mem),
                                   name, region->mmaps[i].size,
                                   region->mmaps[i].mmap);
        g_free(name);
        memory_region_set_skip_dump(&region->mmaps[i].mem);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        container->min_iova = 0;
        container->max_iova = (hwaddr)-1;

        /* Assume just 4K IOVA page size */
        container->iova_pgsizes = 0x1000;
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            container->iova_pgsizes = info.iova_pgsizes;
        }
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }
        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * This only considers the host IOMMU's 32-bit window.  At
         * some point we need to add support for the optional 64-bit
         * window and dynamic windows.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
            ret = -errno;
            goto free_container_exit;
        }
        container->min_iova = info.dma32_window_start;
        container->max_iova = container->min_iova + info.dma32_window_size - 1;

        /* Assume just 4K IOVA pages for now */
        container->iova_pgsizes = 0x1000;
    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_report("vfio: memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(&giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        return -errno;
    }

    return 0;
}
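
/*
 * Caller-side sketch (illustrative): the helper allocates *info, so the
 * caller owns and must free it:
 *
 *     struct vfio_region_info *info;
 *
 *     if (!vfio_get_region_info(vbasedev, VFIO_PCI_CONFIG_REGION_INDEX,
 *                               &info)) {
 *         ... use info->size, info->offset, info->flags ...
 *         g_free(info);
 *     }
 */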

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return 0;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
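
/*
 * Usage example (illustrative): spapr machine code can gate and drive EEH
 * through these wrappers, e.g. enabling EEH on the PE behind an address
 * space:
 *
 *     if (vfio_eeh_as_ok(&address_space_memory)) {
 *         vfio_eeh_as_op(&address_space_memory, VFIO_EEH_PE_ENABLE);
 *     }
 *
 * VFIO_EEH_PE_ENABLE is one of the EEH PE operations defined in
 * <linux/vfio.h>.
 */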