/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
#include "qemu/range.h"
#include "qemu/reserved-region.h"
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/sysemu.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

#include "standard-headers/linux/virtio_ids.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"

/* Max sizes: default request virtqueue depth and probe output buffer */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
#define VIOMMU_PROBE_SIZE 512

typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    bool bypass;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

struct hiod_key {
    PCIBus *bus;
    uint8_t devfn;
};

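/*
 * Endpoint IDs are PCI requester IDs: the BDF built from the current bus
 * number and devfn. This is also the "sid" used for endpoint lookups
 * throughout this file.
 */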
static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev)
{
    uint32_t sid;
    bool bypassed;
    VirtIOIOMMU *s = sdev->viommu;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);
    /* the endpoint tree may not exist yet, e.g. before system reset */
    if (!s->endpoints) {
        bypassed = s->config.bypass;
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        bypassed = s->config.bypass;
    } else {
        bypassed = ep->domain->bypass;
    }

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return bypassed;
}

/* Return whether the device is using IOMMU translation. */
static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev)
{
    bool use_remapping;

    assert(sdev);

    use_remapping = !virtio_iommu_device_bypassed(sdev);

    trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus),
                                            PCI_SLOT(sdev->devfn),
                                            PCI_FUNC(sdev->devfn),
                                            use_remapping);

    /* Turn the old region off before turning the other one on */
    if (use_remapping) {
        memory_region_set_enabled(&sdev->bypass_mr, false);
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false);
        memory_region_set_enabled(&sdev->bypass_mr, true);
    }

    return use_remapping;
}

static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s)
{
    GHashTableIter iter;
    IOMMUPciBus *iommu_pci_bus;
    int i;

    g_hash_table_iter_init(&iter, s->as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!iommu_pci_bus->pbdev[i]) {
                continue;
            }
            virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]);
        }
    }
}

/**
 * The bus number is used for lookup when an SID-based operation occurs.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table, since at the time the IOMMUPciBus is created (iommu_find_add_as),
 * the bus numbers may not all be assigned yet.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

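/*
 * GTree comparator for the per-domain mapping tree: two intervals that
 * overlap compare as equal, so a lookup with any address range returns a
 * mapping that intersects it. Non-overlapping intervals are ordered by
 * address, which keeps the tree consistent as long as inserted mappings
 * never overlap each other.
 */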
static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}

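/*
 * IOMMU notifiers expect addr_mask to describe a naturally aligned
 * power-of-two region, so split [virt_start, virt_end] into the largest
 * such chunks with dma_aligned_pow2_mask(). For instance (illustration
 * only), [0x1000, 0x4fff] is notified as [0x1000, 0x1fff],
 * [0x2000, 0x3fff] and [0x4000, 0x4fff]. A full 64-bit range
 * (delta == UINT64_MAX) is notified in one shot; in that case
 * virt_end + 1 wraps to virt_start and the loop below does not run.
 */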
static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr,
                                          IOMMUTLBEvent *event,
                                          hwaddr virt_start, hwaddr virt_end)
{
    uint64_t delta = virt_end - virt_start;

    event->entry.iova = virt_start;
    event->entry.addr_mask = delta;

    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, *event);
    }

    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event->entry.addr_mask = mask;
        event->entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, *event);
        virt_start += mask + 1;
        if (event->entry.perm != IOMMU_NONE) {
            event->entry.translated_addr += mask + 1;
        }
    }
}

static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;
    IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
    virtio_iommu_switch_address_space(sdev);
}

static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id,
                                                  bool bypass)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        if (domain->bypass != bypass) {
            return NULL;
        }
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    domain->bypass = bypass;
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}

static void add_prop_resv_regions(IOMMUDevice *sdev)
{
    VirtIOIOMMU *s = sdev->viommu;
    int i;

    for (i = 0; i < s->nr_prop_resv_regions; i++) {
        ReservedRegion *reg = g_new0(ReservedRegion, 1);

        *reg = s->prop_resv_regions[i];
        sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
    }
}

static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_new0(IOMMUDevice, 1);

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU);
        add_prop_resv_regions(sdev);

        /*
         * Build the "IOMMU disabled" container with an alias to the
         * shared system MR. Aliasing to a shared memory region lets the
         * memory API detect identical FlatViews, so devices in bypass
         * mode (either because the virtio-iommu driver is not configured
         * or because "iommu=pt" is used) share the same FlatView. This
         * greatly reduces the total number of FlatViews in the system
         * and hence makes the VM run faster.
         */
        memory_region_init_alias(&sdev->bypass_mr, OBJECT(s),
                                 "system", get_system_memory(), 0,
                                 memory_region_size(get_system_memory()));

        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);

        /*
         * Hook both containers under the root container; we switch
         * between the IOMMU and bypass MRs by enabling/disabling the
         * corresponding sub-containers.
         */
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            MEMORY_REGION(&sdev->iommu_mr),
                                            0);
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            &sdev->bypass_mr, 0);

        virtio_iommu_switch_address_space(sdev);
        g_free(name);
    }
    return &sdev->as;
}

static gboolean hiod_equal(gconstpointer v1, gconstpointer v2)
{
    const struct hiod_key *key1 = v1;
    const struct hiod_key *key2 = v2;

    return (key1->bus == key2->bus) && (key1->devfn == key2->devfn);
}

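/*
 * Hash the (bus, devfn) key. Mixing the low bits of the bus pointer with
 * devfn is sufficient here: collisions are resolved by hiod_equal().
 */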
static guint hiod_hash(gconstpointer v)
{
    const struct hiod_key *key = v;
    guint value = (guint)(uintptr_t)key->bus;

    return (guint)(value << 8 | key->devfn);
}

static void hiod_destroy(gpointer v)
{
    object_unref(v);
}

static HostIOMMUDevice *
get_host_iommu_device(VirtIOIOMMU *viommu, PCIBus *bus, int devfn)
{
    struct hiod_key key = {
        .bus = bus,
        .devfn = devfn,
    };

    return g_hash_table_lookup(viommu->host_iommu_devices, &key);
}

/**
 * rebuild_resv_regions: rebuild the reserved regions from both the
 * host reserved ranges and the property-set reserved ranges
 */
static int rebuild_resv_regions(IOMMUDevice *sdev)
{
    GList *l;
    int i = 0;

    /* free the existing list and rebuild it from scratch */
    g_list_free_full(sdev->resv_regions, g_free);
    sdev->resv_regions = NULL;

    /* First add host reserved regions if any, all tagged as RESERVED */
    for (l = sdev->host_resv_ranges; l; l = l->next) {
        ReservedRegion *reg = g_new0(ReservedRegion, 1);
        Range *r = (Range *)l->data;

        reg->type = VIRTIO_IOMMU_RESV_MEM_T_RESERVED;
        range_set_bounds(&reg->range, range_lob(r), range_upb(r));
        sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
        trace_virtio_iommu_host_resv_regions(sdev->iommu_mr.parent_obj.name, i,
                                             range_lob(&reg->range),
                                             range_upb(&reg->range));
        i++;
    }
    /*
     * then add the higher priority reserved regions set by the machine
     * through properties
     */
    add_prop_resv_regions(sdev);
    return 0;
}

static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
                                             int devfn, GList *iova_ranges,
                                             Error **errp)
{
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    IOMMUDevice *sdev;
    GList *current_ranges;
    GList *l, *tmp, *new_ranges = NULL;
    int ret = -EINVAL;

    if (!sbus) {
        error_setg(errp, "%s: no IOMMUPciBus found!", __func__);
        return ret;
    }

    sdev = sbus->pbdev[devfn];

    current_ranges = sdev->host_resv_ranges;

    g_assert(!sdev->probe_done);

    /* check that each new resv region is included in an existing one */
    if (sdev->host_resv_ranges) {
        range_inverse_array(iova_ranges,
                            &new_ranges,
                            0, UINT64_MAX);

        for (tmp = new_ranges; tmp; tmp = tmp->next) {
            Range *newr = (Range *)tmp->data;
            bool included = false;

            for (l = current_ranges; l; l = l->next) {
                Range *r = (Range *)l->data;

                if (range_contains_range(r, newr)) {
                    included = true;
                    break;
                }
            }
            if (!included) {
                goto error;
            }
        }
        /* all new reserved ranges are included in existing ones */
        ret = 0;
        goto out;
    }

    range_inverse_array(iova_ranges,
                        &sdev->host_resv_ranges,
                        0, UINT64_MAX);
    rebuild_resv_regions(sdev);

    return 0;
error:
    error_setg(errp, "%s: conflicting host reserved ranges set!",
               __func__);
out:
    g_list_free_full(new_ranges, g_free);
    return ret;
}

static bool virtio_iommu_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
                                          HostIOMMUDevice *hiod, Error **errp)
{
    VirtIOIOMMU *viommu = opaque;
    HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);
    struct hiod_key *new_key;
    GList *host_iova_ranges = NULL;

    assert(hiod);

    if (get_host_iommu_device(viommu, bus, devfn)) {
        error_setg(errp, "Host IOMMU device already exists");
        return false;
    }

    if (hiodc->get_iova_ranges) {
        int ret;
        host_iova_ranges = hiodc->get_iova_ranges(hiod, errp);
        if (!host_iova_ranges) {
            return true; /* some old kernels may not support that capability */
        }
        ret = virtio_iommu_set_host_iova_ranges(viommu, hiod->aliased_bus,
                                                hiod->aliased_devfn,
                                                host_iova_ranges, errp);
        if (ret) {
            g_list_free_full(host_iova_ranges, g_free);
            return false;
        }
    }

    new_key = g_malloc(sizeof(*new_key));
    new_key->bus = bus;
    new_key->devfn = devfn;

    object_ref(hiod);
    g_hash_table_insert(viommu->host_iommu_devices, new_key, hiod);
    g_list_free_full(host_iova_ranges, g_free);

    return true;
}

static void
virtio_iommu_unset_iommu_device(PCIBus *bus, void *opaque, int devfn)
{
    VirtIOIOMMU *viommu = opaque;
    HostIOMMUDevice *hiod;
    struct hiod_key key = {
        .bus = bus,
        .devfn = devfn,
    };

    hiod = g_hash_table_lookup(viommu->host_iommu_devices, &key);
    if (!hiod) {
        return;
    }

    g_hash_table_remove(viommu->host_iommu_devices, &key);
}

static const PCIIOMMUOps virtio_iommu_ops = {
    .get_address_space = virtio_iommu_find_add_as,
    .set_iommu_device = virtio_iommu_set_iommu_device,
    .unset_iommu_device = virtio_iommu_unset_iommu_device,
};

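/*
 * ATTACH request: attach an endpoint to a domain. The domain is created
 * on first attach (with the requested bypass mode). Attaching an already
 * attached endpoint implicitly detaches it from its previous domain
 * first, and the new domain's existing mappings are replayed on the
 * endpoint's IOMMU memory region.
 */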
static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    IOMMUDevice *sdev;

    trace_virtio_iommu_attach(domain_id, ep_id);

    if (flags & ~VIRTIO_IOMMU_ATTACH_F_BYPASS) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * the device is already attached to a domain,
         * detach it first
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id,
                                     flags & VIRTIO_IOMMU_ATTACH_F_BYPASS);
    if (!domain) {
        /* Incompatible bypass flag */
        return VIRTIO_IOMMU_S_INVAL;
    }
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;
    sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
    virtio_iommu_switch_address_space(sdev);

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}

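/*
 * UNMAP request: mappings are removed whole. A range that fully covers
 * one or more existing mappings removes them and notifies the attached
 * endpoints; a range that only partially intersects a mapping fails
 * with VIRTIO_IOMMU_S_RANGE.
 */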
static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval.low = virt_start;
    interval.high = virt_end;

    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}

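/*
 * Fill RESV_MEM probe properties: one struct virtio_iommu_probe_resv_mem
 * per reserved region, with head.length covering the property body only
 * (i.e. excluding the head itself). Returns the number of bytes written,
 * or -ENOSPC if the remaining probe buffer is too small.
 */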
static ssize_t virtio_iommu_fill_resv_mem_prop(IOMMUDevice *sdev, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    GList *l;

    total = size * g_list_length(sdev->resv_regions);
    if (total > free) {
        return -ENOSPC;
    }

    for (l = sdev->resv_regions; l; l = l->next) {
        ReservedRegion *reg = l->data;
        unsigned subtype = reg->type;
        Range *range = &reg->range;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(range_lob(range));
        prop.end = cpu_to_le64(range_upb(range));

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    IOMMUMemoryRegion *iommu_mr = virtio_iommu_mr(s, ep_id);
    size_t free = VIOMMU_PROBE_SIZE;
    IOMMUDevice *sdev;
    ssize_t count;

    if (!iommu_mr) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    sdev = container_of(iommu_mr, IOMMUDevice, iommu_mr);

    count = virtio_iommu_fill_resv_mem_prop(sdev, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;
    sdev->probe_done = true;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t payload_sz)
{
    size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);

    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}

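/*
 * Generate one virtio_iommu_handle_<req>() wrapper per request type:
 * it copies the request payload out of the iovec (everything except the
 * trailing virtio_iommu_req_tail, which the device writes back) and
 * dispatches to the corresponding virtio_iommu_<req>() handler.
 */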
#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,               \
                    sizeof(req) - sizeof(struct virtio_iommu_req_tail));\
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}

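/*
 * Request virtqueue handler. For each element, the device-readable
 * out_sg carries the request (head + payload) and the device-writable
 * in_sg receives the response: a bare tail for most requests, or the
 * probe output buffer followed by the tail for PROBE requests.
 */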
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;
    size_t sz;

    for (;;) {
        size_t output_size = sizeof(tail);

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read %zu bytes from command head "
                          "but expected %zu\n", __func__, sz, sizeof(head));
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_rec_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = buf + s->config.probe_size;
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_rec_mutex_unlock(&s->mutex);

out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        if (unlikely(sz != output_size)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: wrote %zu bytes to command response "
                          "but response size is %zu\n",
                          __func__, sz, output_size);
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            /*
             * We checked that sizeof(tail) fits into elem->in_sg at the
             * beginning of the loop
             */
            output_size = sizeof(tail);
            g_free(buf);
            buf = NULL;
            sz = iov_from_buf(elem->in_sg,
                              elem->in_num,
                              0,
                              &tail,
                              output_size);
        }
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        buf = NULL;
    }
}

static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}

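/*
 * IOMMU memory region translate callback. The checks are ordered:
 * unknown endpoint (bypass or fault), reserved regions (MSI passes
 * through, others fault), endpoint without a domain (bypass or fault),
 * bypass domain, then the mapping lookup and permission check.
 */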
static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    int granule;
    bool found;
    GList *l;

    interval.low = addr;
    interval.high = addr + 1;
    granule = ctz64(s->config.page_size_mask);

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = BIT_ULL(granule) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = s->config.bypass;

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_rec_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));

    if (bypass_allowed) {
        assert(ep && ep->domain && !ep->domain->bypass);
    }

    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (l = sdev->resv_regions; l; l = l->next) {
        ReservedRegion *reg = l->data;

        if (range_contains(&reg->range, addr)) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    } else if (ep->domain->bypass) {
        entry.perm = flag;
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags, sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return entry;
}

static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    struct virtio_iommu_config *out_config = (void *)config_data;

    out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask);
    out_config->input_range.start = cpu_to_le64(dev_config->input_range.start);
    out_config->input_range.end = cpu_to_le64(dev_config->input_range.end);
    out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start);
    out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end);
    out_config->probe_size = cpu_to_le32(dev_config->probe_size);
    out_config->bypass = dev_config->bypass;

    trace_virtio_iommu_get_config(dev_config->page_size_mask,
                                  dev_config->input_range.start,
                                  dev_config->input_range.end,
                                  dev_config->domain_range.start,
                                  dev_config->domain_range.end,
                                  dev_config->probe_size,
                                  dev_config->bypass);
}

static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    const struct virtio_iommu_config *in_config = (void *)config_data;

    if (in_config->bypass != dev_config->bypass) {
        if (!virtio_vdev_has_feature(vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
            virtio_error(vdev, "cannot set config.bypass");
            return;
        } else if (in_config->bypass != 0 && in_config->bypass != 1) {
            virtio_error(vdev, "invalid config.bypass value '%u'",
                         in_config->bypass);
            return;
        }
        dev_config->bypass = in_config->bypass;
        virtio_iommu_switch_address_space_all(dev);
    }

    trace_virtio_iommu_set_config(in_config->bypass);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high,
                             mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask depends on the "granule" property. For example, with
 * 4k granule, it is -(4 * KiB). When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this
 * restriction to the mask.
 */
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu %s reports a page size mask 0x%"PRIx64
                   " incompatible with currently supported mask 0x%"PRIx64,
                   mr->parent_obj.name, new_mask, cur_mask);
        return -1;
    }

    /*
     * Once the granule is frozen we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it.
     */
    if (s->granule_frozen) {
        int cur_granule = ctz64(cur_mask);

        if (!(BIT_ULL(cur_granule) & new_mask)) {
            error_setg(errp,
                       "virtio-iommu %s does not support frozen granule 0x%llx",
                       mr->parent_obj.name, BIT_ULL(cur_granule));
            return -1;
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

static void virtio_iommu_system_reset(void *opaque)
{
    VirtIOIOMMU *s = opaque;

    trace_virtio_iommu_system_reset();

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    /*
     * config.bypass is sticky across device reset, but should be restored on
     * system reset
     */
    s->config.bypass = s->boot_bypass;
    virtio_iommu_switch_address_space_all(s);
}

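/*
 * Machine-init-done notifier: by this point all cold-plugged assigned
 * devices have had a chance to constrain config.page_size_mask, so the
 * granule is frozen. After this, hotplugged devices must support the
 * frozen granule (see virtio_iommu_set_page_size_mask()).
 */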
static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
{
    VirtIOIOMMU *s = container_of(notifier, VirtIOIOMMU, machine_done);
    int granule;

    if (likely(s->config.bypass)) {
        /*
         * Transiently enable the IOMMU MRs to collect page_size_mask
         * requirements through memory_region_iommu_set_page_size_mask(),
         * called by the VFIO region_add() callback
         */
        s->config.bypass = false;
        virtio_iommu_switch_address_space_all(s);
        /* restore default */
        s->config.bypass = true;
        virtio_iommu_switch_address_space_all(s);
    }
    s->granule_frozen = true;
    granule = ctz64(s->config.page_size_mask);
    trace_virtio_iommu_freeze_granule(BIT_ULL(granule));
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    /*
     * config.bypass is needed to get the initial address space early,
     * such as in the VFIO realize path
     */
    s->config.bypass = s->boot_bypass;
    if (s->aw_bits < 32 || s->aw_bits > 64) {
        error_setg(errp, "aw-bits must be within [32,64]");
        return;
    }
    s->config.input_range.end =
        s->aw_bits == 64 ? UINT64_MAX : BIT_ULL(s->aw_bits) - 1;

    switch (s->granule_mode) {
    case GRANULE_MODE_4K:
        s->config.page_size_mask = -(4 * KiB);
        break;
    case GRANULE_MODE_8K:
        s->config.page_size_mask = -(8 * KiB);
        break;
    case GRANULE_MODE_16K:
        s->config.page_size_mask = -(16 * KiB);
        break;
    case GRANULE_MODE_64K:
        s->config.page_size_mask = -(64 * KiB);
        break;
    case GRANULE_MODE_HOST:
        s->config.page_size_mask = qemu_real_host_page_mask();
        break;
    default:
        error_setg(errp, "Unsupported granule mode");
    }
    s->config.domain_range.end = UINT32_MAX;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);

    qemu_rec_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    s->host_iommu_devices = g_hash_table_new_full(hiod_hash, hiod_equal,
                                                  g_free, hiod_destroy);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, &virtio_iommu_ops, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }

    s->machine_done.notify = virtio_iommu_freeze_granule;
    qemu_add_machine_init_done_notifier(&s->machine_done);

    qemu_register_reset(virtio_iommu_system_reset, s);
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    qemu_unregister_reset(virtio_iommu_system_reset, s);
    qemu_remove_machine_init_done_notifier(&s->machine_done);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    qemu_rec_mutex_destroy(&s->mutex);

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

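/*
 * Migration: each domain's mapping GTree is migrated with
 * VMSTATE_GTREE_V(); vmstate_interval_mapping[] below supplies the
 * value (mapping) and key (interval) descriptions, in that order.
 */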
#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (const VMStateField[]) {                 \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (const VMStateField[]) {                \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

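/*
 * pre_load hook: allocate a fresh, empty mapping tree before the
 * incoming GTree entries are inserted into it.
 */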
static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_load = domain_preload,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_BOOL_V(bypass, VirtIOIOMMUDomain, 2),
        VMSTATE_END_OF_LIST()
    }
};

static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);

    /*
     * Memory regions are dynamically turned on/off depending on
     * 'config.bypass' and on the type of the attached domain, if any.
     * After migration, we need to make sure the memory regions are
     * still correct.
     */
    virtio_iommu_switch_address_space_all(s);
    return 0;
}

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 2,
    .version_id = 2,
    .post_load = iommu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 2,
    .priority = MIG_PRI_IOMMU,
    .version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
    DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode,
                             GRANULE_MODE_HOST),
    DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)