/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
#include "qemu/range.h"
#include "qemu/reserved-region.h"
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

#include "standard-headers/linux/virtio_ids.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"

/* Default virtqueue size and size of the probe buffer */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
#define VIOMMU_PROBE_SIZE 512

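/*
 * A domain groups the endpoints attached to it and owns their mappings,
 * stored as a GTree of VirtIOIOMMUInterval keys (an inclusive [low, high]
 * virtual range) to VirtIOIOMMUMapping values (physical address and
 * permission flags).
 */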
typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    bool bypass;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

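/* The endpoint ID is the PCI requester ID, e.g. bus 1, devfn 0x08 -> 0x0108. */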
static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

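/*
 * Return whether the endpoint currently bypasses translation: either its
 * domain is a bypass domain or, when it is not attached to any domain,
 * the global config.bypass applies.
 */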
static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev)
{
    uint32_t sid;
    bool bypassed;
    VirtIOIOMMU *s = sdev->viommu;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);
    /* need to check bypass before system reset */
    if (!s->endpoints) {
        bypassed = s->config.bypass;
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        bypassed = s->config.bypass;
    } else {
        bypassed = ep->domain->bypass;
    }

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return bypassed;
}

/* Return whether the device is using IOMMU translation. */
static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev)
{
    bool use_remapping;

    assert(sdev);

    use_remapping = !virtio_iommu_device_bypassed(sdev);

    trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus),
                                            PCI_SLOT(sdev->devfn),
                                            PCI_FUNC(sdev->devfn),
                                            use_remapping);

    /* Turn one region off first, then the other on */
    if (use_remapping) {
        memory_region_set_enabled(&sdev->bypass_mr, false);
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false);
        memory_region_set_enabled(&sdev->bypass_mr, true);
    }

    return use_remapping;
}

static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s)
{
    GHashTableIter iter;
    IOMMUPciBus *iommu_pci_bus;
    int i;

    g_hash_table_iter_init(&iter, s->as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!iommu_pci_bus->pbdev[i]) {
                continue;
            }
            virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]);
        }
    }
}

/**
 * The bus number is used for lookup when an SID-based operation occurs.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table, since at the time the IOMMUPciBus is created (iommu_find_add_as),
 * the bus numbers may not all be assigned yet.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

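/* Return the IOMMU memory region of the endpoint with stream ID @sid, if any. */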
static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

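/*
 * GTree comparator for the mappings tree: disjoint intervals order by
 * address, while any two overlapping intervals compare equal, so looking
 * up an [addr, addr + 1] interval finds a mapping that overlaps @addr.
 */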
static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}

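/*
 * IOMMU notifiers expect naturally aligned power-of-two ranges, so split
 * [virt_start, virt_end] into such chunks with dma_aligned_pow2_mask().
 * For example, [0x1000, 0x2fff] is signalled as two 4KiB entries, at
 * 0x1000 and 0x2000.
 */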
static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr,
                                          IOMMUTLBEvent *event,
                                          hwaddr virt_start, hwaddr virt_end)
{
    uint64_t delta = virt_end - virt_start;

    event->entry.iova = virt_start;
    event->entry.addr_mask = delta;

    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, *event);
    }

    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event->entry.addr_mask = mask;
        event->entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, *event);
        virt_start += mask + 1;
        if (event->entry.perm != IOMMU_NONE) {
            event->entry.translated_addr += mask + 1;
        }
    }
}

static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;
    IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
    virtio_iommu_switch_address_space(sdev);
}

static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id,
                                                  bool bypass)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        if (domain->bypass != bypass) {
            return NULL;
        }
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    domain->bypass = bypass;
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}

static void add_prop_resv_regions(IOMMUDevice *sdev)
{
    VirtIOIOMMU *s = sdev->viommu;
    int i;

    for (i = 0; i < s->nr_prop_resv_regions; i++) {
        ReservedRegion *reg = g_new0(ReservedRegion, 1);

        *reg = s->prop_resv_regions[i];
        sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
    }
}

static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_new0(IOMMUDevice, 1);

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU);
        add_prop_resv_regions(sdev);

        /*
         * Build the IOMMU-disabled container with aliases to the shared
         * MRs. Aliasing to a shared memory region helps the memory API
         * detect identical FlatViews, so devices in bypass mode (either
         * because the virtio-iommu driver is not configured or with
         * "iommu=pt") can share the same FlatView. This greatly reduces
         * the total number of FlatViews in the system, hence the VM runs
         * faster.
         */
        memory_region_init_alias(&sdev->bypass_mr, OBJECT(s),
                                 "system", get_system_memory(), 0,
                                 memory_region_size(get_system_memory()));

        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);

        /*
         * Hook both containers under the root container; we switch
         * between the iommu and bypass MRs by enabling/disabling the
         * corresponding sub-container.
         */
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            MEMORY_REGION(&sdev->iommu_mr),
                                            0);
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            &sdev->bypass_mr, 0);

        virtio_iommu_switch_address_space(sdev);
        g_free(name);
    }
    return &sdev->as;
}

static const PCIIOMMUOps virtio_iommu_ops = {
    .get_address_space = virtio_iommu_find_add_as,
};

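/*
 * Handle an ATTACH request: attach the endpoint to the domain, detaching
 * it first from any previous domain, then replay the domain's mappings on
 * the endpoint's IOMMU memory region.
 */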
static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    IOMMUDevice *sdev;

    trace_virtio_iommu_attach(domain_id, ep_id);

    if (flags & ~VIRTIO_IOMMU_ATTACH_F_BYPASS) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * The endpoint is already attached to a domain;
         * detach it first.
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id,
                                     flags & VIRTIO_IOMMU_ATTACH_F_BYPASS);
    if (!domain) {
        /* Incompatible bypass flag */
        return VIRTIO_IOMMU_S_INVAL;
    }
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;
    sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
    virtio_iommu_switch_address_space(sdev);

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

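/*
 * Handle a DETACH request: detach the endpoint from its domain and drop
 * the domain once its endpoint list becomes empty.
 */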
static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}

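/*
 * Handle a MAP request: record the [virt_start, virt_end] -> phys_start
 * mapping in the domain and notify all attached endpoints.
 */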
static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}

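/*
 * Handle an UNMAP request: remove all mappings fully contained in
 * [virt_start, virt_end]; a partially overlapping mapping is a
 * VIRTIO_IOMMU_S_RANGE error.
 */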
static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval.low = virt_start;
    interval.high = virt_end;

    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}

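/*
 * Fill @buf with one RESV_MEM probe property per reserved region of the
 * endpoint; return the number of bytes written, or -ENOSPC when @free is
 * too small.
 */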
static ssize_t virtio_iommu_fill_resv_mem_prop(IOMMUDevice *sdev, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    GList *l;

    total = size * g_list_length(sdev->resv_regions);
    if (total > free) {
        return -ENOSPC;
    }

    for (l = sdev->resv_regions; l; l = l->next) {
        ReservedRegion *reg = l->data;
        unsigned subtype = reg->type;
        Range *range = &reg->range;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(range_lob(range));
        prop.end = cpu_to_le64(range_upb(range));

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    IOMMUMemoryRegion *iommu_mr = virtio_iommu_mr(s, ep_id);
    size_t free = VIOMMU_PROBE_SIZE;
    IOMMUDevice *sdev;
    ssize_t count;

    if (!iommu_mr) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    sdev = container_of(iommu_mr, IOMMUDevice, iommu_mr);

    count = virtio_iommu_fill_resv_mem_prop(sdev, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;
    sdev->probe_done = true;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t payload_sz)
{
    size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);

    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}

#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,               \
                    sizeof(req) - sizeof(struct virtio_iommu_req_tail));\
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

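/*
 * For example, virtio_iommu_handle_req(attach) expands to
 * virtio_iommu_handle_attach(), which copies the request payload (minus
 * the tail) out of the iovec and, on success, calls virtio_iommu_attach().
 */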
virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}

static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;
    size_t sz;

    for (;;) {
        size_t output_size = sizeof(tail);

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_rec_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = buf + s->config.probe_size;
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_rec_mutex_unlock(&s->mutex);

out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        buf = NULL;
    }
}

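/*
 * Report a fault to the driver through the event virtqueue, provided the
 * guest has posted a buffer there.
 */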
static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}

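/*
 * Translate callback of the IOMMU memory region: resolve @addr against
 * the endpoint's domain mappings, honoring bypass mode and reserved
 * regions, and report a fault on failure.
 */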
static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    int granule;
    bool found;
    GList *l;

    interval.low = addr;
    interval.high = addr + 1;
    granule = ctz64(s->config.page_size_mask);

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = BIT_ULL(granule) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = s->config.bypass;

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_rec_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (l = sdev->resv_regions; l; l = l->next) {
        ReservedRegion *reg = l->data;

        if (range_contains(&reg->range, addr)) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    } else if (ep->domain->bypass) {
        entry.perm = flag;
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return entry;
}

static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    struct virtio_iommu_config *out_config = (void *)config_data;

    out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask);
    out_config->input_range.start = cpu_to_le64(dev_config->input_range.start);
    out_config->input_range.end = cpu_to_le64(dev_config->input_range.end);
    out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start);
    out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end);
    out_config->probe_size = cpu_to_le32(dev_config->probe_size);
    out_config->bypass = dev_config->bypass;

    trace_virtio_iommu_get_config(dev_config->page_size_mask,
                                  dev_config->input_range.start,
                                  dev_config->input_range.end,
                                  dev_config->domain_range.start,
                                  dev_config->domain_range.end,
                                  dev_config->probe_size,
                                  dev_config->bypass);
}

static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    const struct virtio_iommu_config *in_config = (void *)config_data;

    if (in_config->bypass != dev_config->bypass) {
        if (!virtio_vdev_has_feature(vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
            virtio_error(vdev, "cannot set config.bypass");
            return;
        } else if (in_config->bypass != 0 && in_config->bypass != 1) {
            virtio_error(vdev, "invalid config.bypass value '%u'",
                         in_config->bypass);
            return;
        }
        dev_config->bypass = in_config->bypass;
        virtio_iommu_switch_address_space_all(dev);
    }

    trace_virtio_iommu_set_config(in_config->bypass);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high,
                             mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
 * for example 0xfffffffffffff000. When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this restriction
 * to the mask.
 */
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu %s reports a page size mask 0x%"PRIx64
                   " incompatible with currently supported mask 0x%"PRIx64,
                   mr->parent_obj.name, new_mask, cur_mask);
        return -1;
    }

    /*
     * Once the granule is frozen we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it.
     */
    if (s->granule_frozen) {
        int cur_granule = ctz64(cur_mask);

        if (!(BIT_ULL(cur_granule) & new_mask)) {
            error_setg(errp, "virtio-iommu %s does not support frozen granule 0x%llx",
                       mr->parent_obj.name, BIT_ULL(cur_granule));
            return -1;
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

/**
 * rebuild_resv_regions: rebuild resv regions with both the
 * info of host resv ranges and property set resv ranges
 */
static int rebuild_resv_regions(IOMMUDevice *sdev)
{
    GList *l;
    int i = 0;

    /* free the existing list and rebuild it from scratch */
    g_list_free_full(sdev->resv_regions, g_free);
    sdev->resv_regions = NULL;

    /* First add host reserved regions if any, all tagged as RESERVED */
    for (l = sdev->host_resv_ranges; l; l = l->next) {
        ReservedRegion *reg = g_new0(ReservedRegion, 1);
        Range *r = (Range *)l->data;

        reg->type = VIRTIO_IOMMU_RESV_MEM_T_RESERVED;
        range_set_bounds(&reg->range, range_lob(r), range_upb(r));
        sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
        trace_virtio_iommu_host_resv_regions(sdev->iommu_mr.parent_obj.name, i,
                                             range_lob(&reg->range),
                                             range_upb(&reg->range));
        i++;
    }
    /*
     * then add higher priority reserved regions set by the machine
     * through properties
     */
    add_prop_resv_regions(sdev);
    return 0;
}

/**
 * virtio_iommu_set_iova_ranges: Conveys the usable IOVA ranges
 *
 * The function turns those into reserved ranges. Once some
 * reserved ranges have been set, new reserved regions cannot be
 * added outside of the original ones.
 *
 * @mr: IOMMU MR
 * @iova_ranges: list of usable IOVA ranges
 * @errp: error handle
 */
static int virtio_iommu_set_iova_ranges(IOMMUMemoryRegion *mr,
                                        GList *iova_ranges,
                                        Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    GList *current_ranges = sdev->host_resv_ranges;
    GList *l, *tmp, *new_ranges = NULL;
    int ret = -EINVAL;

    /* check that each new resv region is included in an existing one */
    if (sdev->host_resv_ranges) {
        range_inverse_array(iova_ranges,
                            &new_ranges,
                            0, UINT64_MAX);

        for (tmp = new_ranges; tmp; tmp = tmp->next) {
            Range *newr = (Range *)tmp->data;
            bool included = false;

            for (l = current_ranges; l; l = l->next) {
                Range *r = (Range *)l->data;

                if (range_contains_range(r, newr)) {
                    included = true;
                    break;
                }
            }
            if (!included) {
                goto error;
            }
        }
        /* all new reserved ranges are included in existing ones */
        ret = 0;
        goto out;
    }

    if (sdev->probe_done) {
        warn_report("%s: Notified about new host reserved regions after probe",
                    mr->parent_obj.name);
    }

    range_inverse_array(iova_ranges,
                        &sdev->host_resv_ranges,
                        0, UINT64_MAX);
    rebuild_resv_regions(sdev);

    return 0;
error:
    error_setg(errp, "IOMMU mr=%s Conflicting host reserved ranges set!",
               mr->parent_obj.name);
out:
    g_list_free_full(new_ranges, g_free);
    return ret;
}

static void virtio_iommu_system_reset(void *opaque)
{
    VirtIOIOMMU *s = opaque;

    trace_virtio_iommu_system_reset();

    /*
     * config.bypass is sticky across device reset, but should be restored on
     * system reset
     */
    s->config.bypass = s->boot_bypass;
    virtio_iommu_switch_address_space_all(s);
}

static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
{
    VirtIOIOMMU *s = container_of(notifier, VirtIOIOMMU, machine_done);
    int granule;

    if (likely(s->config.bypass)) {
        /*
         * Transient IOMMU MR enable to collect page_size_mask requirements
         * through memory_region_iommu_set_page_size_mask() called by
         * VFIO region_add() callback
         */
        s->config.bypass = false;
        virtio_iommu_switch_address_space_all(s);
        /* restore default */
        s->config.bypass = true;
        virtio_iommu_switch_address_space_all(s);
    }
    s->granule_frozen = true;
    granule = ctz64(s->config.page_size_mask);
    trace_virtio_iommu_freeze_granule(BIT_ULL(granule));
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config));

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    /*
     * config.bypass is needed to get initial address space early, such as
     * in vfio realize
     */
    s->config.bypass = s->boot_bypass;
    s->config.page_size_mask = qemu_target_page_mask();
    s->config.input_range.end = UINT64_MAX;
    s->config.domain_range.end = UINT32_MAX;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);

    qemu_rec_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, &virtio_iommu_ops, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }

    s->machine_done.notify = virtio_iommu_freeze_granule;
    qemu_add_machine_init_done_notifier(&s->machine_done);

    qemu_register_reset(virtio_iommu_system_reset, s);
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    qemu_unregister_reset(virtio_iommu_system_reset, s);
    qemu_remove_machine_init_done_notifier(&s->machine_done);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    qemu_rec_mutex_destroy(&s->mutex);

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (const VMStateField[]) {                 \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (const VMStateField[]) {                \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

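/*
 * pre_load: allocate the mappings tree so the incoming migration stream
 * can populate it.
 */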
static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_load = domain_preload,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_BOOL_V(bypass, VirtIOIOMMUDomain, 2),
        VMSTATE_END_OF_LIST()
    }
};

static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);

    /*
     * Memory regions are dynamically turned on/off depending on
     * 'config.bypass' and the type of the attached domain, if any.
     * After migration, we need to make sure the memory regions are
     * still correct.
     */
    virtio_iommu_switch_address_space_all(s);
    return 0;
}

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 2,
    .version_id = 2,
    .post_load = iommu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 2,
    .priority = MIG_PRI_IOMMU,
    .version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
    imrc->iommu_set_iova_ranges = virtio_iommu_set_iova_ranges;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)