xref: /openbmc/qemu/hw/virtio/vhost.c (revision 06d4c71f)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
23 #include "standard-headers/linux/vhost_types.h"
24 #include "exec/address-spaces.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/virtio/virtio-access.h"
27 #include "migration/blocker.h"
28 #include "migration/qemu-file-types.h"
29 #include "sysemu/dma.h"
30 #include "trace.h"
31 
32 /* enabled until disconnected backend stabilizes */
33 #define _VHOST_DEBUG 1
34 
35 #ifdef _VHOST_DEBUG
36 #define VHOST_OPS_DEBUG(fmt, ...) \
37     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38                       strerror(errno), errno); } while (0)
39 #else
40 #define VHOST_OPS_DEBUG(fmt, ...) \
41     do { } while (0)
42 #endif
43 
44 static struct vhost_log *vhost_log;
45 static struct vhost_log *vhost_log_shm;
46 
47 static unsigned int used_memslots;
48 static QLIST_HEAD(, vhost_dev) vhost_devices =
49     QLIST_HEAD_INITIALIZER(vhost_devices);
50 
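/* Returns true if every registered vhost backend can still accept at least
 * one more memory slot, i.e. used_memslots is below the smallest per-backend
 * memslot limit.
 */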
51 bool vhost_has_free_slot(void)
52 {
53     unsigned int slots_limit = ~0U;
54     struct vhost_dev *hdev;
55 
56     QLIST_FOREACH(hdev, &vhost_devices, entry) {
57         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
58         slots_limit = MIN(slots_limit, r);
59     }
60     return slots_limit > used_memslots;
61 }
62 
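/* Sync dirty bits from the vhost log into QEMU's dirty bitmap for the
 * intersection of the section range [mfirst, mlast] and the tracked range
 * [rfirst, rlast]: each set bit in a log chunk is atomically consumed and the
 * corresponding page is marked dirty in the section's MemoryRegion.
 */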
63 static void vhost_dev_sync_region(struct vhost_dev *dev,
64                                   MemoryRegionSection *section,
65                                   uint64_t mfirst, uint64_t mlast,
66                                   uint64_t rfirst, uint64_t rlast)
67 {
68     vhost_log_chunk_t *log = dev->log->log;
69 
70     uint64_t start = MAX(mfirst, rfirst);
71     uint64_t end = MIN(mlast, rlast);
72     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
73     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
74     uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
75 
76     if (end < start) {
77         return;
78     }
79     assert(end / VHOST_LOG_CHUNK < dev->log_size);
80     assert(start / VHOST_LOG_CHUNK < dev->log_size);
81 
82     for (;from < to; ++from) {
83         vhost_log_chunk_t log;
84         /* We first check with a non-atomic load: it's much cheaper,
85          * and we expect non-dirty to be the common case. */
86         if (!*from) {
87             addr += VHOST_LOG_CHUNK;
88             continue;
89         }
90         /* Data must be read atomically. We don't really need barrier semantics
91          * but it's easier to use atomic_* than to roll our own. */
92         log = atomic_xchg(from, 0);
93         while (log) {
94             int bit = ctzl(log);
95             hwaddr page_addr;
96             hwaddr section_offset;
97             hwaddr mr_offset;
98             page_addr = addr + bit * VHOST_LOG_PAGE;
99             section_offset = page_addr - section->offset_within_address_space;
100             mr_offset = section_offset + section->offset_within_region;
101             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
102             log &= ~(0x1ull << bit);
103         }
104         addr += VHOST_LOG_CHUNK;
105     }
106 }
107 
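/* Sync the dirty pages of one MemoryRegionSection, clamped to [first, last],
 * against every vhost memory region and every used ring that overlaps it.
 * Does nothing unless the device is started with dirty logging enabled.
 */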
108 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
109                                    MemoryRegionSection *section,
110                                    hwaddr first,
111                                    hwaddr last)
112 {
113     int i;
114     hwaddr start_addr;
115     hwaddr end_addr;
116 
117     if (!dev->log_enabled || !dev->started) {
118         return 0;
119     }
120     start_addr = section->offset_within_address_space;
121     end_addr = range_get_last(start_addr, int128_get64(section->size));
122     start_addr = MAX(first, start_addr);
123     end_addr = MIN(last, end_addr);
124 
125     for (i = 0; i < dev->mem->nregions; ++i) {
126         struct vhost_memory_region *reg = dev->mem->regions + i;
127         vhost_dev_sync_region(dev, section, start_addr, end_addr,
128                               reg->guest_phys_addr,
129                               range_get_last(reg->guest_phys_addr,
130                                              reg->memory_size));
131     }
132     for (i = 0; i < dev->nvqs; ++i) {
133         struct vhost_virtqueue *vq = dev->vqs + i;
134 
135         if (!vq->used_phys && !vq->used_size) {
136             continue;
137         }
138 
139         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
140                               range_get_last(vq->used_phys, vq->used_size));
141     }
142     return 0;
143 }
144 
145 static void vhost_log_sync(MemoryListener *listener,
146                           MemoryRegionSection *section)
147 {
148     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
149                                          memory_listener);
150     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
151 }
152 
153 static void vhost_log_sync_range(struct vhost_dev *dev,
154                                  hwaddr first, hwaddr last)
155 {
156     int i;
157     /* FIXME: this is N^2 in number of sections */
158     for (i = 0; i < dev->n_mem_sections; ++i) {
159         MemoryRegionSection *section = &dev->mem_sections[i];
160         vhost_sync_dirty_bitmap(dev, section, first, last);
161     }
162 }
163 
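/* Compute the dirty log size, in vhost_log_chunk_t entries, needed to cover
 * the highest guest physical address of any memory region or used ring.
 */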
164 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
165 {
166     uint64_t log_size = 0;
167     int i;
168     for (i = 0; i < dev->mem->nregions; ++i) {
169         struct vhost_memory_region *reg = dev->mem->regions + i;
170         uint64_t last = range_get_last(reg->guest_phys_addr,
171                                        reg->memory_size);
172         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
173     }
174     for (i = 0; i < dev->nvqs; ++i) {
175         struct vhost_virtqueue *vq = dev->vqs + i;
176 
177         if (!vq->used_phys && !vq->used_size) {
178             continue;
179         }
180 
181         uint64_t last = vq->used_phys + vq->used_size - 1;
182         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
183     }
184     return log_size;
185 }
186 
187 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
188 {
189     Error *err = NULL;
190     struct vhost_log *log;
191     uint64_t logsize = size * sizeof(*(log->log));
192     int fd = -1;
193 
194     log = g_new0(struct vhost_log, 1);
195     if (share) {
196         log->log = qemu_memfd_alloc("vhost-log", logsize,
197                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
198                                     &fd, &err);
199         if (err) {
200             error_report_err(err);
201             g_free(log);
202             return NULL;
203         }
204         memset(log->log, 0, logsize);
205     } else {
206         log->log = g_malloc0(logsize);
207     }
208 
209     log->size = size;
210     log->refcnt = 1;
211     log->fd = fd;
212 
213     return log;
214 }
215 
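/* Return a reference to the cached global log (memfd-backed when the backend
 * needs a shared log), allocating a fresh one when the requested size differs
 * from the cached one.
 */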
216 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
217 {
218     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
219 
220     if (!log || log->size != size) {
221         log = vhost_log_alloc(size, share);
222         if (share) {
223             vhost_log_shm = log;
224         } else {
225             vhost_log = log;
226         }
227     } else {
228         ++log->refcnt;
229     }
230 
231     return log;
232 }
233 
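/* Drop the device's reference on its dirty log. When the last reference goes
 * away, optionally sync the range the old log covered and free the log.
 */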
234 static void vhost_log_put(struct vhost_dev *dev, bool sync)
235 {
236     struct vhost_log *log = dev->log;
237 
238     if (!log) {
239         return;
240     }
241 
242     --log->refcnt;
243     if (log->refcnt == 0) {
244         /* Sync only the range covered by the old log */
245         if (dev->log_size && sync) {
246             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
247         }
248 
249         if (vhost_log == log) {
250             g_free(log->log);
251             vhost_log = NULL;
252         } else if (vhost_log_shm == log) {
253             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
254                             log->fd);
255             vhost_log_shm = NULL;
256         }
257 
258         g_free(log);
259     }
260 
261     dev->log = NULL;
262     dev->log_size = 0;
263 }
264 
265 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
266 {
267     return dev->vhost_ops->vhost_requires_shm_log &&
268            dev->vhost_ops->vhost_requires_shm_log(dev);
269 }
270 
271 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
272 {
273     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
274     uint64_t log_base = (uintptr_t)log->log;
275     int r;
276 
277     /* Inform the backend of the log switch; this must be done before
278        releasing the current log, to ensure no logging is lost */
279     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
280     if (r < 0) {
281         VHOST_OPS_DEBUG("vhost_set_log_base failed");
282     }
283 
284     vhost_log_put(dev, true);
285     dev->log = log;
286     dev->log_size = size;
287 }
288 
289 static int vhost_dev_has_iommu(struct vhost_dev *dev)
290 {
291     VirtIODevice *vdev = dev->vdev;
292 
293     /*
294      * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports an
295      * incremental memory mapping API via the IOTLB API. For platforms that
296      * do not have an IOMMU, there's no need to enable this feature,
297      * which may cause unnecessary IOTLB miss/update transactions.
298      */
299     return vdev->dma_as != &address_space_memory &&
300            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
301 }
302 
303 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
304                               hwaddr *plen, bool is_write)
305 {
306     if (!vhost_dev_has_iommu(dev)) {
307         return cpu_physical_memory_map(addr, plen, is_write);
308     } else {
309         return (void *)(uintptr_t)addr;
310     }
311 }
312 
313 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
314                                hwaddr len, int is_write,
315                                hwaddr access_len)
316 {
317     if (!vhost_dev_has_iommu(dev)) {
318         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
319     }
320 }
321 
322 static int vhost_verify_ring_part_mapping(void *ring_hva,
323                                           uint64_t ring_gpa,
324                                           uint64_t ring_size,
325                                           void *reg_hva,
326                                           uint64_t reg_gpa,
327                                           uint64_t reg_size)
328 {
329     uint64_t hva_ring_offset;
330     uint64_t ring_last = range_get_last(ring_gpa, ring_size);
331     uint64_t reg_last = range_get_last(reg_gpa, reg_size);
332 
333     if (ring_last < reg_gpa || ring_gpa > reg_last) {
334         return 0;
335     }
336     /* check that the whole ring is mapped */
337     if (ring_last > reg_last) {
338         return -ENOMEM;
339     }
340     /* check that ring's MemoryRegion wasn't replaced */
341     hva_ring_offset = ring_gpa - reg_gpa;
342     if (ring_hva != reg_hva + hva_ring_offset) {
343         return -EBUSY;
344     }
345 
346     return 0;
347 }
348 
349 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
350                                       void *reg_hva,
351                                       uint64_t reg_gpa,
352                                       uint64_t reg_size)
353 {
354     int i, j;
355     int r = 0;
356     const char *part_name[] = {
357         "descriptor table",
358         "available ring",
359         "used ring"
360     };
361 
362     if (vhost_dev_has_iommu(dev)) {
363         return 0;
364     }
365 
366     for (i = 0; i < dev->nvqs; ++i) {
367         struct vhost_virtqueue *vq = dev->vqs + i;
368 
369         if (vq->desc_phys == 0) {
370             continue;
371         }
372 
373         j = 0;
374         r = vhost_verify_ring_part_mapping(
375                 vq->desc, vq->desc_phys, vq->desc_size,
376                 reg_hva, reg_gpa, reg_size);
377         if (r) {
378             break;
379         }
380 
381         j++;
382         r = vhost_verify_ring_part_mapping(
383                 vq->avail, vq->avail_phys, vq->avail_size,
384                 reg_hva, reg_gpa, reg_size);
385         if (r) {
386             break;
387         }
388 
389         j++;
390         r = vhost_verify_ring_part_mapping(
391                 vq->used, vq->used_phys, vq->used_size,
392                 reg_hva, reg_gpa, reg_size);
393         if (r) {
394             break;
395         }
396     }
397 
398     if (r == -ENOMEM) {
399         error_report("Unable to map %s for ring %d", part_name[j], i);
400     } else if (r == -EBUSY) {
401         error_report("%s relocated for ring %d", part_name[j], i);
402     }
403     return r;
404 }
405 
406 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
407 {
408     bool result;
409     bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
410                      ~(1 << DIRTY_MEMORY_MIGRATION);
411     result = memory_region_is_ram(section->mr) &&
412         !memory_region_is_rom(section->mr);
413 
414     /* Vhost doesn't handle any block which is doing dirty-tracking other
415      * than migration; this typically fires on VGA areas.
416      */
417     result &= !log_dirty;
418 
419     if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
420         result &=
421             dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
422     }
423 
424     trace_vhost_section(section->mr->name, result);
425     return result;
426 }
427 
428 static void vhost_begin(MemoryListener *listener)
429 {
430     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
431                                          memory_listener);
432     dev->tmp_sections = NULL;
433     dev->n_tmp_sections = 0;
434 }
435 
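/* Commit the section list built up by vhost_begin()/vhost_region_addnop().
 * If it differs from the previous list, rebuild the vhost memory table,
 * re-verify the ring mappings, grow the dirty log if needed and push the new
 * table to the backend.
 */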
436 static void vhost_commit(MemoryListener *listener)
437 {
438     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
439                                          memory_listener);
440     MemoryRegionSection *old_sections;
441     int n_old_sections;
442     uint64_t log_size;
443     size_t regions_size;
444     int r;
445     int i;
446     bool changed = false;
447 
448     /* Note we can be called before the device is started, but then
449      * starting the device calls set_mem_table, so we need to have
450      * built the data structures.
451      */
452     old_sections = dev->mem_sections;
453     n_old_sections = dev->n_mem_sections;
454     dev->mem_sections = dev->tmp_sections;
455     dev->n_mem_sections = dev->n_tmp_sections;
456 
457     if (dev->n_mem_sections != n_old_sections) {
458         changed = true;
459     } else {
460         /* Same size, let's check the contents */
461         for (int i = 0; i < n_old_sections; i++) {
462             if (!MemoryRegionSection_eq(&old_sections[i],
463                                         &dev->mem_sections[i])) {
464                 changed = true;
465                 break;
466             }
467         }
468     }
469 
470     trace_vhost_commit(dev->started, changed);
471     if (!changed) {
472         goto out;
473     }
474 
475     /* Rebuild the regions list from the new sections list */
476     regions_size = offsetof(struct vhost_memory, regions) +
477                        dev->n_mem_sections * sizeof dev->mem->regions[0];
478     dev->mem = g_realloc(dev->mem, regions_size);
479     dev->mem->nregions = dev->n_mem_sections;
480     used_memslots = dev->mem->nregions;
481     for (i = 0; i < dev->n_mem_sections; i++) {
482         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
483         struct MemoryRegionSection *mrs = dev->mem_sections + i;
484 
485         cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
486         cur_vmr->memory_size     = int128_get64(mrs->size);
487         cur_vmr->userspace_addr  =
488             (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
489             mrs->offset_within_region;
490         cur_vmr->flags_padding   = 0;
491     }
492 
493     if (!dev->started) {
494         goto out;
495     }
496 
497     for (i = 0; i < dev->mem->nregions; i++) {
498         if (vhost_verify_ring_mappings(dev,
499                        (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
500                        dev->mem->regions[i].guest_phys_addr,
501                        dev->mem->regions[i].memory_size)) {
502             error_report("Verify ring failure on region %d", i);
503             abort();
504         }
505     }
506 
507     if (!dev->log_enabled) {
508         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
509         if (r < 0) {
510             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
511         }
512         goto out;
513     }
514     log_size = vhost_get_log_size(dev);
515     /* We allocate an extra 4K bytes of log
516      * to reduce the number of reallocations. */
517 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
518     /* To log more, must increase log size before table update. */
519     if (dev->log_size < log_size) {
520         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
521     }
522     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
523     if (r < 0) {
524         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
525     }
526     /* To log less, can only decrease log size after table update. */
527     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
528         vhost_dev_log_resize(dev, log_size);
529     }
530 
531 out:
532     /* Deref the old list of sections, this must happen _after_ the
533      * vhost_set_mem_table to ensure the client isn't still using the
534      * section we're about to unref.
535      */
536     while (n_old_sections--) {
537         memory_region_unref(old_sections[n_old_sections].mr);
538     }
539     g_free(old_sections);
540     return;
541 }
542 
543 /* Adds the section data to the tmp_sections list.
544  * It relies on the listener calling us in memory address order
545  * for each region (via the _add and _nop methods) so that
546  * neighbouring sections can be joined.
547  */
548 static void vhost_region_add_section(struct vhost_dev *dev,
549                                      MemoryRegionSection *section)
550 {
551     bool need_add = true;
552     uint64_t mrs_size = int128_get64(section->size);
553     uint64_t mrs_gpa = section->offset_within_address_space;
554     uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
555                          section->offset_within_region;
556     RAMBlock *mrs_rb = section->mr->ram_block;
557 
558     trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
559                                    mrs_host);
560 
561     if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
562         /* Round the section to its page size */
563         /* First align the start down to a page boundary */
564         size_t mrs_page = qemu_ram_pagesize(mrs_rb);
565         uint64_t alignage = mrs_host & (mrs_page - 1);
566         if (alignage) {
567             mrs_host -= alignage;
568             mrs_size += alignage;
569             mrs_gpa  -= alignage;
570         }
571         /* Now align the size up to a page boundary */
572         alignage = mrs_size & (mrs_page - 1);
573         if (alignage) {
574             mrs_size += mrs_page - alignage;
575         }
576         trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
577                                                mrs_size, mrs_host);
578     }
579 
580     if (dev->n_tmp_sections) {
581         /* Since we already have at least one section, let's see if
582          * this extends it; since we're scanning in order, we only
583          * have to look at the last one, and the FlatView that calls
584          * us shouldn't have overlaps.
585          */
586         MemoryRegionSection *prev_sec = dev->tmp_sections +
587                                                (dev->n_tmp_sections - 1);
588         uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
589         uint64_t prev_size = int128_get64(prev_sec->size);
590         uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
591         uint64_t prev_host_start =
592                         (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
593                         prev_sec->offset_within_region;
594         uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
595 
596         if (mrs_gpa <= (prev_gpa_end + 1)) {
597             /* OK, looks like overlapping/intersecting - it's possible that
598              * the rounding to page sizes has made them overlap, but they should
599              * match up in the same RAMBlock if they do.
600              */
601             if (mrs_gpa < prev_gpa_start) {
602                 error_report("%s:Section '%s' rounded to %"PRIx64
603                              " prior to previous '%s' %"PRIx64,
604                              __func__, section->mr->name, mrs_gpa,
605                              prev_sec->mr->name, prev_gpa_start);
606                 /* A way to cleanly fail here would be better */
607                 return;
608             }
609             /* Offset from the start of the previous GPA to this GPA */
610             size_t offset = mrs_gpa - prev_gpa_start;
611 
612             if (prev_host_start + offset == mrs_host &&
613                 section->mr == prev_sec->mr &&
614                 (!dev->vhost_ops->vhost_backend_can_merge ||
615                  dev->vhost_ops->vhost_backend_can_merge(dev,
616                     mrs_host, mrs_size,
617                     prev_host_start, prev_size))) {
618                 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
619                 need_add = false;
620                 prev_sec->offset_within_address_space =
621                     MIN(prev_gpa_start, mrs_gpa);
622                 prev_sec->offset_within_region =
623                     MIN(prev_host_start, mrs_host) -
624                     (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
625                 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
626                                                mrs_host));
627                 trace_vhost_region_add_section_merge(section->mr->name,
628                                         int128_get64(prev_sec->size),
629                                         prev_sec->offset_within_address_space,
630                                         prev_sec->offset_within_region);
631             } else {
632                 /* adjoining regions are fine, but overlapping ones with
633                  * different blocks/offsets shouldn't happen
634                  */
635                 if (mrs_gpa != prev_gpa_end + 1) {
636                     error_report("%s: Overlapping but not coherent sections "
637                                  "at %"PRIx64,
638                                  __func__, mrs_gpa);
639                     return;
640                 }
641             }
642         }
643     }
644 
645     if (need_add) {
646         ++dev->n_tmp_sections;
647         dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
648                                     dev->n_tmp_sections);
649         dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
650         /* The flatview isn't stable and we don't use it; making it NULL
651          * means we can memcmp the list.
652          */
653         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
654         memory_region_ref(section->mr);
655     }
656 }
657 
658 /* Used for both add and nop callbacks */
659 static void vhost_region_addnop(MemoryListener *listener,
660                                 MemoryRegionSection *section)
661 {
662     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
663                                          memory_listener);
664 
665     if (!vhost_section(dev, section)) {
666         return;
667     }
668     vhost_region_add_section(dev, section);
669 }
670 
671 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
672 {
673     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
674     struct vhost_dev *hdev = iommu->hdev;
675     hwaddr iova = iotlb->iova + iommu->iommu_offset;
676 
677     if (vhost_backend_invalidate_device_iotlb(hdev, iova,
678                                               iotlb->addr_mask + 1)) {
679         error_report("Failed to invalidate device iotlb");
680     }
681 }
682 
683 static void vhost_iommu_region_add(MemoryListener *listener,
684                                    MemoryRegionSection *section)
685 {
686     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
687                                          iommu_listener);
688     struct vhost_iommu *iommu;
689     Int128 end;
690     int iommu_idx, ret;
691     IOMMUMemoryRegion *iommu_mr;
692     Error *err = NULL;
693 
694     if (!memory_region_is_iommu(section->mr)) {
695         return;
696     }
697 
698     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
699 
700     iommu = g_malloc0(sizeof(*iommu));
701     end = int128_add(int128_make64(section->offset_within_region),
702                      section->size);
703     end = int128_sub(end, int128_one());
704     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
705                                                    MEMTXATTRS_UNSPECIFIED);
706     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
707                         IOMMU_NOTIFIER_UNMAP,
708                         section->offset_within_region,
709                         int128_get64(end),
710                         iommu_idx);
711     iommu->mr = section->mr;
712     iommu->iommu_offset = section->offset_within_address_space -
713                           section->offset_within_region;
714     iommu->hdev = dev;
715     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, &err);
716     if (ret) {
717         error_report_err(err);
718         exit(1);
719     }
720     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
721     /* TODO: can replay help performance here? */
722 }
723 
724 static void vhost_iommu_region_del(MemoryListener *listener,
725                                    MemoryRegionSection *section)
726 {
727     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
728                                          iommu_listener);
729     struct vhost_iommu *iommu;
730 
731     if (!memory_region_is_iommu(section->mr)) {
732         return;
733     }
734 
735     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
736         if (iommu->mr == section->mr &&
737             iommu->n.start == section->offset_within_region) {
738             memory_region_unregister_iommu_notifier(iommu->mr,
739                                                     &iommu->n);
740             QLIST_REMOVE(iommu, iommu_next);
741             g_free(iommu);
742             break;
743         }
744     }
745 }
746 
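/* Tell the backend where the descriptor table, avail ring and used ring of
 * virtqueue 'idx' live in our address space, and whether the used ring must
 * be dirty-logged (at guest physical address vq->used_phys).
 */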
747 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
748                                     struct vhost_virtqueue *vq,
749                                     unsigned idx, bool enable_log)
750 {
751     struct vhost_vring_addr addr = {
752         .index = idx,
753         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
754         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
755         .used_user_addr = (uint64_t)(unsigned long)vq->used,
756         .log_guest_addr = vq->used_phys,
757         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
758     };
759     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
760     if (r < 0) {
761         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
762         return -errno;
763     }
764     return 0;
765 }
766 
767 static int vhost_dev_set_features(struct vhost_dev *dev,
768                                   bool enable_log)
769 {
770     uint64_t features = dev->acked_features;
771     int r;
772     if (enable_log) {
773         features |= 0x1ULL << VHOST_F_LOG_ALL;
774     }
775     if (!vhost_dev_has_iommu(dev)) {
776         features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
777     }
778     r = dev->vhost_ops->vhost_set_features(dev, features);
779     if (r < 0) {
780         VHOST_OPS_DEBUG("vhost_set_features failed");
781     }
782     return r < 0 ? -errno : 0;
783 }
784 
785 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
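/* Toggle VHOST_F_LOG_ALL in the backend and reprogram every virtqueue's ring
 * addresses with the matching log flag; on failure the previous settings are
 * restored.
 */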
786 {
787     int r, i, idx;
788     r = vhost_dev_set_features(dev, enable_log);
789     if (r < 0) {
790         goto err_features;
791     }
792     for (i = 0; i < dev->nvqs; ++i) {
793         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
794         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
795                                      enable_log);
796         if (r < 0) {
797             goto err_vq;
798         }
799     }
800     return 0;
801 err_vq:
802     for (; i >= 0; --i) {
803         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
804         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
805                                  dev->log_enabled);
806     }
807     vhost_dev_set_features(dev, dev->log_enabled);
808 err_features:
809     return r;
810 }
811 
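/* Called when migration turns dirty logging on or off globally. For a running
 * device this allocates (or releases) the dirty log and reconfigures the
 * backend; otherwise the new state is simply recorded for the next start.
 */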
812 static int vhost_migration_log(MemoryListener *listener, int enable)
813 {
814     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
815                                          memory_listener);
816     int r;
817     if (!!enable == dev->log_enabled) {
818         return 0;
819     }
820     if (!dev->started) {
821         dev->log_enabled = enable;
822         return 0;
823     }
824     if (!enable) {
825         r = vhost_dev_set_log(dev, false);
826         if (r < 0) {
827             return r;
828         }
829         vhost_log_put(dev, false);
830     } else {
831         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
832         r = vhost_dev_set_log(dev, true);
833         if (r < 0) {
834             return r;
835         }
836     }
837     dev->log_enabled = enable;
838     return 0;
839 }
840 
841 static void vhost_log_global_start(MemoryListener *listener)
842 {
843     int r;
844 
845     r = vhost_migration_log(listener, true);
846     if (r < 0) {
847         abort();
848     }
849 }
850 
851 static void vhost_log_global_stop(MemoryListener *listener)
852 {
853     int r;
854 
855     r = vhost_migration_log(listener, false);
856     if (r < 0) {
857         abort();
858     }
859 }
860 
861 static void vhost_log_start(MemoryListener *listener,
862                             MemoryRegionSection *section,
863                             int old, int new)
864 {
865     /* FIXME: implement */
866 }
867 
868 static void vhost_log_stop(MemoryListener *listener,
869                            MemoryRegionSection *section,
870                            int old, int new)
871 {
872     /* FIXME: implement */
873 }
874 
875 /* The vhost driver natively knows how to handle the vrings of
876  * non-cross-endian legacy devices and modern devices. Only legacy devices
877  * exposed to a bi-endian guest may require the vhost driver to use a
878  * specific endianness.
879  */
880 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
881 {
882     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
883         return false;
884     }
885 #ifdef HOST_WORDS_BIGENDIAN
886     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
887 #else
888     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
889 #endif
890 }
891 
892 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
893                                                    bool is_big_endian,
894                                                    int vhost_vq_index)
895 {
896     struct vhost_vring_state s = {
897         .index = vhost_vq_index,
898         .num = is_big_endian
899     };
900 
901     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
902         return 0;
903     }
904 
905     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
906     if (errno == ENOTTY) {
907         error_report("vhost does not support cross-endian");
908         return -ENOSYS;
909     }
910 
911     return -errno;
912 }
913 
914 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
915                                       uint64_t gpa, uint64_t *uaddr,
916                                       uint64_t *len)
917 {
918     int i;
919 
920     for (i = 0; i < hdev->mem->nregions; i++) {
921         struct vhost_memory_region *reg = hdev->mem->regions + i;
922 
923         if (gpa >= reg->guest_phys_addr &&
924             reg->guest_phys_addr + reg->memory_size > gpa) {
925             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
926             *len = reg->guest_phys_addr + reg->memory_size - gpa;
927             return 0;
928         }
929     }
930 
931     return -EFAULT;
932 }
933 
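/* Handle an IOTLB miss reported by the backend: translate the IOVA through
 * the device's DMA address space, convert the result to a userspace address
 * via the vhost memory table and push the mapping into the backend's IOTLB.
 */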
934 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
935 {
936     IOMMUTLBEntry iotlb;
937     uint64_t uaddr, len;
938     int ret = -EFAULT;
939 
940     RCU_READ_LOCK_GUARD();
941 
942     trace_vhost_iotlb_miss(dev, 1);
943 
944     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
945                                           iova, write,
946                                           MEMTXATTRS_UNSPECIFIED);
947     if (iotlb.target_as != NULL) {
948         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
949                                          &uaddr, &len);
950         if (ret) {
951             trace_vhost_iotlb_miss(dev, 3);
952             error_report("Failed to look up the translated address "
953                          "%"PRIx64, iotlb.translated_addr);
954             goto out;
955         }
956 
957         len = MIN(iotlb.addr_mask + 1, len);
958         iova = iova & ~iotlb.addr_mask;
959 
960         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
961                                                 len, iotlb.perm);
962         if (ret) {
963             trace_vhost_iotlb_miss(dev, 4);
964             error_report("Failed to update device iotlb");
965             goto out;
966         }
967     }
968 
969     trace_vhost_iotlb_miss(dev, 2);
970 
971 out:
972     return ret;
973 }
974 
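/* Program one virtqueue in the backend: ring size and last avail index,
 * legacy cross-endian setup if required, the mapped ring addresses and the
 * kick eventfd; the call eventfd is wired up through vhost_virtqueue_mask(),
 * either here or later by the guest notifier masking code.
 */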
975 static int vhost_virtqueue_start(struct vhost_dev *dev,
976                                 struct VirtIODevice *vdev,
977                                 struct vhost_virtqueue *vq,
978                                 unsigned idx)
979 {
980     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
981     VirtioBusState *vbus = VIRTIO_BUS(qbus);
982     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
983     hwaddr s, l, a;
984     int r;
985     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
986     struct vhost_vring_file file = {
987         .index = vhost_vq_index
988     };
989     struct vhost_vring_state state = {
990         .index = vhost_vq_index
991     };
992     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
993 
994     a = virtio_queue_get_desc_addr(vdev, idx);
995     if (a == 0) {
996         /* Queue might not be ready for start */
997         return 0;
998     }
999 
1000     vq->num = state.num = virtio_queue_get_num(vdev, idx);
1001     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1002     if (r) {
1003         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
1004         return -errno;
1005     }
1006 
1007     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1008     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1009     if (r) {
1010         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
1011         return -errno;
1012     }
1013 
1014     if (vhost_needs_vring_endian(vdev)) {
1015         r = vhost_virtqueue_set_vring_endian_legacy(dev,
1016                                                     virtio_is_big_endian(vdev),
1017                                                     vhost_vq_index);
1018         if (r) {
1019             return -errno;
1020         }
1021     }
1022 
1023     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1024     vq->desc_phys = a;
1025     vq->desc = vhost_memory_map(dev, a, &l, false);
1026     if (!vq->desc || l != s) {
1027         r = -ENOMEM;
1028         goto fail_alloc_desc;
1029     }
1030     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1031     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1032     vq->avail = vhost_memory_map(dev, a, &l, false);
1033     if (!vq->avail || l != s) {
1034         r = -ENOMEM;
1035         goto fail_alloc_avail;
1036     }
1037     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1038     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1039     vq->used = vhost_memory_map(dev, a, &l, true);
1040     if (!vq->used || l != s) {
1041         r = -ENOMEM;
1042         goto fail_alloc_used;
1043     }
1044 
1045     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1046     if (r < 0) {
1047         r = -errno;
1048         goto fail_alloc;
1049     }
1050 
1051     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1052     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1053     if (r) {
1054         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1055         r = -errno;
1056         goto fail_kick;
1057     }
1058 
1059     /* Clear and discard previous events if any. */
1060     event_notifier_test_and_clear(&vq->masked_notifier);
1061 
1062     /* Init vring in unmasked state, unless guest_notifier_mask
1063      * will do it later.
1064      */
1065     if (!vdev->use_guest_notifier_mask) {
1066         /* TODO: check and handle errors. */
1067         vhost_virtqueue_mask(dev, vdev, idx, false);
1068     }
1069 
1070     if (k->query_guest_notifiers &&
1071         k->query_guest_notifiers(qbus->parent) &&
1072         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1073         file.fd = -1;
1074         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1075         if (r) {
1076             goto fail_vector;
1077         }
1078     }
1079 
1080     return 0;
1081 
1082 fail_vector:
1083 fail_kick:
1084 fail_alloc:
1085     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1086                        0, 0);
1087 fail_alloc_used:
1088     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1089                        0, 0);
1090 fail_alloc_avail:
1091     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1092                        0, 0);
1093 fail_alloc_desc:
1094     return r;
1095 }
1096 
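/* Stop one virtqueue: fetch the last avail index back from the backend so
 * virtio can resume processing in QEMU, undo any legacy cross-endian setup,
 * and unmap the rings.
 */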
1097 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1098                                     struct VirtIODevice *vdev,
1099                                     struct vhost_virtqueue *vq,
1100                                     unsigned idx)
1101 {
1102     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1103     struct vhost_vring_state state = {
1104         .index = vhost_vq_index,
1105     };
1106     int r;
1107 
1108     if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1109         /* Don't stop a virtqueue that might not have been started */
1110         return;
1111     }
1112 
1113     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1114     if (r < 0) {
1115         VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
1116         /* Connection to the backend is broken, so let's sync internal
1117          * last avail idx to the device used idx.
1118          */
1119         virtio_queue_restore_last_avail_idx(vdev, idx);
1120     } else {
1121         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1122     }
1123     virtio_queue_invalidate_signalled_used(vdev, idx);
1124     virtio_queue_update_used_idx(vdev, idx);
1125 
1126     /* In the cross-endian case, we need to reset the vring endianness to
1127      * native, as legacy devices expect it by default.
1128      */
1129     if (vhost_needs_vring_endian(vdev)) {
1130         vhost_virtqueue_set_vring_endian_legacy(dev,
1131                                                 !virtio_is_big_endian(vdev),
1132                                                 vhost_vq_index);
1133     }
1134 
1135     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1136                        1, virtio_queue_get_used_size(vdev, idx));
1137     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1138                        0, virtio_queue_get_avail_size(vdev, idx));
1139     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1140                        0, virtio_queue_get_desc_size(vdev, idx));
1141 }
1142 
1143 static void vhost_eventfd_add(MemoryListener *listener,
1144                               MemoryRegionSection *section,
1145                               bool match_data, uint64_t data, EventNotifier *e)
1146 {
1147 }
1148 
1149 static void vhost_eventfd_del(MemoryListener *listener,
1150                               MemoryRegionSection *section,
1151                               bool match_data, uint64_t data, EventNotifier *e)
1152 {
1153 }
1154 
1155 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1156                                                 int n, uint32_t timeout)
1157 {
1158     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1159     struct vhost_vring_state state = {
1160         .index = vhost_vq_index,
1161         .num = timeout,
1162     };
1163     int r;
1164 
1165     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1166         return -EINVAL;
1167     }
1168 
1169     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1170     if (r) {
1171         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1172         return r;
1173     }
1174 
1175     return 0;
1176 }
1177 
1178 static int vhost_virtqueue_init(struct vhost_dev *dev,
1179                                 struct vhost_virtqueue *vq, int n)
1180 {
1181     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1182     struct vhost_vring_file file = {
1183         .index = vhost_vq_index,
1184     };
1185     int r = event_notifier_init(&vq->masked_notifier, 0);
1186     if (r < 0) {
1187         return r;
1188     }
1189 
1190     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1191     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1192     if (r) {
1193         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1194         r = -errno;
1195         goto fail_call;
1196     }
1197 
1198     vq->dev = dev;
1199 
1200     return 0;
1201 fail_call:
1202     event_notifier_cleanup(&vq->masked_notifier);
1203     return r;
1204 }
1205 
1206 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1207 {
1208     event_notifier_cleanup(&vq->masked_notifier);
1209 }
1210 
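/* One-time backend setup: initialize the backend, take ownership, query its
 * features, initialize the per-virtqueue notifiers and optional busy-loop
 * timeouts, register the memory listener and, when dirty logging is not
 * usable, install a migration blocker.
 */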
1211 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1212                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1213 {
1214     uint64_t features;
1215     int i, r, n_initialized_vqs = 0;
1216     Error *local_err = NULL;
1217 
1218     hdev->vdev = NULL;
1219     hdev->migration_blocker = NULL;
1220 
1221     r = vhost_set_backend_type(hdev, backend_type);
1222     assert(r >= 0);
1223 
1224     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1225     if (r < 0) {
1226         goto fail;
1227     }
1228 
1229     r = hdev->vhost_ops->vhost_set_owner(hdev);
1230     if (r < 0) {
1231         VHOST_OPS_DEBUG("vhost_set_owner failed");
1232         goto fail;
1233     }
1234 
1235     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1236     if (r < 0) {
1237         VHOST_OPS_DEBUG("vhost_get_features failed");
1238         goto fail;
1239     }
1240 
1241     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1242         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1243         if (r < 0) {
1244             goto fail;
1245         }
1246     }
1247 
1248     if (busyloop_timeout) {
1249         for (i = 0; i < hdev->nvqs; ++i) {
1250             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1251                                                      busyloop_timeout);
1252             if (r < 0) {
1253                 goto fail_busyloop;
1254             }
1255         }
1256     }
1257 
1258     hdev->features = features;
1259 
1260     hdev->memory_listener = (MemoryListener) {
1261         .begin = vhost_begin,
1262         .commit = vhost_commit,
1263         .region_add = vhost_region_addnop,
1264         .region_nop = vhost_region_addnop,
1265         .log_start = vhost_log_start,
1266         .log_stop = vhost_log_stop,
1267         .log_sync = vhost_log_sync,
1268         .log_global_start = vhost_log_global_start,
1269         .log_global_stop = vhost_log_global_stop,
1270         .eventfd_add = vhost_eventfd_add,
1271         .eventfd_del = vhost_eventfd_del,
1272         .priority = 10
1273     };
1274 
1275     hdev->iommu_listener = (MemoryListener) {
1276         .region_add = vhost_iommu_region_add,
1277         .region_del = vhost_iommu_region_del,
1278     };
1279 
1280     if (hdev->migration_blocker == NULL) {
1281         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1282             error_setg(&hdev->migration_blocker,
1283                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1284         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1285             error_setg(&hdev->migration_blocker,
1286                        "Migration disabled: failed to allocate shared memory");
1287         }
1288     }
1289 
1290     if (hdev->migration_blocker != NULL) {
1291         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1292         if (local_err) {
1293             error_report_err(local_err);
1294             error_free(hdev->migration_blocker);
1295             goto fail_busyloop;
1296         }
1297     }
1298 
1299     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1300     hdev->n_mem_sections = 0;
1301     hdev->mem_sections = NULL;
1302     hdev->log = NULL;
1303     hdev->log_size = 0;
1304     hdev->log_enabled = false;
1305     hdev->started = false;
1306     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1307     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1308 
1309     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1310         error_report("vhost backend memory slots limit is less"
1311                 " than the current number of present memory slots");
1312         r = -1;
1313         if (busyloop_timeout) {
1314             goto fail_busyloop;
1315         } else {
1316             goto fail;
1317         }
1318     }
1319 
1320     return 0;
1321 
1322 fail_busyloop:
1323     while (--i >= 0) {
1324         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1325     }
1326 fail:
1327     hdev->nvqs = n_initialized_vqs;
1328     vhost_dev_cleanup(hdev);
1329     return r;
1330 }
1331 
1332 void vhost_dev_cleanup(struct vhost_dev *hdev)
1333 {
1334     int i;
1335 
1336     for (i = 0; i < hdev->nvqs; ++i) {
1337         vhost_virtqueue_cleanup(hdev->vqs + i);
1338     }
1339     if (hdev->mem) {
1340         /* those are only safe after successful init */
1341         memory_listener_unregister(&hdev->memory_listener);
1342         QLIST_REMOVE(hdev, entry);
1343     }
1344     if (hdev->migration_blocker) {
1345         migrate_del_blocker(hdev->migration_blocker);
1346         error_free(hdev->migration_blocker);
1347     }
1348     g_free(hdev->mem);
1349     g_free(hdev->mem_sections);
1350     if (hdev->vhost_ops) {
1351         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1352     }
1353     assert(!hdev->log);
1354 
1355     memset(hdev, 0, sizeof(struct vhost_dev));
1356 }
1357 
1358 /* Stop processing guest IO notifications in qemu.
1359  * Start processing them in the vhost backend instead.
1360  */
1361 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1362 {
1363     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1364     int i, r, e;
1365 
1366     /* We will pass the notifiers to the kernel; make sure that QEMU
1367      * doesn't interfere.
1368      */
1369     r = virtio_device_grab_ioeventfd(vdev);
1370     if (r < 0) {
1371         error_report("binding does not support host notifiers");
1372         goto fail;
1373     }
1374 
1375     for (i = 0; i < hdev->nvqs; ++i) {
1376         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1377                                          true);
1378         if (r < 0) {
1379             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1380             goto fail_vq;
1381         }
1382     }
1383 
1384     return 0;
1385 fail_vq:
1386     while (--i >= 0) {
1387         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1388                                          false);
1389         if (e < 0) {
1390             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1391         }
1392         assert (e >= 0);
1393         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1394     }
1395     virtio_device_release_ioeventfd(vdev);
1396 fail:
1397     return r;
1398 }
1399 
1400 /* Stop processing guest IO notifications in vhost.
1401  * Start processing them in qemu.
1402  * This might actually run the qemu handlers right away,
1403  * so virtio in qemu must be completely set up when this is called.
1404  */
1405 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1406 {
1407     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1408     int i, r;
1409 
1410     for (i = 0; i < hdev->nvqs; ++i) {
1411         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1412                                          false);
1413         if (r < 0) {
1414             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1415         }
1416         assert (r >= 0);
1417         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1418     }
1419     virtio_device_release_ioeventfd(vdev);
1420 }
1421 
1422 /* Test and clear event pending status.
1423  * Should be called after unmask to avoid losing events.
1424  */
1425 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1426 {
1427     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1428     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1429     return event_notifier_test_and_clear(&vq->masked_notifier);
1430 }
1431 
1432 /* Mask/unmask events from this vq. */
1433 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1434                          bool mask)
1435 {
1436     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1437     int r, index = n - hdev->vq_index;
1438     struct vhost_vring_file file;
1439 
1440     /* should only be called after backend is connected */
1441     assert(hdev->vhost_ops);
1442 
1443     if (mask) {
1444         assert(vdev->use_guest_notifier_mask);
1445         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1446     } else {
1447         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1448     }
1449 
1450     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1451     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1452     if (r < 0) {
1453         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1454     }
1455 }
1456 
1457 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1458                             uint64_t features)
1459 {
1460     const int *bit = feature_bits;
1461     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1462         uint64_t bit_mask = (1ULL << *bit);
1463         if (!(hdev->features & bit_mask)) {
1464             features &= ~bit_mask;
1465         }
1466         bit++;
1467     }
1468     return features;
1469 }
1470 
1471 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1472                         uint64_t features)
1473 {
1474     const int *bit = feature_bits;
1475     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1476         uint64_t bit_mask = (1ULL << *bit);
1477         if (features & bit_mask) {
1478             hdev->acked_features |= bit_mask;
1479         }
1480         bit++;
1481     }
1482 }
1483 
1484 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1485                          uint32_t config_len)
1486 {
1487     assert(hdev->vhost_ops);
1488 
1489     if (hdev->vhost_ops->vhost_get_config) {
1490         return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1491     }
1492 
1493     return -1;
1494 }
1495 
1496 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1497                          uint32_t offset, uint32_t size, uint32_t flags)
1498 {
1499     assert(hdev->vhost_ops);
1500 
1501     if (hdev->vhost_ops->vhost_set_config) {
1502         return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1503                                                  size, flags);
1504     }
1505 
1506     return -1;
1507 }
1508 
1509 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1510                                    const VhostDevConfigOps *ops)
1511 {
1512     hdev->config_ops = ops;
1513 }
1514 
1515 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1516 {
1517     if (inflight && inflight->addr) {
1518         qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1519         inflight->addr = NULL;
1520         inflight->fd = -1;
1521     }
1522 }
1523 
1524 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1525                                      uint64_t new_size)
1526 {
1527     Error *err = NULL;
1528     int fd = -1;
1529     void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1530                                   F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1531                                   &fd, &err);
1532 
1533     if (err) {
1534         error_report_err(err);
1535         return -1;
1536     }
1537 
1538     vhost_dev_free_inflight(inflight);
1539     inflight->offset = 0;
1540     inflight->addr = addr;
1541     inflight->fd = fd;
1542     inflight->size = new_size;
1543 
1544     return 0;
1545 }
1546 
1547 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1548 {
1549     if (inflight->addr) {
1550         qemu_put_be64(f, inflight->size);
1551         qemu_put_be16(f, inflight->queue_size);
1552         qemu_put_buffer(f, inflight->addr, inflight->size);
1553     } else {
1554         qemu_put_be64(f, 0);
1555     }
1556 }
1557 
1558 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1559 {
1560     uint64_t size;
1561 
1562     size = qemu_get_be64(f);
1563     if (!size) {
1564         return 0;
1565     }
1566 
1567     if (inflight->size != size) {
1568         if (vhost_dev_resize_inflight(inflight, size)) {
1569             return -1;
1570         }
1571     }
1572     inflight->queue_size = qemu_get_be16(f);
1573 
1574     qemu_get_buffer(f, inflight->addr, size);
1575 
1576     return 0;
1577 }
1578 
1579 int vhost_dev_set_inflight(struct vhost_dev *dev,
1580                            struct vhost_inflight *inflight)
1581 {
1582     int r;
1583 
1584     if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1585         r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1586         if (r) {
1587             VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1588             return -errno;
1589         }
1590     }
1591 
1592     return 0;
1593 }
1594 
1595 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1596                            struct vhost_inflight *inflight)
1597 {
1598     int r;
1599 
1600     if (dev->vhost_ops->vhost_get_inflight_fd) {
1601         r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1602         if (r) {
1603             VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1604             return -errno;
1605         }
1606     }
1607 
1608     return 0;
1609 }
1610 
1611 /* Host notifiers must be enabled at this point. */
1612 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1613 {
1614     int i, r;
1615 
1616     /* should only be called after backend is connected */
1617     assert(hdev->vhost_ops);
1618 
1619     hdev->started = true;
1620     hdev->vdev = vdev;
1621 
1622     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1623     if (r < 0) {
1624         goto fail_features;
1625     }
1626 
1627     if (vhost_dev_has_iommu(hdev)) {
1628         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1629     }
1630 
1631     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1632     if (r < 0) {
1633         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1634         r = -errno;
1635         goto fail_mem;
1636     }
1637     for (i = 0; i < hdev->nvqs; ++i) {
1638         r = vhost_virtqueue_start(hdev,
1639                                   vdev,
1640                                   hdev->vqs + i,
1641                                   hdev->vq_index + i);
1642         if (r < 0) {
1643             goto fail_vq;
1644         }
1645     }
1646 
1647     if (hdev->log_enabled) {
1648         uint64_t log_base;
1649 
1650         hdev->log_size = vhost_get_log_size(hdev);
1651         hdev->log = vhost_log_get(hdev->log_size,
1652                                   vhost_dev_log_is_shared(hdev));
1653         log_base = (uintptr_t)hdev->log->log;
1654         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1655                                                 hdev->log_size ? log_base : 0,
1656                                                 hdev->log);
1657         if (r < 0) {
1658             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1659             r = -errno;
1660             goto fail_log;
1661         }
1662     }
1663 
1664     if (vhost_dev_has_iommu(hdev)) {
1665         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1666 
1667         /* Update used ring information for the IOTLB to work correctly;
1668          * the vhost-kernel code requires this. */
1669         for (i = 0; i < hdev->nvqs; ++i) {
1670             struct vhost_virtqueue *vq = hdev->vqs + i;
1671             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1672         }
1673     }
1674     return 0;
1675 fail_log:
1676     vhost_log_put(hdev, false);
1677 fail_vq:
1678     while (--i >= 0) {
1679         vhost_virtqueue_stop(hdev,
1680                              vdev,
1681                              hdev->vqs + i,
1682                              hdev->vq_index + i);
1683     }
1684 
1685 fail_mem:
1686 fail_features:
1687 
1688     hdev->started = false;
1689     return r;
1690 }
1691 
1692 /* Host notifiers must be enabled at this point. */
1693 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1694 {
1695     int i;
1696 
1697     /* should only be called after backend is connected */
1698     assert(hdev->vhost_ops);
1699 
1700     for (i = 0; i < hdev->nvqs; ++i) {
1701         vhost_virtqueue_stop(hdev,
1702                              vdev,
1703                              hdev->vqs + i,
1704                              hdev->vq_index + i);
1705     }
1706 
1707     if (vhost_dev_has_iommu(hdev)) {
1708         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1709         memory_listener_unregister(&hdev->iommu_listener);
1710     }
1711     vhost_log_put(hdev, true);
1712     hdev->started = false;
1713     hdev->vdev = NULL;
1714 }
1715 
1716 int vhost_net_set_backend(struct vhost_dev *hdev,
1717                           struct vhost_vring_file *file)
1718 {
1719     if (hdev->vhost_ops->vhost_net_set_backend) {
1720         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1721     }
1722 
1723     return -1;
1724 }
1725