xref: /openbmc/qemu/hw/virtio/vhost.c (revision 80e5db30)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include <linux/vhost.h>
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/migration.h"
29 #include "sysemu/dma.h"
30 
31 /* enabled until disconnected backend stabilizes */
32 #define _VHOST_DEBUG 1
33 
34 #ifdef _VHOST_DEBUG
35 #define VHOST_OPS_DEBUG(fmt, ...) \
36     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
37                       strerror(errno), errno); } while (0)
38 #else
39 #define VHOST_OPS_DEBUG(fmt, ...) \
40     do { } while (0)
41 #endif
42 
43 static struct vhost_log *vhost_log;
44 static struct vhost_log *vhost_log_shm;
45 
46 static unsigned int used_memslots;
47 static QLIST_HEAD(, vhost_dev) vhost_devices =
48     QLIST_HEAD_INITIALIZER(vhost_devices);
49 
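/* Returns true if every registered vhost device's backend can accept at
 * least one more memory slot beyond the ones currently in use. */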
50 bool vhost_has_free_slot(void)
51 {
52     unsigned int slots_limit = ~0U;
53     struct vhost_dev *hdev;
54 
55     QLIST_FOREACH(hdev, &vhost_devices, entry) {
56         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
57         slots_limit = MIN(slots_limit, r);
58     }
59     return slots_limit > used_memslots;
60 }
61 
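/* Sync dirty pages for the intersection of the section range
 * [mfirst, mlast] and the region/ring range [rfirst, rlast]: atomically
 * consume each dirty log chunk and mark the corresponding guest pages
 * dirty in the backing memory region. */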
62 static void vhost_dev_sync_region(struct vhost_dev *dev,
63                                   MemoryRegionSection *section,
64                                   uint64_t mfirst, uint64_t mlast,
65                                   uint64_t rfirst, uint64_t rlast)
66 {
67     vhost_log_chunk_t *log = dev->log->log;
68 
69     uint64_t start = MAX(mfirst, rfirst);
70     uint64_t end = MIN(mlast, rlast);
71     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
72     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
73     uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
74 
75     if (end < start) {
76         return;
77     }
78     assert(end / VHOST_LOG_CHUNK < dev->log_size);
79     assert(start / VHOST_LOG_CHUNK < dev->log_size);
80 
81     for (; from < to; ++from) {
82         vhost_log_chunk_t log;
83         /* We first check with non-atomic: much cheaper,
84          * and we expect non-dirty to be the common case. */
85         if (!*from) {
86             addr += VHOST_LOG_CHUNK;
87             continue;
88         }
89         /* Data must be read atomically. We don't really need barrier semantics
90          * but it's easier to use atomic_* than roll our own. */
91         log = atomic_xchg(from, 0);
92         while (log) {
93             int bit = ctzl(log);
94             hwaddr page_addr;
95             hwaddr section_offset;
96             hwaddr mr_offset;
97             page_addr = addr + bit * VHOST_LOG_PAGE;
98             section_offset = page_addr - section->offset_within_address_space;
99             mr_offset = section_offset + section->offset_within_region;
100             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
101             log &= ~(0x1ull << bit);
102         }
103         addr += VHOST_LOG_CHUNK;
104     }
105 }
106 
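/* Sync the dirty bitmap for every vhost memory region and used ring that
 * overlaps this section, clipped to [first, last].  A no-op unless the
 * device is started with logging enabled. */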
107 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
108                                    MemoryRegionSection *section,
109                                    hwaddr first,
110                                    hwaddr last)
111 {
112     int i;
113     hwaddr start_addr;
114     hwaddr end_addr;
115 
116     if (!dev->log_enabled || !dev->started) {
117         return 0;
118     }
119     start_addr = section->offset_within_address_space;
120     end_addr = range_get_last(start_addr, int128_get64(section->size));
121     start_addr = MAX(first, start_addr);
122     end_addr = MIN(last, end_addr);
123 
124     for (i = 0; i < dev->mem->nregions; ++i) {
125         struct vhost_memory_region *reg = dev->mem->regions + i;
126         vhost_dev_sync_region(dev, section, start_addr, end_addr,
127                               reg->guest_phys_addr,
128                               range_get_last(reg->guest_phys_addr,
129                                              reg->memory_size));
130     }
131     for (i = 0; i < dev->nvqs; ++i) {
132         struct vhost_virtqueue *vq = dev->vqs + i;
133         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
134                               range_get_last(vq->used_phys, vq->used_size));
135     }
136     return 0;
137 }
138 
139 static void vhost_log_sync(MemoryListener *listener,
140                           MemoryRegionSection *section)
141 {
142     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
143                                          memory_listener);
144     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
145 }
146 
147 static void vhost_log_sync_range(struct vhost_dev *dev,
148                                  hwaddr first, hwaddr last)
149 {
150     int i;
151     /* FIXME: this is N^2 in number of sections */
152     for (i = 0; i < dev->n_mem_sections; ++i) {
153         MemoryRegionSection *section = &dev->mem_sections[i];
154         vhost_sync_dirty_bitmap(dev, section, first, last);
155     }
156 }
157 
158 /* Assign/unassign. Keep an unsorted array of non-overlapping
159  * memory regions in dev->mem. */
160 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
161                                       uint64_t start_addr,
162                                       uint64_t size)
163 {
164     int from, to, n = dev->mem->nregions;
165     /* Track overlapping/split regions for sanity checking. */
166     int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
167 
168     for (from = 0, to = 0; from < n; ++from, ++to) {
169         struct vhost_memory_region *reg = dev->mem->regions + to;
170         uint64_t reglast;
171         uint64_t memlast;
172         uint64_t change;
173 
174         /* clone old region */
175         if (to != from) {
176             memcpy(reg, dev->mem->regions + from, sizeof *reg);
177         }
178 
179         /* No overlap is simple */
180         if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
181                             start_addr, size)) {
182             continue;
183         }
184 
185         /* Split only happens if supplied region
186          * is in the middle of an existing one. Thus it can not
187          * overlap with any other existing region. */
188         assert(!split);
189 
190         reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
191         memlast = range_get_last(start_addr, size);
192 
193         /* Remove whole region */
194         if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
195             --dev->mem->nregions;
196             --to;
197             ++overlap_middle;
198             continue;
199         }
200 
201         /* Shrink region */
202         if (memlast >= reglast) {
203             reg->memory_size = start_addr - reg->guest_phys_addr;
204             assert(reg->memory_size);
205             assert(!overlap_end);
206             ++overlap_end;
207             continue;
208         }
209 
210         /* Shift region */
211         if (start_addr <= reg->guest_phys_addr) {
212             change = memlast + 1 - reg->guest_phys_addr;
213             reg->memory_size -= change;
214             reg->guest_phys_addr += change;
215             reg->userspace_addr += change;
216             assert(reg->memory_size);
217             assert(!overlap_start);
218             ++overlap_start;
219             continue;
220         }
221 
222         /* This only happens if supplied region
223          * is in the middle of an existing one. Thus it can not
224          * overlap with any other existing region. */
225         assert(!overlap_start);
226         assert(!overlap_end);
227         assert(!overlap_middle);
228         /* Split region: shrink first part, shift second part. */
229         memcpy(dev->mem->regions + n, reg, sizeof *reg);
230         reg->memory_size = start_addr - reg->guest_phys_addr;
231         assert(reg->memory_size);
232         change = memlast + 1 - reg->guest_phys_addr;
233         reg = dev->mem->regions + n;
234         reg->memory_size -= change;
235         assert(reg->memory_size);
236         reg->guest_phys_addr += change;
237         reg->userspace_addr += change;
238         /* Never add more than 1 region */
239         assert(dev->mem->nregions == n);
240         ++dev->mem->nregions;
241         ++split;
242     }
243 }
244 
245 /* Called after unassign, so no regions overlap the given range. */
246 static void vhost_dev_assign_memory(struct vhost_dev *dev,
247                                     uint64_t start_addr,
248                                     uint64_t size,
249                                     uint64_t uaddr)
250 {
251     int from, to;
252     struct vhost_memory_region *merged = NULL;
253     for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
254         struct vhost_memory_region *reg = dev->mem->regions + to;
255         uint64_t prlast, urlast;
256         uint64_t pmlast, umlast;
257         uint64_t s, e, u;
258 
259         /* clone old region */
260         if (to != from) {
261             memcpy(reg, dev->mem->regions + from, sizeof *reg);
262         }
263         prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
264         pmlast = range_get_last(start_addr, size);
265         urlast = range_get_last(reg->userspace_addr, reg->memory_size);
266         umlast = range_get_last(uaddr, size);
267 
268         /* check for overlapping regions: should never happen. */
269         assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
270         /* Not an adjacent or overlapping region - do not merge. */
271         if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
272             (pmlast + 1 != reg->guest_phys_addr ||
273              umlast + 1 != reg->userspace_addr)) {
274             continue;
275         }
276 
277         if (dev->vhost_ops->vhost_backend_can_merge &&
278             !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
279                                                      reg->userspace_addr,
280                                                      reg->memory_size)) {
281             continue;
282         }
283 
284         if (merged) {
285             --to;
286             assert(to >= 0);
287         } else {
288             merged = reg;
289         }
290         u = MIN(uaddr, reg->userspace_addr);
291         s = MIN(start_addr, reg->guest_phys_addr);
292         e = MAX(pmlast, prlast);
293         uaddr = merged->userspace_addr = u;
294         start_addr = merged->guest_phys_addr = s;
295         size = merged->memory_size = e - s + 1;
296         assert(merged->memory_size);
297     }
298 
299     if (!merged) {
300         struct vhost_memory_region *reg = dev->mem->regions + to;
301         memset(reg, 0, sizeof *reg);
302         reg->memory_size = size;
303         assert(reg->memory_size);
304         reg->guest_phys_addr = start_addr;
305         reg->userspace_addr = uaddr;
306         ++to;
307     }
308     assert(to <= dev->mem->nregions + 1);
309     dev->mem->nregions = to;
310 }
311 
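/* Compute the dirty log size (in chunks) needed to cover the highest
 * guest address referenced by the memory table and by the used rings. */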
312 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
313 {
314     uint64_t log_size = 0;
315     int i;
316     for (i = 0; i < dev->mem->nregions; ++i) {
317         struct vhost_memory_region *reg = dev->mem->regions + i;
318         uint64_t last = range_get_last(reg->guest_phys_addr,
319                                        reg->memory_size);
320         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
321     }
322     for (i = 0; i < dev->nvqs; ++i) {
323         struct vhost_virtqueue *vq = dev->vqs + i;
324         uint64_t last = vq->used_phys + vq->used_size - 1;
325         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
326     }
327     return log_size;
328 }
329 
330 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
331 {
332     struct vhost_log *log;
333     uint64_t logsize = size * sizeof(*(log->log));
334     int fd = -1;
335 
336     log = g_new0(struct vhost_log, 1);
337     if (share) {
338         log->log = qemu_memfd_alloc("vhost-log", logsize,
339                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
340                                     &fd);
341         memset(log->log, 0, logsize);
342     } else {
343         log->log = g_malloc0(logsize);
344     }
345 
346     log->size = size;
347     log->refcnt = 1;
348     log->fd = fd;
349 
350     return log;
351 }
352 
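/* Return the global log (memfd-backed if shared, anonymous otherwise),
 * reusing it when the size matches and allocating a replacement otherwise.
 * Callers drop their reference with vhost_log_put(). */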
353 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
354 {
355     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
356 
357     if (!log || log->size != size) {
358         log = vhost_log_alloc(size, share);
359         if (share) {
360             vhost_log_shm = log;
361         } else {
362             vhost_log = log;
363         }
364     } else {
365         ++log->refcnt;
366     }
367 
368     return log;
369 }
370 
371 static void vhost_log_put(struct vhost_dev *dev, bool sync)
372 {
373     struct vhost_log *log = dev->log;
374 
375     if (!log) {
376         return;
377     }
378     dev->log = NULL;
379     dev->log_size = 0;
380 
381     --log->refcnt;
382     if (log->refcnt == 0) {
383         /* Sync only the range covered by the old log */
384         if (dev->log_size && sync) {
385             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
386         }
387 
388         if (vhost_log == log) {
389             g_free(log->log);
390             vhost_log = NULL;
391         } else if (vhost_log_shm == log) {
392             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
393                             log->fd);
394             vhost_log_shm = NULL;
395         }
396 
397         g_free(log);
398     }
399 }
400 
401 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
402 {
403     return dev->vhost_ops->vhost_requires_shm_log &&
404            dev->vhost_ops->vhost_requires_shm_log(dev);
405 }
406 
407 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
408 {
409     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
410     uint64_t log_base = (uintptr_t)log->log;
411     int r;
412 
413     /* Inform the backend of the log switch; this must be done before
414        releasing the current log to ensure no logging is lost. */
415     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
416     if (r < 0) {
417         VHOST_OPS_DEBUG("vhost_set_log_base failed");
418     }
419 
420     vhost_log_put(dev, true);
421     dev->log = log;
422     dev->log_size = size;
423 }
424 
425 static int vhost_dev_has_iommu(struct vhost_dev *dev)
426 {
427     VirtIODevice *vdev = dev->vdev;
428     AddressSpace *dma_as = vdev->dma_as;
429 
430     return memory_region_is_iommu(dma_as->root) &&
431            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
432 }
433 
434 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
435                               hwaddr *plen, int is_write)
436 {
437     if (!vhost_dev_has_iommu(dev)) {
438         return cpu_physical_memory_map(addr, plen, is_write);
439     } else {
440         return (void *)(uintptr_t)addr;
441     }
442 }
443 
444 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
445                                hwaddr len, int is_write,
446                                hwaddr access_len)
447 {
448     if (!vhost_dev_has_iommu(dev)) {
449         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
450     }
451 }
452 
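/* Check that a ring part overlapping [start_addr, start_addr + size) can
 * still be mapped at its original host address and full length: returns
 * -ENOMEM if it can no longer be mapped, -EBUSY if it has moved. */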
453 static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
454                                           void *part,
455                                           uint64_t part_addr,
456                                           uint64_t part_size,
457                                           uint64_t start_addr,
458                                           uint64_t size)
459 {
460     hwaddr l;
461     void *p;
462     int r = 0;
463 
464     if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
465         return 0;
466     }
467     l = part_size;
468     p = vhost_memory_map(dev, part_addr, &l, 1);
469     if (!p || l != part_size) {
470         r = -ENOMEM;
471     }
472     if (p != part) {
473         r = -EBUSY;
474     }
475     vhost_memory_unmap(dev, p, l, 0, 0);
476     return r;
477 }
478 
479 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
480                                       uint64_t start_addr,
481                                       uint64_t size)
482 {
483     int i, j;
484     int r = 0;
485     const char *part_name[] = {
486         "descriptor table",
487         "available ring",
488         "used ring"
489     };
490 
491     for (i = 0; i < dev->nvqs; ++i) {
492         struct vhost_virtqueue *vq = dev->vqs + i;
493 
494         j = 0;
495         r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
496                                            vq->desc_size, start_addr, size);
497         if (r) {
498             break;
499         }
500 
501         j++;
502         r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
503                                            vq->avail_size, start_addr, size);
504         if (r) {
505             break;
506         }
507 
508         j++;
509         r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
510                                            vq->used_size, start_addr, size);
511         if (r) {
512             break;
513         }
514     }
515 
516     if (r == -ENOMEM) {
517         error_report("Unable to map %s for ring %d", part_name[j], i);
518     } else if (r == -EBUSY) {
519         error_report("%s relocated for ring %d", part_name[j], i);
520     }
521     return r;
522 }
523 
524 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
525 						      uint64_t start_addr,
526 						      uint64_t size)
527 {
528     int i, n = dev->mem->nregions;
529     for (i = 0; i < n; ++i) {
530         struct vhost_memory_region *reg = dev->mem->regions + i;
531         if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
532                            start_addr, size)) {
533             return reg;
534         }
535     }
536     return NULL;
537 }
538 
539 static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
540                                  uint64_t start_addr,
541                                  uint64_t size,
542                                  uint64_t uaddr)
543 {
544     struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
545     uint64_t reglast;
546     uint64_t memlast;
547 
548     if (!reg) {
549         return true;
550     }
551 
552     reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
553     memlast = range_get_last(start_addr, size);
554 
555     /* Need to extend region? */
556     if (start_addr < reg->guest_phys_addr || memlast > reglast) {
557         return true;
558     }
559     /* userspace_addr changed? */
560     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
561 }
562 
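/* Add or remove a memory section from the vhost memory table and widen the
 * changed-address range, so that vhost_commit() can later push a single
 * updated table to the backend. */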
563 static void vhost_set_memory(MemoryListener *listener,
564                              MemoryRegionSection *section,
565                              bool add)
566 {
567     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
568                                          memory_listener);
569     hwaddr start_addr = section->offset_within_address_space;
570     ram_addr_t size = int128_get64(section->size);
571     bool log_dirty =
572         memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
573     int s = offsetof(struct vhost_memory, regions) +
574         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
575     void *ram;
576 
577     dev->mem = g_realloc(dev->mem, s);
578 
579     if (log_dirty) {
580         add = false;
581     }
582 
583     assert(size);
584 
585     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
586     ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
587     if (add) {
588         if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
589             /* Region exists with same address. Nothing to do. */
590             return;
591         }
592     } else {
593         if (!vhost_dev_find_reg(dev, start_addr, size)) {
594             /* Removing region that we don't access. Nothing to do. */
595             return;
596         }
597     }
598 
599     vhost_dev_unassign_memory(dev, start_addr, size);
600     if (add) {
601         /* Add given mapping, merging adjacent regions if any */
602         vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
603     } else {
604         /* Remove old mapping for this memory, if any. */
605         vhost_dev_unassign_memory(dev, start_addr, size);
606     }
607     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
608     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
609     dev->memory_changed = true;
610     used_memslots = dev->mem->nregions;
611 }
612 
613 static bool vhost_section(MemoryRegionSection *section)
614 {
615     return memory_region_is_ram(section->mr);
616 }
617 
618 static void vhost_begin(MemoryListener *listener)
619 {
620     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
621                                          memory_listener);
622     dev->mem_changed_end_addr = 0;
623     dev->mem_changed_start_addr = -1;
624 }
625 
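/* Called at the end of a memory transaction: verify that the vrings are
 * still mapped, grow the dirty log before updating the memory table (and
 * shrink it only afterwards), then push the new table to the backend. */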
626 static void vhost_commit(MemoryListener *listener)
627 {
628     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
629                                          memory_listener);
630     hwaddr start_addr = 0;
631     ram_addr_t size = 0;
632     uint64_t log_size;
633     int r;
634 
635     if (!dev->memory_changed) {
636         return;
637     }
638     if (!dev->started) {
639         return;
640     }
641     if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
642         return;
643     }
644 
645     if (dev->started) {
646         start_addr = dev->mem_changed_start_addr;
647         size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
648 
649         r = vhost_verify_ring_mappings(dev, start_addr, size);
650         assert(r >= 0);
651     }
652 
653     if (!dev->log_enabled) {
654         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
655         if (r < 0) {
656             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
657         }
658         dev->memory_changed = false;
659         return;
660     }
661     log_size = vhost_get_log_size(dev);
662     /* We allocate an extra 4K bytes to log,
663      * to reduce the number of reallocations. */
664 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
665     /* To log more, must increase log size before table update. */
666     if (dev->log_size < log_size) {
667         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
668     }
669     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
670     if (r < 0) {
671         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
672     }
673     /* To log less, can only decrease log size after table update. */
674     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
675         vhost_dev_log_resize(dev, log_size);
676     }
677     dev->memory_changed = false;
678 }
679 
680 static void vhost_region_add(MemoryListener *listener,
681                              MemoryRegionSection *section)
682 {
683     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
684                                          memory_listener);
685 
686     if (!vhost_section(section)) {
687         return;
688     }
689 
690     ++dev->n_mem_sections;
691     dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
692                                 dev->n_mem_sections);
693     dev->mem_sections[dev->n_mem_sections - 1] = *section;
694     memory_region_ref(section->mr);
695     vhost_set_memory(listener, section, true);
696 }
697 
698 static void vhost_region_del(MemoryListener *listener,
699                              MemoryRegionSection *section)
700 {
701     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
702                                          memory_listener);
703     int i;
704 
705     if (!vhost_section(section)) {
706         return;
707     }
708 
709     vhost_set_memory(listener, section, false);
710     memory_region_unref(section->mr);
711     for (i = 0; i < dev->n_mem_sections; ++i) {
712         if (dev->mem_sections[i].offset_within_address_space
713             == section->offset_within_address_space) {
714             --dev->n_mem_sections;
715             memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
716                     (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
717             break;
718         }
719     }
720 }
721 
722 static void vhost_region_nop(MemoryListener *listener,
723                              MemoryRegionSection *section)
724 {
725 }
726 
727 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
728                                     struct vhost_virtqueue *vq,
729                                     unsigned idx, bool enable_log)
730 {
731     struct vhost_vring_addr addr = {
732         .index = idx,
733         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
734         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
735         .used_user_addr = (uint64_t)(unsigned long)vq->used,
736         .log_guest_addr = vq->used_phys,
737         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
738     };
739     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
740     if (r < 0) {
741         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
742         return -errno;
743     }
744     return 0;
745 }
746 
747 static int vhost_dev_set_features(struct vhost_dev *dev,
748                                   bool enable_log)
749 {
750     uint64_t features = dev->acked_features;
751     int r;
752     if (enable_log) {
753         features |= 0x1ULL << VHOST_F_LOG_ALL;
754     }
755     r = dev->vhost_ops->vhost_set_features(dev, features);
756     if (r < 0) {
757         VHOST_OPS_DEBUG("vhost_set_features failed");
758     }
759     return r < 0 ? -errno : 0;
760 }
761 
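/* Toggle VHOST_F_LOG_ALL and reprogram every vring address with or without
 * the logging flag, rolling back to the previous state on failure. */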
762 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
763 {
764     int r, i, idx;
765     r = vhost_dev_set_features(dev, enable_log);
766     if (r < 0) {
767         goto err_features;
768     }
769     for (i = 0; i < dev->nvqs; ++i) {
770         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
771         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
772                                      enable_log);
773         if (r < 0) {
774             goto err_vq;
775         }
776     }
777     return 0;
778 err_vq:
779     for (; i >= 0; --i) {
780         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
781         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
782                                  dev->log_enabled);
783     }
784     vhost_dev_set_features(dev, dev->log_enabled);
785 err_features:
786     return r;
787 }
788 
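/* Enable or disable dirty logging, typically when migration starts or
 * stops.  If the device is not running, only the desired state is
 * recorded; otherwise the log is (re)allocated and the backend updated. */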
789 static int vhost_migration_log(MemoryListener *listener, int enable)
790 {
791     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
792                                          memory_listener);
793     int r;
794     if (!!enable == dev->log_enabled) {
795         return 0;
796     }
797     if (!dev->started) {
798         dev->log_enabled = enable;
799         return 0;
800     }
801     if (!enable) {
802         r = vhost_dev_set_log(dev, false);
803         if (r < 0) {
804             return r;
805         }
806         vhost_log_put(dev, false);
807     } else {
808         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
809         r = vhost_dev_set_log(dev, true);
810         if (r < 0) {
811             return r;
812         }
813     }
814     dev->log_enabled = enable;
815     return 0;
816 }
817 
818 static void vhost_log_global_start(MemoryListener *listener)
819 {
820     int r;
821 
822     r = vhost_migration_log(listener, true);
823     if (r < 0) {
824         abort();
825     }
826 }
827 
828 static void vhost_log_global_stop(MemoryListener *listener)
829 {
830     int r;
831 
832     r = vhost_migration_log(listener, false);
833     if (r < 0) {
834         abort();
835     }
836 }
837 
838 static void vhost_log_start(MemoryListener *listener,
839                             MemoryRegionSection *section,
840                             int old, int new)
841 {
842     /* FIXME: implement */
843 }
844 
845 static void vhost_log_stop(MemoryListener *listener,
846                            MemoryRegionSection *section,
847                            int old, int new)
848 {
849     /* FIXME: implement */
850 }
851 
852 /* The vhost driver natively knows how to handle the vrings of non
853  * cross-endian legacy devices and modern devices. Only legacy devices
854  * exposed to a bi-endian guest may require the vhost driver to use a
855  * specific endianness.
856  */
857 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
858 {
859     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
860         return false;
861     }
862 #ifdef HOST_WORDS_BIGENDIAN
863     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
864 #else
865     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
866 #endif
867 }
868 
869 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
870                                                    bool is_big_endian,
871                                                    int vhost_vq_index)
872 {
873     struct vhost_vring_state s = {
874         .index = vhost_vq_index,
875         .num = is_big_endian
876     };
877 
878     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
879         return 0;
880     }
881 
882     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
883     if (errno == ENOTTY) {
884         error_report("vhost does not support cross-endian");
885         return -ENOSYS;
886     }
887 
888     return -errno;
889 }
890 
891 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
892                                       uint64_t gpa, uint64_t *uaddr,
893                                       uint64_t *len)
894 {
895     int i;
896 
897     for (i = 0; i < hdev->mem->nregions; i++) {
898         struct vhost_memory_region *reg = hdev->mem->regions + i;
899 
900         if (gpa >= reg->guest_phys_addr &&
901             reg->guest_phys_addr + reg->memory_size > gpa) {
902             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
903             *len = reg->guest_phys_addr + reg->memory_size - gpa;
904             return 0;
905         }
906     }
907 
908     return -EFAULT;
909 }
910 
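/* Handle an IOTLB miss reported by the backend: translate the IOVA through
 * the device's DMA address space and push the resulting userspace mapping
 * into the backend's device IOTLB. */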
911 void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
912 {
913     IOMMUTLBEntry iotlb;
914     uint64_t uaddr, len;
915 
916     rcu_read_lock();
917 
918     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
919                                           iova, write);
920     if (iotlb.target_as != NULL) {
921         if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
922                                        &uaddr, &len)) {
923             error_report("Fail to lookup the translated address "
924                          "%"PRIx64, iotlb.translated_addr);
925             goto out;
926         }
927 
928         len = MIN(iotlb.addr_mask + 1, len);
929         iova = iova & ~iotlb.addr_mask;
930 
931         if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
932                                                       len, iotlb.perm)) {
933             error_report("Fail to update device iotlb");
934             goto out;
935         }
936     }
937 out:
938     rcu_read_unlock();
939 }
940 
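/* Program one virtqueue into the backend: ring size, last avail index,
 * ring addresses (host-mapped unless a vIOMMU is in use) and the kick
 * eventfd.  The call eventfd is disabled when guest notifiers are in use
 * but the queue has no interrupt vector assigned. */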
941 static int vhost_virtqueue_start(struct vhost_dev *dev,
942                                 struct VirtIODevice *vdev,
943                                 struct vhost_virtqueue *vq,
944                                 unsigned idx)
945 {
946     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
947     VirtioBusState *vbus = VIRTIO_BUS(qbus);
948     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
949     hwaddr s, l, a;
950     int r;
951     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
952     struct vhost_vring_file file = {
953         .index = vhost_vq_index
954     };
955     struct vhost_vring_state state = {
956         .index = vhost_vq_index
957     };
958     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
959 
960 
961     vq->num = state.num = virtio_queue_get_num(vdev, idx);
962     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
963     if (r) {
964         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
965         return -errno;
966     }
967 
968     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
969     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
970     if (r) {
971         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
972         return -errno;
973     }
974 
975     if (vhost_needs_vring_endian(vdev)) {
976         r = vhost_virtqueue_set_vring_endian_legacy(dev,
977                                                     virtio_is_big_endian(vdev),
978                                                     vhost_vq_index);
979         if (r) {
980             return -errno;
981         }
982     }
983 
984     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
985     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
986     vq->desc = vhost_memory_map(dev, a, &l, 0);
987     if (!vq->desc || l != s) {
988         r = -ENOMEM;
989         goto fail_alloc_desc;
990     }
991     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
992     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
993     vq->avail = vhost_memory_map(dev, a, &l, 0);
994     if (!vq->avail || l != s) {
995         r = -ENOMEM;
996         goto fail_alloc_avail;
997     }
998     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
999     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1000     vq->used = vhost_memory_map(dev, a, &l, 1);
1001     if (!vq->used || l != s) {
1002         r = -ENOMEM;
1003         goto fail_alloc_used;
1004     }
1005 
1006     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1007     if (r < 0) {
1008         r = -errno;
1009         goto fail_alloc;
1010     }
1011 
1012     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1013     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1014     if (r) {
1015         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1016         r = -errno;
1017         goto fail_kick;
1018     }
1019 
1020     /* Clear and discard previous events if any. */
1021     event_notifier_test_and_clear(&vq->masked_notifier);
1022 
1023     /* Init vring in unmasked state, unless guest_notifier_mask
1024      * will do it later.
1025      */
1026     if (!vdev->use_guest_notifier_mask) {
1027         /* TODO: check and handle errors. */
1028         vhost_virtqueue_mask(dev, vdev, idx, false);
1029     }
1030 
1031     if (k->query_guest_notifiers &&
1032         k->query_guest_notifiers(qbus->parent) &&
1033         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1034         file.fd = -1;
1035         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1036         if (r) {
1037             goto fail_vector;
1038         }
1039     }
1040 
1041     return 0;
1042 
1043 fail_vector:
1044 fail_kick:
1045 fail_alloc:
1046     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1047                        0, 0);
1048 fail_alloc_used:
1049     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1050                        0, 0);
1051 fail_alloc_avail:
1052     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1053                        0, 0);
1054 fail_alloc_desc:
1055     return r;
1056 }
1057 
1058 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1059                                     struct VirtIODevice *vdev,
1060                                     struct vhost_virtqueue *vq,
1061                                     unsigned idx)
1062 {
1063     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1064     struct vhost_vring_state state = {
1065         .index = vhost_vq_index,
1066     };
1067     int r;
1068 
1069     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1070     if (r < 0) {
1071         VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
1072     } else {
1073         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1074     }
1075     virtio_queue_invalidate_signalled_used(vdev, idx);
1076     virtio_queue_update_used_idx(vdev, idx);
1077 
1078     /* In the cross-endian case, reset the vring endianness back to
1079      * native, which is what legacy devices expect by default.
1080      */
1081     if (vhost_needs_vring_endian(vdev)) {
1082         vhost_virtqueue_set_vring_endian_legacy(dev,
1083                                                 !virtio_is_big_endian(vdev),
1084                                                 vhost_vq_index);
1085     }
1086 
1087     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1088                        1, virtio_queue_get_used_size(vdev, idx));
1089     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1090                        0, virtio_queue_get_avail_size(vdev, idx));
1091     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1092                        0, virtio_queue_get_desc_size(vdev, idx));
1093 }
1094 
1095 static void vhost_eventfd_add(MemoryListener *listener,
1096                               MemoryRegionSection *section,
1097                               bool match_data, uint64_t data, EventNotifier *e)
1098 {
1099 }
1100 
1101 static void vhost_eventfd_del(MemoryListener *listener,
1102                               MemoryRegionSection *section,
1103                               bool match_data, uint64_t data, EventNotifier *e)
1104 {
1105 }
1106 
1107 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1108                                                 int n, uint32_t timeout)
1109 {
1110     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1111     struct vhost_vring_state state = {
1112         .index = vhost_vq_index,
1113         .num = timeout,
1114     };
1115     int r;
1116 
1117     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1118         return -EINVAL;
1119     }
1120 
1121     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1122     if (r) {
1123         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1124         return r;
1125     }
1126 
1127     return 0;
1128 }
1129 
1130 static int vhost_virtqueue_init(struct vhost_dev *dev,
1131                                 struct vhost_virtqueue *vq, int n)
1132 {
1133     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1134     struct vhost_vring_file file = {
1135         .index = vhost_vq_index,
1136     };
1137     int r = event_notifier_init(&vq->masked_notifier, 0);
1138     if (r < 0) {
1139         return r;
1140     }
1141 
1142     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1143     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1144     if (r) {
1145         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1146         r = -errno;
1147         goto fail_call;
1148     }
1149 
1150     vq->dev = dev;
1151 
1152     return 0;
1153 fail_call:
1154     event_notifier_cleanup(&vq->masked_notifier);
1155     return r;
1156 }
1157 
1158 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1159 {
1160     event_notifier_cleanup(&vq->masked_notifier);
1161 }
1162 
1163 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
1164 {
1165     struct vhost_dev *hdev = container_of(n, struct vhost_dev, n);
1166 
1167     if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
1168                                                        iotlb->iova,
1169                                                        iotlb->addr_mask + 1)) {
1170         error_report("Fail to invalidate device iotlb");
1171     }
1172 }
1173 
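/* Set up a vhost device: attach the backend, query its features,
 * initialize the per-virtqueue notifiers and register the memory listener.
 * A typical client lifecycle (an illustrative sketch only; see callers
 * such as vhost_net for the authoritative sequence) is:
 *
 *   vhost_dev_init();
 *   vhost_dev_enable_notifiers();
 *   vhost_dev_start();
 *   ...
 *   vhost_dev_stop();
 *   vhost_dev_disable_notifiers();
 *   vhost_dev_cleanup();
 */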
1174 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1175                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1176 {
1177     uint64_t features;
1178     int i, r, n_initialized_vqs = 0;
1179 
1180     hdev->vdev = NULL;
1181     hdev->migration_blocker = NULL;
1182 
1183     r = vhost_set_backend_type(hdev, backend_type);
1184     assert(r >= 0);
1185 
1186     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1187     if (r < 0) {
1188         goto fail;
1189     }
1190 
1191     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1192         error_report("vhost backend memory slots limit is less"
1193                 " than current number of present memory slots");
1194         r = -1;
1195         goto fail;
1196     }
1197 
1198     r = hdev->vhost_ops->vhost_set_owner(hdev);
1199     if (r < 0) {
1200         VHOST_OPS_DEBUG("vhost_set_owner failed");
1201         goto fail;
1202     }
1203 
1204     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1205     if (r < 0) {
1206         VHOST_OPS_DEBUG("vhost_get_features failed");
1207         goto fail;
1208     }
1209 
1210     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1211         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1212         if (r < 0) {
1213             goto fail;
1214         }
1215     }
1216 
1217     if (busyloop_timeout) {
1218         for (i = 0; i < hdev->nvqs; ++i) {
1219             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1220                                                      busyloop_timeout);
1221             if (r < 0) {
1222                 goto fail_busyloop;
1223             }
1224         }
1225     }
1226 
1227     hdev->features = features;
1228 
1229     hdev->memory_listener = (MemoryListener) {
1230         .begin = vhost_begin,
1231         .commit = vhost_commit,
1232         .region_add = vhost_region_add,
1233         .region_del = vhost_region_del,
1234         .region_nop = vhost_region_nop,
1235         .log_start = vhost_log_start,
1236         .log_stop = vhost_log_stop,
1237         .log_sync = vhost_log_sync,
1238         .log_global_start = vhost_log_global_start,
1239         .log_global_stop = vhost_log_global_stop,
1240         .eventfd_add = vhost_eventfd_add,
1241         .eventfd_del = vhost_eventfd_del,
1242         .priority = 10
1243     };
1244 
1245     hdev->n.notify = vhost_iommu_unmap_notify;
1246     hdev->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
1247 
1248     if (hdev->migration_blocker == NULL) {
1249         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1250             error_setg(&hdev->migration_blocker,
1251                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1252         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
1253             error_setg(&hdev->migration_blocker,
1254                        "Migration disabled: failed to allocate shared memory");
1255         }
1256     }
1257 
1258     if (hdev->migration_blocker != NULL) {
1259         migrate_add_blocker(hdev->migration_blocker);
1260     }
1261 
1262     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1263     hdev->n_mem_sections = 0;
1264     hdev->mem_sections = NULL;
1265     hdev->log = NULL;
1266     hdev->log_size = 0;
1267     hdev->log_enabled = false;
1268     hdev->started = false;
1269     hdev->memory_changed = false;
1270     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1271     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1272     return 0;
1273 
1274 fail_busyloop:
1275     while (--i >= 0) {
1276         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1277     }
1278 fail:
1279     hdev->nvqs = n_initialized_vqs;
1280     vhost_dev_cleanup(hdev);
1281     return r;
1282 }
1283 
1284 void vhost_dev_cleanup(struct vhost_dev *hdev)
1285 {
1286     int i;
1287 
1288     for (i = 0; i < hdev->nvqs; ++i) {
1289         vhost_virtqueue_cleanup(hdev->vqs + i);
1290     }
1291     if (hdev->mem) {
1292         /* those are only safe after successful init */
1293         memory_listener_unregister(&hdev->memory_listener);
1294         QLIST_REMOVE(hdev, entry);
1295     }
1296     if (hdev->migration_blocker) {
1297         migrate_del_blocker(hdev->migration_blocker);
1298         error_free(hdev->migration_blocker);
1299     }
1300     g_free(hdev->mem);
1301     g_free(hdev->mem_sections);
1302     if (hdev->vhost_ops) {
1303         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1304     }
1305     assert(!hdev->log);
1306 
1307     memset(hdev, 0, sizeof(struct vhost_dev));
1308 }
1309 
1310 /* Stop processing guest IO notifications in qemu.
1311  * Start processing them in vhost in kernel.
1312  */
1313 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1314 {
1315     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1316     int i, r, e;
1317 
1318     /* We will pass the notifiers to the kernel, make sure that QEMU
1319      * doesn't interfere.
1320      */
1321     r = virtio_device_grab_ioeventfd(vdev);
1322     if (r < 0) {
1323         error_report("binding does not support host notifiers");
1324         goto fail;
1325     }
1326 
1327     for (i = 0; i < hdev->nvqs; ++i) {
1328         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1329                                          true);
1330         if (r < 0) {
1331             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1332             goto fail_vq;
1333         }
1334     }
1335 
1336     return 0;
1337 fail_vq:
1338     while (--i >= 0) {
1339         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1340                                          false);
1341         if (e < 0) {
1342             error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
1343         }
1344         assert (e >= 0);
1345     }
1346     virtio_device_release_ioeventfd(vdev);
1347 fail:
1348     return r;
1349 }
1350 
1351 /* Stop processing guest IO notifications in vhost.
1352  * Start processing them in qemu.
1353  * This might actually run the qemu handlers right away,
1354  * so virtio in qemu must be completely setup when this is called.
1355  */
1356 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1357 {
1358     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1359     int i, r;
1360 
1361     for (i = 0; i < hdev->nvqs; ++i) {
1362         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1363                                          false);
1364         if (r < 0) {
1365             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1366         }
1367         assert(r >= 0);
1368     }
1369     virtio_device_release_ioeventfd(vdev);
1370 }
1371 
1372 /* Test and clear event pending status.
1373  * Should be called after unmask to avoid losing events.
1374  */
1375 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1376 {
1377     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1378     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1379     return event_notifier_test_and_clear(&vq->masked_notifier);
1380 }
1381 
1382 /* Mask/unmask events from this vq. */
1383 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1384                          bool mask)
1385 {
1386     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1387     int r, index = n - hdev->vq_index;
1388     struct vhost_vring_file file;
1389 
1390     /* should only be called after backend is connected */
1391     assert(hdev->vhost_ops);
1392 
1393     if (mask) {
1394         assert(vdev->use_guest_notifier_mask);
1395         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1396     } else {
1397         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1398     }
1399 
1400     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1401     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1402     if (r < 0) {
1403         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1404     }
1405 }
1406 
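/* Mask out of @features any bit listed in @feature_bits that the backend
 * does not support; the table is terminated by VHOST_INVALID_FEATURE_BIT. */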
1407 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1408                             uint64_t features)
1409 {
1410     const int *bit = feature_bits;
1411     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1412         uint64_t bit_mask = (1ULL << *bit);
1413         if (!(hdev->features & bit_mask)) {
1414             features &= ~bit_mask;
1415         }
1416         bit++;
1417     }
1418     return features;
1419 }
1420 
1421 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1422                         uint64_t features)
1423 {
1424     const int *bit = feature_bits;
1425     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1426         uint64_t bit_mask = (1ULL << *bit);
1427         if (features & bit_mask) {
1428             hdev->acked_features |= bit_mask;
1429         }
1430         bit++;
1431     }
1432 }
1433 
1434 /* Host notifiers must be enabled at this point. */
1435 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1436 {
1437     int i, r;
1438 
1439     /* should only be called after backend is connected */
1440     assert(hdev->vhost_ops);
1441 
1442     hdev->started = true;
1443     hdev->vdev = vdev;
1444 
1445     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1446     if (r < 0) {
1447         goto fail_features;
1448     }
1449 
1450     if (vhost_dev_has_iommu(hdev)) {
1451         memory_region_register_iommu_notifier(vdev->dma_as->root,
1452                                               &hdev->n);
1453     }
1454 
1455     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1456     if (r < 0) {
1457         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1458         r = -errno;
1459         goto fail_mem;
1460     }
1461     for (i = 0; i < hdev->nvqs; ++i) {
1462         r = vhost_virtqueue_start(hdev,
1463                                   vdev,
1464                                   hdev->vqs + i,
1465                                   hdev->vq_index + i);
1466         if (r < 0) {
1467             goto fail_vq;
1468         }
1469     }
1470 
1471     if (hdev->log_enabled) {
1472         uint64_t log_base;
1473 
1474         hdev->log_size = vhost_get_log_size(hdev);
1475         hdev->log = vhost_log_get(hdev->log_size,
1476                                   vhost_dev_log_is_shared(hdev));
1477         log_base = (uintptr_t)hdev->log->log;
1478         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1479                                                 hdev->log_size ? log_base : 0,
1480                                                 hdev->log);
1481         if (r < 0) {
1482             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1483             r = -errno;
1484             goto fail_log;
1485         }
1486     }
1487 
1488     if (vhost_dev_has_iommu(hdev)) {
1489         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1490 
1491         /* Update used ring information for IOTLB to work correctly;
1492          * the vhost-kernel code requires this. */
1493         for (i = 0; i < hdev->nvqs; ++i) {
1494             struct vhost_virtqueue *vq = hdev->vqs + i;
1495             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1496         }
1497     }
1498     return 0;
1499 fail_log:
1500     vhost_log_put(hdev, false);
1501 fail_vq:
1502     while (--i >= 0) {
1503         vhost_virtqueue_stop(hdev,
1504                              vdev,
1505                              hdev->vqs + i,
1506                              hdev->vq_index + i);
1507     }
1508     i = hdev->nvqs;
1509 
1510 fail_mem:
1511 fail_features:
1512 
1513     hdev->started = false;
1514     return r;
1515 }
1516 
1517 /* Host notifiers must be enabled at this point. */
1518 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1519 {
1520     int i;
1521 
1522     /* should only be called after backend is connected */
1523     assert(hdev->vhost_ops);
1524 
1525     for (i = 0; i < hdev->nvqs; ++i) {
1526         vhost_virtqueue_stop(hdev,
1527                              vdev,
1528                              hdev->vqs + i,
1529                              hdev->vq_index + i);
1530     }
1531 
1532     if (vhost_dev_has_iommu(hdev)) {
1533         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1534         memory_region_unregister_iommu_notifier(vdev->dma_as->root,
1535                                                 &hdev->n);
1536     }
1537     vhost_log_put(hdev, true);
1538     hdev->started = false;
1539     hdev->vdev = NULL;
1540 }
1541 
1542 int vhost_net_set_backend(struct vhost_dev *hdev,
1543                           struct vhost_vring_file *file)
1544 {
1545     if (hdev->vhost_ops->vhost_net_set_backend) {
1546         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1547     }
1548 
1549     return -1;
1550 }
1551