xref: /openbmc/qemu/hw/virtio/vhost.c (revision bc5c4f21)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include <linux/vhost.h>
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/migration.h"
29 #include "sysemu/dma.h"
30 
31 /* enabled until disconnected backend stabilizes */
32 #define _VHOST_DEBUG 1
33 
34 #ifdef _VHOST_DEBUG
35 #define VHOST_OPS_DEBUG(fmt, ...) \
36     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
37                       strerror(errno), errno); } while (0)
38 #else
39 #define VHOST_OPS_DEBUG(fmt, ...) \
40     do { } while (0)
41 #endif
42 
43 static struct vhost_log *vhost_log;
44 static struct vhost_log *vhost_log_shm;
45 
46 static unsigned int used_memslots;
47 static QLIST_HEAD(, vhost_dev) vhost_devices =
48     QLIST_HEAD_INITIALIZER(vhost_devices);
49 
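/* Report whether every registered vhost backend still has room for one
 * more memory slot, i.e. the smallest per-backend memslot limit is above
 * the number of memslots currently in use. */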
50 bool vhost_has_free_slot(void)
51 {
52     unsigned int slots_limit = ~0U;
53     struct vhost_dev *hdev;
54 
55     QLIST_FOREACH(hdev, &vhost_devices, entry) {
56         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
57         slots_limit = MIN(slots_limit, r);
58     }
59     return slots_limit > used_memslots;
60 }
61 
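/* Sync dirty pages from the device's vhost log into the memory API for
 * the part of 'section' that falls inside both the [mfirst, mlast] and
 * [rfirst, rlast] ranges.  Each consumed log chunk is cleared with an
 * atomic exchange so concurrent backend writes are not lost. */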
62 static void vhost_dev_sync_region(struct vhost_dev *dev,
63                                   MemoryRegionSection *section,
64                                   uint64_t mfirst, uint64_t mlast,
65                                   uint64_t rfirst, uint64_t rlast)
66 {
67     vhost_log_chunk_t *log = dev->log->log;
68 
69     uint64_t start = MAX(mfirst, rfirst);
70     uint64_t end = MIN(mlast, rlast);
71     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
72     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
73     uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
74 
75     if (end < start) {
76         return;
77     }
78     assert(end / VHOST_LOG_CHUNK < dev->log_size);
79     assert(start / VHOST_LOG_CHUNK < dev->log_size);
80 
81     for (;from < to; ++from) {
82         vhost_log_chunk_t log;
83         /* We first check with a non-atomic load: much cheaper,
84          * and we expect non-dirty to be the common case. */
85         if (!*from) {
86             addr += VHOST_LOG_CHUNK;
87             continue;
88         }
89         /* Data must be read atomically. We don't really need barrier semantics
90          * but it's easier to use atomic_* than roll our own. */
91         log = atomic_xchg(from, 0);
92         while (log) {
93             int bit = ctzl(log);
94             hwaddr page_addr;
95             hwaddr section_offset;
96             hwaddr mr_offset;
97             page_addr = addr + bit * VHOST_LOG_PAGE;
98             section_offset = page_addr - section->offset_within_address_space;
99             mr_offset = section_offset + section->offset_within_region;
100             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
101             log &= ~(0x1ull << bit);
102         }
103         addr += VHOST_LOG_CHUNK;
104     }
105 }
106 
107 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
108                                    MemoryRegionSection *section,
109                                    hwaddr first,
110                                    hwaddr last)
111 {
112     int i;
113     hwaddr start_addr;
114     hwaddr end_addr;
115 
116     if (!dev->log_enabled || !dev->started) {
117         return 0;
118     }
119     start_addr = section->offset_within_address_space;
120     end_addr = range_get_last(start_addr, int128_get64(section->size));
121     start_addr = MAX(first, start_addr);
122     end_addr = MIN(last, end_addr);
123 
124     for (i = 0; i < dev->mem->nregions; ++i) {
125         struct vhost_memory_region *reg = dev->mem->regions + i;
126         vhost_dev_sync_region(dev, section, start_addr, end_addr,
127                               reg->guest_phys_addr,
128                               range_get_last(reg->guest_phys_addr,
129                                              reg->memory_size));
130     }
131     for (i = 0; i < dev->nvqs; ++i) {
132         struct vhost_virtqueue *vq = dev->vqs + i;
133         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
134                               range_get_last(vq->used_phys, vq->used_size));
135     }
136     return 0;
137 }
138 
139 static void vhost_log_sync(MemoryListener *listener,
140                           MemoryRegionSection *section)
141 {
142     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
143                                          memory_listener);
144     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
145 }
146 
147 static void vhost_log_sync_range(struct vhost_dev *dev,
148                                  hwaddr first, hwaddr last)
149 {
150     int i;
151     /* FIXME: this is N^2 in number of sections */
152     for (i = 0; i < dev->n_mem_sections; ++i) {
153         MemoryRegionSection *section = &dev->mem_sections[i];
154         vhost_sync_dirty_bitmap(dev, section, first, last);
155     }
156 }
157 
158 /* Assign/unassign. Keep an unsorted array of non-overlapping
159  * memory regions in dev->mem. */
160 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
161                                       uint64_t start_addr,
162                                       uint64_t size)
163 {
164     int from, to, n = dev->mem->nregions;
165     /* Track overlapping/split regions for sanity checking. */
166     int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
167 
168     for (from = 0, to = 0; from < n; ++from, ++to) {
169         struct vhost_memory_region *reg = dev->mem->regions + to;
170         uint64_t reglast;
171         uint64_t memlast;
172         uint64_t change;
173 
174         /* clone old region */
175         if (to != from) {
176             memcpy(reg, dev->mem->regions + from, sizeof *reg);
177         }
178 
179         /* No overlap is simple */
180         if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
181                             start_addr, size)) {
182             continue;
183         }
184 
185         /* Split only happens if supplied region
186          * is in the middle of an existing one. Thus it can not
187          * overlap with any other existing region. */
188         assert(!split);
189 
190         reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
191         memlast = range_get_last(start_addr, size);
192 
193         /* Remove whole region */
194         if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
195             --dev->mem->nregions;
196             --to;
197             ++overlap_middle;
198             continue;
199         }
200 
201         /* Shrink region */
202         if (memlast >= reglast) {
203             reg->memory_size = start_addr - reg->guest_phys_addr;
204             assert(reg->memory_size);
205             assert(!overlap_end);
206             ++overlap_end;
207             continue;
208         }
209 
210         /* Shift region */
211         if (start_addr <= reg->guest_phys_addr) {
212             change = memlast + 1 - reg->guest_phys_addr;
213             reg->memory_size -= change;
214             reg->guest_phys_addr += change;
215             reg->userspace_addr += change;
216             assert(reg->memory_size);
217             assert(!overlap_start);
218             ++overlap_start;
219             continue;
220         }
221 
222         /* This only happens if supplied region
223          * is in the middle of an existing one. Thus it can not
224          * overlap with any other existing region. */
225         assert(!overlap_start);
226         assert(!overlap_end);
227         assert(!overlap_middle);
228         /* Split region: shrink first part, shift second part. */
229         memcpy(dev->mem->regions + n, reg, sizeof *reg);
230         reg->memory_size = start_addr - reg->guest_phys_addr;
231         assert(reg->memory_size);
232         change = memlast + 1 - reg->guest_phys_addr;
233         reg = dev->mem->regions + n;
234         reg->memory_size -= change;
235         assert(reg->memory_size);
236         reg->guest_phys_addr += change;
237         reg->userspace_addr += change;
238         /* Never add more than 1 region */
239         assert(dev->mem->nregions == n);
240         ++dev->mem->nregions;
241         ++split;
242     }
243 }
244 
245 /* Called after unassign, so no regions overlap the given range. */
246 static void vhost_dev_assign_memory(struct vhost_dev *dev,
247                                     uint64_t start_addr,
248                                     uint64_t size,
249                                     uint64_t uaddr)
250 {
251     int from, to;
252     struct vhost_memory_region *merged = NULL;
253     for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
254         struct vhost_memory_region *reg = dev->mem->regions + to;
255         uint64_t prlast, urlast;
256         uint64_t pmlast, umlast;
257         uint64_t s, e, u;
258 
259         /* clone old region */
260         if (to != from) {
261             memcpy(reg, dev->mem->regions + from, sizeof *reg);
262         }
263         prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
264         pmlast = range_get_last(start_addr, size);
265         urlast = range_get_last(reg->userspace_addr, reg->memory_size);
266         umlast = range_get_last(uaddr, size);
267 
268         /* check for overlapping regions: should never happen. */
269         assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
270         /* Not an adjacent or overlapping region - do not merge. */
271         if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
272             (pmlast + 1 != reg->guest_phys_addr ||
273              umlast + 1 != reg->userspace_addr)) {
274             continue;
275         }
276 
277         if (dev->vhost_ops->vhost_backend_can_merge &&
278             !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
279                                                      reg->userspace_addr,
280                                                      reg->memory_size)) {
281             continue;
282         }
283 
284         if (merged) {
285             --to;
286             assert(to >= 0);
287         } else {
288             merged = reg;
289         }
290         u = MIN(uaddr, reg->userspace_addr);
291         s = MIN(start_addr, reg->guest_phys_addr);
292         e = MAX(pmlast, prlast);
293         uaddr = merged->userspace_addr = u;
294         start_addr = merged->guest_phys_addr = s;
295         size = merged->memory_size = e - s + 1;
296         assert(merged->memory_size);
297     }
298 
299     if (!merged) {
300         struct vhost_memory_region *reg = dev->mem->regions + to;
301         memset(reg, 0, sizeof *reg);
302         reg->memory_size = size;
303         assert(reg->memory_size);
304         reg->guest_phys_addr = start_addr;
305         reg->userspace_addr = uaddr;
306         ++to;
307     }
308     assert(to <= dev->mem->nregions + 1);
309     dev->mem->nregions = to;
310 }
311 
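/* Number of log chunks needed to cover every guest memory region and
 * every used ring of this device. */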
312 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
313 {
314     uint64_t log_size = 0;
315     int i;
316     for (i = 0; i < dev->mem->nregions; ++i) {
317         struct vhost_memory_region *reg = dev->mem->regions + i;
318         uint64_t last = range_get_last(reg->guest_phys_addr,
319                                        reg->memory_size);
320         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
321     }
322     for (i = 0; i < dev->nvqs; ++i) {
323         struct vhost_virtqueue *vq = dev->vqs + i;
324         uint64_t last = vq->used_phys + vq->used_size - 1;
325         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
326     }
327     return log_size;
328 }
329 
330 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
331 {
332     struct vhost_log *log;
333     uint64_t logsize = size * sizeof(*(log->log));
334     int fd = -1;
335 
336     log = g_new0(struct vhost_log, 1);
337     if (share) {
338         log->log = qemu_memfd_alloc("vhost-log", logsize,
339                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
340                                     &fd);
341         memset(log->log, 0, logsize);
342     } else {
343         log->log = g_malloc0(logsize);
344     }
345 
346     log->size = size;
347     log->refcnt = 1;
348     log->fd = fd;
349 
350     return log;
351 }
352 
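/* Return the global vhost log (memfd-backed when 'share' is set).  The
 * cached log is reused and its refcount bumped when the size matches;
 * otherwise a new log is allocated and becomes the cached one. */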
353 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
354 {
355     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
356 
357     if (!log || log->size != size) {
358         log = vhost_log_alloc(size, share);
359         if (share) {
360             vhost_log_shm = log;
361         } else {
362             vhost_log = log;
363         }
364     } else {
365         ++log->refcnt;
366     }
367 
368     return log;
369 }
370 
371 static void vhost_log_put(struct vhost_dev *dev, bool sync)
372 {
373     struct vhost_log *log = dev->log;
374 
375     if (!log) {
376         return;
377     }
378 
379     --log->refcnt;
380     if (log->refcnt == 0) {
381         /* Sync only the range covered by the old log */
382         if (dev->log_size && sync) {
383             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
384         }
385 
386         if (vhost_log == log) {
387             g_free(log->log);
388             vhost_log = NULL;
389         } else if (vhost_log_shm == log) {
390             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
391                             log->fd);
392             vhost_log_shm = NULL;
393         }
394 
395         g_free(log);
396     }
397     dev->log = NULL;
398     dev->log_size = 0;
399 }
400 
401 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
402 {
403     return dev->vhost_ops->vhost_requires_shm_log &&
404            dev->vhost_ops->vhost_requires_shm_log(dev);
405 }
406 
407 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
408 {
409     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
410     uint64_t log_base = (uintptr_t)log->log;
411     int r;
412 
413     /* Inform the backend of the log switch; this must be done before
414        releasing the current log, to ensure no logging is lost. */
415     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
416     if (r < 0) {
417         VHOST_OPS_DEBUG("vhost_set_log_base failed");
418     }
419 
420     vhost_log_put(dev, true);
421     dev->log = log;
422     dev->log_size = size;
423 }
424 
425 static int vhost_dev_has_iommu(struct vhost_dev *dev)
426 {
427     VirtIODevice *vdev = dev->vdev;
428     AddressSpace *dma_as = vdev->dma_as;
429 
430     return memory_region_is_iommu(dma_as->root) &&
431            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
432 }
433 
434 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
435                               hwaddr *plen, int is_write)
436 {
437     if (!vhost_dev_has_iommu(dev)) {
438         return cpu_physical_memory_map(addr, plen, is_write);
439     } else {
440         return (void *)(uintptr_t)addr;
441     }
442 }
443 
444 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
445                                hwaddr len, int is_write,
446                                hwaddr access_len)
447 {
448     if (!vhost_dev_has_iommu(dev)) {
449         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
450     }
451 }
452 
453 static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
454                                           void *part,
455                                           uint64_t part_addr,
456                                           uint64_t part_size,
457                                           uint64_t start_addr,
458                                           uint64_t size)
459 {
460     hwaddr l;
461     void *p;
462     int r = 0;
463 
464     if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
465         return 0;
466     }
467     l = part_size;
468     p = vhost_memory_map(dev, part_addr, &l, 1);
469     if (!p || l != part_size) {
470         r = -ENOMEM;
471     }
472     if (p != part) {
473         r = -EBUSY;
474     }
475     vhost_memory_unmap(dev, p, l, 0, 0);
476     return r;
477 }
478 
479 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
480                                       uint64_t start_addr,
481                                       uint64_t size)
482 {
483     int i, j;
484     int r = 0;
485     const char *part_name[] = {
486         "descriptor table",
487         "available ring",
488         "used ring"
489     };
490 
491     for (i = 0; i < dev->nvqs; ++i) {
492         struct vhost_virtqueue *vq = dev->vqs + i;
493 
494         j = 0;
495         r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
496                                            vq->desc_size, start_addr, size);
497         if (r) {
498             break;
499         }
500 
501         j++;
502         r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
503                                            vq->avail_size, start_addr, size);
504         if (r) {
505             break;
506         }
507 
508         j++;
509         r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
510                                            vq->used_size, start_addr, size);
511         if (r) {
512             break;
513         }
514     }
515 
516     if (r == -ENOMEM) {
517         error_report("Unable to map %s for ring %d", part_name[j], i);
518     } else if (r == -EBUSY) {
519         error_report("%s relocated for ring %d", part_name[j], i);
520     }
521     return r;
522 }
523 
524 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
525 						      uint64_t start_addr,
526 						      uint64_t size)
527 {
528     int i, n = dev->mem->nregions;
529     for (i = 0; i < n; ++i) {
530         struct vhost_memory_region *reg = dev->mem->regions + i;
531         if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
532                            start_addr, size)) {
533             return reg;
534         }
535     }
536     return NULL;
537 }
538 
539 static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
540                                  uint64_t start_addr,
541                                  uint64_t size,
542                                  uint64_t uaddr)
543 {
544     struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
545     uint64_t reglast;
546     uint64_t memlast;
547 
548     if (!reg) {
549         return true;
550     }
551 
552     reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
553     memlast = range_get_last(start_addr, size);
554 
555     /* Need to extend region? */
556     if (start_addr < reg->guest_phys_addr || memlast > reglast) {
557         return true;
558     }
559     /* userspace_addr changed? */
560     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
561 }
562 
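/* Fold an added or removed memory section into dev->mem: any overlap with
 * the affected range is dropped, the new mapping is re-added when 'add' is
 * set, and the changed guest-physical range is recorded so vhost_commit()
 * can push the updated table to the backend.  Regions with non-migration
 * dirty logging (e.g. VGA) are always treated as removals. */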
563 static void vhost_set_memory(MemoryListener *listener,
564                              MemoryRegionSection *section,
565                              bool add)
566 {
567     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
568                                          memory_listener);
569     hwaddr start_addr = section->offset_within_address_space;
570     ram_addr_t size = int128_get64(section->size);
571     bool log_dirty =
572         memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
573     int s = offsetof(struct vhost_memory, regions) +
574         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
575     void *ram;
576 
577     dev->mem = g_realloc(dev->mem, s);
578 
579     if (log_dirty) {
580         add = false;
581     }
582 
583     assert(size);
584 
585     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
586     ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
587     if (add) {
588         if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
589             /* Region exists with same address. Nothing to do. */
590             return;
591         }
592     } else {
593         if (!vhost_dev_find_reg(dev, start_addr, size)) {
594             /* Removing region that we don't access. Nothing to do. */
595             return;
596         }
597     }
598 
599     vhost_dev_unassign_memory(dev, start_addr, size);
600     if (add) {
601         /* Add given mapping, merging adjacent regions if any */
602         vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
603     } else {
604         /* Remove old mapping for this memory, if any. */
605         vhost_dev_unassign_memory(dev, start_addr, size);
606     }
607     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
608     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
609     dev->memory_changed = true;
610     used_memslots = dev->mem->nregions;
611 }
612 
613 static bool vhost_section(MemoryRegionSection *section)
614 {
615     return memory_region_is_ram(section->mr) &&
616         !memory_region_is_rom(section->mr);
617 }
618 
619 static void vhost_begin(MemoryListener *listener)
620 {
621     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
622                                          memory_listener);
623     dev->mem_changed_end_addr = 0;
624     dev->mem_changed_start_addr = -1;
625 }
626 
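/* Memory listener commit hook: once all section updates of the current
 * transaction have been folded into dev->mem, verify that the vrings are
 * still mapped, grow the dirty log if needed, push the new memory table
 * to the backend, and only then shrink the log. */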
627 static void vhost_commit(MemoryListener *listener)
628 {
629     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
630                                          memory_listener);
631     hwaddr start_addr = 0;
632     ram_addr_t size = 0;
633     uint64_t log_size;
634     int r;
635 
636     if (!dev->memory_changed) {
637         return;
638     }
639     if (!dev->started) {
640         return;
641     }
642     if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
643         return;
644     }
645 
646     if (dev->started) {
647         start_addr = dev->mem_changed_start_addr;
648         size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
649 
650         r = vhost_verify_ring_mappings(dev, start_addr, size);
651         assert(r >= 0);
652     }
653 
654     if (!dev->log_enabled) {
655         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
656         if (r < 0) {
657             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
658         }
659         dev->memory_changed = false;
660         return;
661     }
662     log_size = vhost_get_log_size(dev);
663     /* We allocate an extra 4K bytes to the log
664      * to reduce the number of reallocations. */
665 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
666     /* To log more, must increase log size before table update. */
667     if (dev->log_size < log_size) {
668         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
669     }
670     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
671     if (r < 0) {
672         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
673     }
674     /* To log less, can only decrease log size after table update. */
675     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
676         vhost_dev_log_resize(dev, log_size);
677     }
678     dev->memory_changed = false;
679 }
680 
681 static void vhost_region_add(MemoryListener *listener,
682                              MemoryRegionSection *section)
683 {
684     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
685                                          memory_listener);
686 
687     if (!vhost_section(section)) {
688         return;
689     }
690 
691     ++dev->n_mem_sections;
692     dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
693                                 dev->n_mem_sections);
694     dev->mem_sections[dev->n_mem_sections - 1] = *section;
695     memory_region_ref(section->mr);
696     vhost_set_memory(listener, section, true);
697 }
698 
699 static void vhost_region_del(MemoryListener *listener,
700                              MemoryRegionSection *section)
701 {
702     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
703                                          memory_listener);
704     int i;
705 
706     if (!vhost_section(section)) {
707         return;
708     }
709 
710     vhost_set_memory(listener, section, false);
711     memory_region_unref(section->mr);
712     for (i = 0; i < dev->n_mem_sections; ++i) {
713         if (dev->mem_sections[i].offset_within_address_space
714             == section->offset_within_address_space) {
715             --dev->n_mem_sections;
716             memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
717                     (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
718             break;
719         }
720     }
721 }
722 
723 static void vhost_region_nop(MemoryListener *listener,
724                              MemoryRegionSection *section)
725 {
726 }
727 
728 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
729                                     struct vhost_virtqueue *vq,
730                                     unsigned idx, bool enable_log)
731 {
732     struct vhost_vring_addr addr = {
733         .index = idx,
734         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
735         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
736         .used_user_addr = (uint64_t)(unsigned long)vq->used,
737         .log_guest_addr = vq->used_phys,
738         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
739     };
740     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
741     if (r < 0) {
742         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
743         return -errno;
744     }
745     return 0;
746 }
747 
748 static int vhost_dev_set_features(struct vhost_dev *dev,
749                                   bool enable_log)
750 {
751     uint64_t features = dev->acked_features;
752     int r;
753     if (enable_log) {
754         features |= 0x1ULL << VHOST_F_LOG_ALL;
755     }
756     r = dev->vhost_ops->vhost_set_features(dev, features);
757     if (r < 0) {
758         VHOST_OPS_DEBUG("vhost_set_features failed");
759     }
760     return r < 0 ? -errno : 0;
761 }
762 
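/* Toggle dirty logging on a running device: renegotiate features with or
 * without VHOST_F_LOG_ALL and reprogram every vring address accordingly.
 * On failure the previous logging state is restored. */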
763 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
764 {
765     int r, i, idx;
766     r = vhost_dev_set_features(dev, enable_log);
767     if (r < 0) {
768         goto err_features;
769     }
770     for (i = 0; i < dev->nvqs; ++i) {
771         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
772         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
773                                      enable_log);
774         if (r < 0) {
775             goto err_vq;
776         }
777     }
778     return 0;
779 err_vq:
780     for (; i >= 0; --i) {
781         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
782         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
783                                  dev->log_enabled);
784     }
785     vhost_dev_set_features(dev, dev->log_enabled);
786 err_features:
787     return r;
788 }
789 
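/* Enable or disable dirty logging for migration.  If the device is not
 * started only the flag is recorded; otherwise the log is allocated (or
 * released) and the backend is reprogrammed via vhost_dev_set_log(). */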
790 static int vhost_migration_log(MemoryListener *listener, int enable)
791 {
792     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
793                                          memory_listener);
794     int r;
795     if (!!enable == dev->log_enabled) {
796         return 0;
797     }
798     if (!dev->started) {
799         dev->log_enabled = enable;
800         return 0;
801     }
802     if (!enable) {
803         r = vhost_dev_set_log(dev, false);
804         if (r < 0) {
805             return r;
806         }
807         vhost_log_put(dev, false);
808     } else {
809         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
810         r = vhost_dev_set_log(dev, true);
811         if (r < 0) {
812             return r;
813         }
814     }
815     dev->log_enabled = enable;
816     return 0;
817 }
818 
819 static void vhost_log_global_start(MemoryListener *listener)
820 {
821     int r;
822 
823     r = vhost_migration_log(listener, true);
824     if (r < 0) {
825         abort();
826     }
827 }
828 
829 static void vhost_log_global_stop(MemoryListener *listener)
830 {
831     int r;
832 
833     r = vhost_migration_log(listener, false);
834     if (r < 0) {
835         abort();
836     }
837 }
838 
839 static void vhost_log_start(MemoryListener *listener,
840                             MemoryRegionSection *section,
841                             int old, int new)
842 {
843     /* FIXME: implement */
844 }
845 
846 static void vhost_log_stop(MemoryListener *listener,
847                            MemoryRegionSection *section,
848                            int old, int new)
849 {
850     /* FIXME: implement */
851 }
852 
853 /* The vhost driver natively knows how to handle the vrings of non
854  * cross-endian legacy devices and modern devices. Only legacy devices
855  * exposed to a bi-endian guest may require the vhost driver to use a
856  * specific endianness.
857  */
858 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
859 {
860     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
861         return false;
862     }
863 #ifdef HOST_WORDS_BIGENDIAN
864     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
865 #else
866     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
867 #endif
868 }
869 
870 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
871                                                    bool is_big_endian,
872                                                    int vhost_vq_index)
873 {
874     struct vhost_vring_state s = {
875         .index = vhost_vq_index,
876         .num = is_big_endian
877     };
878 
879     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
880         return 0;
881     }
882 
883     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
884     if (errno == ENOTTY) {
885         error_report("vhost does not support cross-endian");
886         return -ENOSYS;
887     }
888 
889     return -errno;
890 }
891 
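/* Translate a guest physical address into the userspace address of the
 * vhost memory region that contains it, returning in *len how many bytes
 * remain in that region.  Returns -EFAULT if no region matches. */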
892 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
893                                       uint64_t gpa, uint64_t *uaddr,
894                                       uint64_t *len)
895 {
896     int i;
897 
898     for (i = 0; i < hdev->mem->nregions; i++) {
899         struct vhost_memory_region *reg = hdev->mem->regions + i;
900 
901         if (gpa >= reg->guest_phys_addr &&
902             reg->guest_phys_addr + reg->memory_size > gpa) {
903             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
904             *len = reg->guest_phys_addr + reg->memory_size - gpa;
905             return 0;
906         }
907     }
908 
909     return -EFAULT;
910 }
911 
912 void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
913 {
914     IOMMUTLBEntry iotlb;
915     uint64_t uaddr, len;
916 
917     rcu_read_lock();
918 
919     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
920                                           iova, write);
921     if (iotlb.target_as != NULL) {
922         if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
923                                        &uaddr, &len)) {
924             error_report("Fail to lookup the translated address "
925                          "%"PRIx64, iotlb.translated_addr);
926             goto out;
927         }
928 
929         len = MIN(iotlb.addr_mask + 1, len);
930         iova = iova & ~iotlb.addr_mask;
931 
932         if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
933                                                       len, iotlb.perm)) {
934             error_report("Fail to update device iotlb");
935             goto out;
936         }
937     }
938 out:
939     rcu_read_unlock();
940 }
941 
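/* Program one virtqueue into the backend: vring size and last avail index,
 * legacy cross-endian handling, host mappings of the descriptor, avail and
 * used rings, the kick eventfd, and a disabled call eventfd when the guest
 * has no vector assigned. */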
942 static int vhost_virtqueue_start(struct vhost_dev *dev,
943                                 struct VirtIODevice *vdev,
944                                 struct vhost_virtqueue *vq,
945                                 unsigned idx)
946 {
947     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
948     VirtioBusState *vbus = VIRTIO_BUS(qbus);
949     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
950     hwaddr s, l, a;
951     int r;
952     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
953     struct vhost_vring_file file = {
954         .index = vhost_vq_index
955     };
956     struct vhost_vring_state state = {
957         .index = vhost_vq_index
958     };
959     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
960 
961 
962     vq->num = state.num = virtio_queue_get_num(vdev, idx);
963     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
964     if (r) {
965         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
966         return -errno;
967     }
968 
969     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
970     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
971     if (r) {
972         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
973         return -errno;
974     }
975 
976     if (vhost_needs_vring_endian(vdev)) {
977         r = vhost_virtqueue_set_vring_endian_legacy(dev,
978                                                     virtio_is_big_endian(vdev),
979                                                     vhost_vq_index);
980         if (r) {
981             return -errno;
982         }
983     }
984 
985     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
986     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
987     vq->desc = vhost_memory_map(dev, a, &l, 0);
988     if (!vq->desc || l != s) {
989         r = -ENOMEM;
990         goto fail_alloc_desc;
991     }
992     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
993     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
994     vq->avail = vhost_memory_map(dev, a, &l, 0);
995     if (!vq->avail || l != s) {
996         r = -ENOMEM;
997         goto fail_alloc_avail;
998     }
999     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1000     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1001     vq->used = vhost_memory_map(dev, a, &l, 1);
1002     if (!vq->used || l != s) {
1003         r = -ENOMEM;
1004         goto fail_alloc_used;
1005     }
1006 
1007     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1008     if (r < 0) {
1009         r = -errno;
1010         goto fail_alloc;
1011     }
1012 
1013     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1014     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1015     if (r) {
1016         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1017         r = -errno;
1018         goto fail_kick;
1019     }
1020 
1021     /* Clear and discard previous events if any. */
1022     event_notifier_test_and_clear(&vq->masked_notifier);
1023 
1024     /* Init vring in unmasked state, unless guest_notifier_mask
1025      * will do it later.
1026      */
1027     if (!vdev->use_guest_notifier_mask) {
1028         /* TODO: check and handle errors. */
1029         vhost_virtqueue_mask(dev, vdev, idx, false);
1030     }
1031 
1032     if (k->query_guest_notifiers &&
1033         k->query_guest_notifiers(qbus->parent) &&
1034         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1035         file.fd = -1;
1036         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1037         if (r) {
1038             goto fail_vector;
1039         }
1040     }
1041 
1042     return 0;
1043 
1044 fail_vector:
1045 fail_kick:
1046 fail_alloc:
1047     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1048                        0, 0);
1049 fail_alloc_used:
1050     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1051                        0, 0);
1052 fail_alloc_avail:
1053     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1054                        0, 0);
1055 fail_alloc_desc:
1056     return r;
1057 }
1058 
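/* Tear down one virtqueue: read back the last avail index so virtio can
 * resume processing in QEMU, restore native vring endianness if it was
 * overridden, and unmap the rings (marking the used ring dirty). */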
1059 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1060                                     struct VirtIODevice *vdev,
1061                                     struct vhost_virtqueue *vq,
1062                                     unsigned idx)
1063 {
1064     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1065     struct vhost_vring_state state = {
1066         .index = vhost_vq_index,
1067     };
1068     int r;
1069 
1070     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1071     if (r < 0) {
1072         VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
1073     } else {
1074         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1075     }
1076     virtio_queue_invalidate_signalled_used(vdev, idx);
1077     virtio_queue_update_used_idx(vdev, idx);
1078 
1079     /* In the cross-endian case, we need to reset the vring endianness
1080      * back to native, which is what legacy devices expect by default.
1081      */
1082     if (vhost_needs_vring_endian(vdev)) {
1083         vhost_virtqueue_set_vring_endian_legacy(dev,
1084                                                 !virtio_is_big_endian(vdev),
1085                                                 vhost_vq_index);
1086     }
1087 
1088     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1089                        1, virtio_queue_get_used_size(vdev, idx));
1090     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1091                        0, virtio_queue_get_avail_size(vdev, idx));
1092     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1093                        0, virtio_queue_get_desc_size(vdev, idx));
1094 }
1095 
1096 static void vhost_eventfd_add(MemoryListener *listener,
1097                               MemoryRegionSection *section,
1098                               bool match_data, uint64_t data, EventNotifier *e)
1099 {
1100 }
1101 
1102 static void vhost_eventfd_del(MemoryListener *listener,
1103                               MemoryRegionSection *section,
1104                               bool match_data, uint64_t data, EventNotifier *e)
1105 {
1106 }
1107 
1108 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1109                                                 int n, uint32_t timeout)
1110 {
1111     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1112     struct vhost_vring_state state = {
1113         .index = vhost_vq_index,
1114         .num = timeout,
1115     };
1116     int r;
1117 
1118     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1119         return -EINVAL;
1120     }
1121 
1122     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1123     if (r) {
1124         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1125         return r;
1126     }
1127 
1128     return 0;
1129 }
1130 
1131 static int vhost_virtqueue_init(struct vhost_dev *dev,
1132                                 struct vhost_virtqueue *vq, int n)
1133 {
1134     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1135     struct vhost_vring_file file = {
1136         .index = vhost_vq_index,
1137     };
1138     int r = event_notifier_init(&vq->masked_notifier, 0);
1139     if (r < 0) {
1140         return r;
1141     }
1142 
1143     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1144     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1145     if (r) {
1146         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1147         r = -errno;
1148         goto fail_call;
1149     }
1150 
1151     vq->dev = dev;
1152 
1153     return 0;
1154 fail_call:
1155     event_notifier_cleanup(&vq->masked_notifier);
1156     return r;
1157 }
1158 
1159 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1160 {
1161     event_notifier_cleanup(&vq->masked_notifier);
1162 }
1163 
1164 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
1165 {
1166     struct vhost_dev *hdev = container_of(n, struct vhost_dev, n);
1167 
1168     if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
1169                                                        iotlb->iova,
1170                                                        iotlb->addr_mask + 1)) {
1171         error_report("Fail to invalidate device iotlb");
1172     }
1173 }
1174 
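/* Initialize a vhost device: bind and initialize the backend, take
 * ownership, query its features, set up the virtqueues and busyloop
 * timeouts, install a migration blocker when dirty logging is unavailable,
 * and register the memory listener.  On failure, the virtqueues that were
 * already initialized are torn down through vhost_dev_cleanup(). */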
1175 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1176                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1177 {
1178     uint64_t features;
1179     int i, r, n_initialized_vqs = 0;
1180     Error *local_err = NULL;
1181 
1182     hdev->vdev = NULL;
1183     hdev->migration_blocker = NULL;
1184 
1185     r = vhost_set_backend_type(hdev, backend_type);
1186     assert(r >= 0);
1187 
1188     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1189     if (r < 0) {
1190         goto fail;
1191     }
1192 
1193     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1194         error_report("vhost backend memory slots limit is less"
1195                 " than current number of present memory slots");
1196         r = -1;
1197         goto fail;
1198     }
1199 
1200     r = hdev->vhost_ops->vhost_set_owner(hdev);
1201     if (r < 0) {
1202         VHOST_OPS_DEBUG("vhost_set_owner failed");
1203         goto fail;
1204     }
1205 
1206     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1207     if (r < 0) {
1208         VHOST_OPS_DEBUG("vhost_get_features failed");
1209         goto fail;
1210     }
1211 
1212     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1213         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1214         if (r < 0) {
1215             goto fail;
1216         }
1217     }
1218 
1219     if (busyloop_timeout) {
1220         for (i = 0; i < hdev->nvqs; ++i) {
1221             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1222                                                      busyloop_timeout);
1223             if (r < 0) {
1224                 goto fail_busyloop;
1225             }
1226         }
1227     }
1228 
1229     hdev->features = features;
1230 
1231     hdev->memory_listener = (MemoryListener) {
1232         .begin = vhost_begin,
1233         .commit = vhost_commit,
1234         .region_add = vhost_region_add,
1235         .region_del = vhost_region_del,
1236         .region_nop = vhost_region_nop,
1237         .log_start = vhost_log_start,
1238         .log_stop = vhost_log_stop,
1239         .log_sync = vhost_log_sync,
1240         .log_global_start = vhost_log_global_start,
1241         .log_global_stop = vhost_log_global_stop,
1242         .eventfd_add = vhost_eventfd_add,
1243         .eventfd_del = vhost_eventfd_del,
1244         .priority = 10
1245     };
1246 
1247     hdev->n.notify = vhost_iommu_unmap_notify;
1248     hdev->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
1249 
1250     if (hdev->migration_blocker == NULL) {
1251         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1252             error_setg(&hdev->migration_blocker,
1253                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1254         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
1255             error_setg(&hdev->migration_blocker,
1256                        "Migration disabled: failed to allocate shared memory");
1257         }
1258     }
1259 
1260     if (hdev->migration_blocker != NULL) {
1261         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1262         if (local_err) {
1263             error_report_err(local_err);
1264             error_free(hdev->migration_blocker);
1265             goto fail_busyloop;
1266         }
1267     }
1268 
1269     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1270     hdev->n_mem_sections = 0;
1271     hdev->mem_sections = NULL;
1272     hdev->log = NULL;
1273     hdev->log_size = 0;
1274     hdev->log_enabled = false;
1275     hdev->started = false;
1276     hdev->memory_changed = false;
1277     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1278     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1279     return 0;
1280 
1281 fail_busyloop:
1282     while (--i >= 0) {
1283         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1284     }
1285 fail:
1286     hdev->nvqs = n_initialized_vqs;
1287     vhost_dev_cleanup(hdev);
1288     return r;
1289 }
1290 
1291 void vhost_dev_cleanup(struct vhost_dev *hdev)
1292 {
1293     int i;
1294 
1295     for (i = 0; i < hdev->nvqs; ++i) {
1296         vhost_virtqueue_cleanup(hdev->vqs + i);
1297     }
1298     if (hdev->mem) {
1299         /* those are only safe after successful init */
1300         memory_listener_unregister(&hdev->memory_listener);
1301         QLIST_REMOVE(hdev, entry);
1302     }
1303     if (hdev->migration_blocker) {
1304         migrate_del_blocker(hdev->migration_blocker);
1305         error_free(hdev->migration_blocker);
1306     }
1307     g_free(hdev->mem);
1308     g_free(hdev->mem_sections);
1309     if (hdev->vhost_ops) {
1310         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1311     }
1312     assert(!hdev->log);
1313 
1314     memset(hdev, 0, sizeof(struct vhost_dev));
1315 }
1316 
1317 /* Stop processing guest IO notifications in qemu.
1318  * Start processing them in vhost in kernel.
1319  */
1320 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1321 {
1322     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1323     int i, r, e;
1324 
1325     /* We will pass the notifiers to the kernel; make sure that QEMU
1326      * doesn't interfere.
1327      */
1328     r = virtio_device_grab_ioeventfd(vdev);
1329     if (r < 0) {
1330         error_report("binding does not support host notifiers");
1331         goto fail;
1332     }
1333 
1334     for (i = 0; i < hdev->nvqs; ++i) {
1335         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1336                                          true);
1337         if (r < 0) {
1338             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1339             goto fail_vq;
1340         }
1341     }
1342 
1343     return 0;
1344 fail_vq:
1345     while (--i >= 0) {
1346         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1347                                          false);
1348         if (e < 0) {
1349             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1350         }
1351         assert (e >= 0);
1352     }
1353     virtio_device_release_ioeventfd(vdev);
1354 fail:
1355     return r;
1356 }
1357 
1358 /* Stop processing guest IO notifications in vhost.
1359  * Start processing them in qemu.
1360  * This might actually run the qemu handlers right away,
1361  * so virtio in qemu must be completely setup when this is called.
1362  */
1363 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1364 {
1365     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1366     int i, r;
1367 
1368     for (i = 0; i < hdev->nvqs; ++i) {
1369         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1370                                          false);
1371         if (r < 0) {
1372             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1373         }
1374         assert (r >= 0);
1375     }
1376     virtio_device_release_ioeventfd(vdev);
1377 }
1378 
1379 /* Test and clear event pending status.
1380  * Should be called after unmask to avoid losing events.
1381  */
1382 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1383 {
1384     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1385     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1386     return event_notifier_test_and_clear(&vq->masked_notifier);
1387 }
1388 
1389 /* Mask/unmask events from this vq. */
1390 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1391                          bool mask)
1392 {
1393     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1394     int r, index = n - hdev->vq_index;
1395     struct vhost_vring_file file;
1396 
1397     /* should only be called after backend is connected */
1398     assert(hdev->vhost_ops);
1399 
1400     if (mask) {
1401         assert(vdev->use_guest_notifier_mask);
1402         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1403     } else {
1404         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1405     }
1406 
1407     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1408     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1409     if (r < 0) {
1410         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1411     }
1412 }
1413 
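/* Filter a device's feature set against what the backend advertised: every
 * bit listed in 'feature_bits' that the vhost backend does not support is
 * cleared from 'features'.  Device backends (vhost-net and friends)
 * typically call this when the guest reads the host features, and
 * vhost_ack_features() below when the guest acknowledges them. */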
1414 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1415                             uint64_t features)
1416 {
1417     const int *bit = feature_bits;
1418     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1419         uint64_t bit_mask = (1ULL << *bit);
1420         if (!(hdev->features & bit_mask)) {
1421             features &= ~bit_mask;
1422         }
1423         bit++;
1424     }
1425     return features;
1426 }
1427 
1428 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1429                         uint64_t features)
1430 {
1431     const int *bit = feature_bits;
1432     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1433         uint64_t bit_mask = (1ULL << *bit);
1434         if (features & bit_mask) {
1435             hdev->acked_features |= bit_mask;
1436         }
1437         bit++;
1438     }
1439 }
1440 
1441 /* Host notifiers must be enabled at this point. */
1442 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1443 {
1444     int i, r;
1445 
1446     /* should only be called after backend is connected */
1447     assert(hdev->vhost_ops);
1448 
1449     hdev->started = true;
1450     hdev->vdev = vdev;
1451 
1452     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1453     if (r < 0) {
1454         goto fail_features;
1455     }
1456 
1457     if (vhost_dev_has_iommu(hdev)) {
1458         memory_region_register_iommu_notifier(vdev->dma_as->root,
1459                                               &hdev->n);
1460     }
1461 
1462     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1463     if (r < 0) {
1464         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1465         r = -errno;
1466         goto fail_mem;
1467     }
1468     for (i = 0; i < hdev->nvqs; ++i) {
1469         r = vhost_virtqueue_start(hdev,
1470                                   vdev,
1471                                   hdev->vqs + i,
1472                                   hdev->vq_index + i);
1473         if (r < 0) {
1474             goto fail_vq;
1475         }
1476     }
1477 
1478     if (hdev->log_enabled) {
1479         uint64_t log_base;
1480 
1481         hdev->log_size = vhost_get_log_size(hdev);
1482         hdev->log = vhost_log_get(hdev->log_size,
1483                                   vhost_dev_log_is_shared(hdev));
1484         log_base = (uintptr_t)hdev->log->log;
1485         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1486                                                 hdev->log_size ? log_base : 0,
1487                                                 hdev->log);
1488         if (r < 0) {
1489             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1490             r = -errno;
1491             goto fail_log;
1492         }
1493     }
1494 
1495     if (vhost_dev_has_iommu(hdev)) {
1496         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1497 
1498         /* Update used ring information for IOTLB to work correctly;
1499          * the vhost-kernel code requires this. */
1500         for (i = 0; i < hdev->nvqs; ++i) {
1501             struct vhost_virtqueue *vq = hdev->vqs + i;
1502             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1503         }
1504     }
1505     return 0;
1506 fail_log:
1507     vhost_log_put(hdev, false);
1508 fail_vq:
1509     while (--i >= 0) {
1510         vhost_virtqueue_stop(hdev,
1511                              vdev,
1512                              hdev->vqs + i,
1513                              hdev->vq_index + i);
1514     }
1515     i = hdev->nvqs;
1516 
1517 fail_mem:
1518 fail_features:
1519 
1520     hdev->started = false;
1521     return r;
1522 }
1523 
1524 /* Host notifiers must be enabled at this point. */
1525 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1526 {
1527     int i;
1528 
1529     /* should only be called after backend is connected */
1530     assert(hdev->vhost_ops);
1531 
1532     for (i = 0; i < hdev->nvqs; ++i) {
1533         vhost_virtqueue_stop(hdev,
1534                              vdev,
1535                              hdev->vqs + i,
1536                              hdev->vq_index + i);
1537     }
1538 
1539     if (vhost_dev_has_iommu(hdev)) {
1540         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1541         memory_region_unregister_iommu_notifier(vdev->dma_as->root,
1542                                                 &hdev->n);
1543     }
1544     vhost_log_put(hdev, true);
1545     hdev->started = false;
1546     hdev->vdev = NULL;
1547 }
1548 
1549 int vhost_net_set_backend(struct vhost_dev *hdev,
1550                           struct vhost_vring_file *file)
1551 {
1552     if (hdev->vhost_ops->vhost_net_set_backend) {
1553         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1554     }
1555 
1556     return -1;
1557 }
1558