xref: /openbmc/qemu/hw/virtio/vhost.c (revision 795c40b8)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include <linux/vhost.h>
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/blocker.h"
29 #include "sysemu/dma.h"
30 
31 /* enabled until disconnected backend stabilizes */
32 #define _VHOST_DEBUG 1
33 
34 #ifdef _VHOST_DEBUG
35 #define VHOST_OPS_DEBUG(fmt, ...) \
36     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
37                       strerror(errno), errno); } while (0)
38 #else
39 #define VHOST_OPS_DEBUG(fmt, ...) \
40     do { } while (0)
41 #endif
42 
43 static struct vhost_log *vhost_log;
44 static struct vhost_log *vhost_log_shm;
45 
46 static unsigned int used_memslots;
47 static QLIST_HEAD(, vhost_dev) vhost_devices =
48     QLIST_HEAD_INITIALIZER(vhost_devices);
49 
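/* Return true if at least one more memory slot can be used without
 * exceeding the memslot limit of any registered vhost backend. */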
50 bool vhost_has_free_slot(void)
51 {
52     unsigned int slots_limit = ~0U;
53     struct vhost_dev *hdev;
54 
55     QLIST_FOREACH(hdev, &vhost_devices, entry) {
56         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
57         slots_limit = MIN(slots_limit, r);
58     }
59     return slots_limit > used_memslots;
60 }
61 
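/* Sync dirty pages for the intersection of the section range
 * [mfirst, mlast] and the logged range [rfirst, rlast]: atomically
 * fetch-and-clear each covered log chunk and mark every page whose
 * bit was set as dirty in the section's MemoryRegion. */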
62 static void vhost_dev_sync_region(struct vhost_dev *dev,
63                                   MemoryRegionSection *section,
64                                   uint64_t mfirst, uint64_t mlast,
65                                   uint64_t rfirst, uint64_t rlast)
66 {
67     vhost_log_chunk_t *log = dev->log->log;
68 
69     uint64_t start = MAX(mfirst, rfirst);
70     uint64_t end = MIN(mlast, rlast);
71     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
72     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
73     uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
74 
75     if (end < start) {
76         return;
77     }
78     assert(end / VHOST_LOG_CHUNK < dev->log_size);
79     assert(start / VHOST_LOG_CHUNK < dev->log_size);
80 
81     for (; from < to; ++from) {
82         vhost_log_chunk_t log;
83         /* We first check with non-atomic: much cheaper,
84          * and we expect non-dirty to be the common case. */
85         if (!*from) {
86             addr += VHOST_LOG_CHUNK;
87             continue;
88         }
89         /* Data must be read atomically. We don't really need barrier semantics
90          * but it's easier to use atomic_* than roll our own. */
91         log = atomic_xchg(from, 0);
92         while (log) {
93             int bit = ctzl(log);
94             hwaddr page_addr;
95             hwaddr section_offset;
96             hwaddr mr_offset;
97             page_addr = addr + bit * VHOST_LOG_PAGE;
98             section_offset = page_addr - section->offset_within_address_space;
99             mr_offset = section_offset + section->offset_within_region;
100             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
101             log &= ~(0x1ull << bit);
102         }
103         addr += VHOST_LOG_CHUNK;
104     }
105 }
106 
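/* Feed the vhost dirty log back into QEMU's dirty bitmap for the part
 * of a memory section that falls within [first, last].  Both guest
 * memory regions and the used rings are covered, since the backend
 * logs its own used-ring writes. */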
107 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
108                                    MemoryRegionSection *section,
109                                    hwaddr first,
110                                    hwaddr last)
111 {
112     int i;
113     hwaddr start_addr;
114     hwaddr end_addr;
115 
116     if (!dev->log_enabled || !dev->started) {
117         return 0;
118     }
119     start_addr = section->offset_within_address_space;
120     end_addr = range_get_last(start_addr, int128_get64(section->size));
121     start_addr = MAX(first, start_addr);
122     end_addr = MIN(last, end_addr);
123 
124     for (i = 0; i < dev->mem->nregions; ++i) {
125         struct vhost_memory_region *reg = dev->mem->regions + i;
126         vhost_dev_sync_region(dev, section, start_addr, end_addr,
127                               reg->guest_phys_addr,
128                               range_get_last(reg->guest_phys_addr,
129                                              reg->memory_size));
130     }
131     for (i = 0; i < dev->nvqs; ++i) {
132         struct vhost_virtqueue *vq = dev->vqs + i;
133         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
134                               range_get_last(vq->used_phys, vq->used_size));
135     }
136     return 0;
137 }
138 
139 static void vhost_log_sync(MemoryListener *listener,
140                           MemoryRegionSection *section)
141 {
142     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
143                                          memory_listener);
144     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
145 }
146 
147 static void vhost_log_sync_range(struct vhost_dev *dev,
148                                  hwaddr first, hwaddr last)
149 {
150     int i;
151     /* FIXME: this is N^2 in number of sections */
152     for (i = 0; i < dev->n_mem_sections; ++i) {
153         MemoryRegionSection *section = &dev->mem_sections[i];
154         vhost_sync_dirty_bitmap(dev, section, first, last);
155     }
156 }
157 
158 /* Assign/unassign. Keep an unsorted array of non-overlapping
159  * memory regions in dev->mem. */
160 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
161                                       uint64_t start_addr,
162                                       uint64_t size)
163 {
164     int from, to, n = dev->mem->nregions;
165     /* Track overlapping/split regions for sanity checking. */
166     int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
167 
168     for (from = 0, to = 0; from < n; ++from, ++to) {
169         struct vhost_memory_region *reg = dev->mem->regions + to;
170         uint64_t reglast;
171         uint64_t memlast;
172         uint64_t change;
173 
174         /* clone old region */
175         if (to != from) {
176             memcpy(reg, dev->mem->regions + from, sizeof *reg);
177         }
178 
179         /* No overlap is simple */
180         if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
181                             start_addr, size)) {
182             continue;
183         }
184 
185         /* Split only happens if supplied region
186          * is in the middle of an existing one. Thus it can not
187          * overlap with any other existing region. */
188         assert(!split);
189 
190         reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
191         memlast = range_get_last(start_addr, size);
192 
193         /* Remove whole region */
194         if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
195             --dev->mem->nregions;
196             --to;
197             ++overlap_middle;
198             continue;
199         }
200 
201         /* Shrink region */
202         if (memlast >= reglast) {
203             reg->memory_size = start_addr - reg->guest_phys_addr;
204             assert(reg->memory_size);
205             assert(!overlap_end);
206             ++overlap_end;
207             continue;
208         }
209 
210         /* Shift region */
211         if (start_addr <= reg->guest_phys_addr) {
212             change = memlast + 1 - reg->guest_phys_addr;
213             reg->memory_size -= change;
214             reg->guest_phys_addr += change;
215             reg->userspace_addr += change;
216             assert(reg->memory_size);
217             assert(!overlap_start);
218             ++overlap_start;
219             continue;
220         }
221 
222         /* This only happens if supplied region
223          * is in the middle of an existing one. Thus it can not
224          * overlap with any other existing region. */
225         assert(!overlap_start);
226         assert(!overlap_end);
227         assert(!overlap_middle);
228         /* Split region: shrink first part, shift second part. */
229         memcpy(dev->mem->regions + n, reg, sizeof *reg);
230         reg->memory_size = start_addr - reg->guest_phys_addr;
231         assert(reg->memory_size);
232         change = memlast + 1 - reg->guest_phys_addr;
233         reg = dev->mem->regions + n;
234         reg->memory_size -= change;
235         assert(reg->memory_size);
236         reg->guest_phys_addr += change;
237         reg->userspace_addr += change;
238         /* Never add more than 1 region */
239         assert(dev->mem->nregions == n);
240         ++dev->mem->nregions;
241         ++split;
242     }
243 }
244 
245 /* Called after unassign, so no regions overlap the given range. */
246 static void vhost_dev_assign_memory(struct vhost_dev *dev,
247                                     uint64_t start_addr,
248                                     uint64_t size,
249                                     uint64_t uaddr)
250 {
251     int from, to;
252     struct vhost_memory_region *merged = NULL;
253     for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
254         struct vhost_memory_region *reg = dev->mem->regions + to;
255         uint64_t prlast, urlast;
256         uint64_t pmlast, umlast;
257         uint64_t s, e, u;
258 
259         /* clone old region */
260         if (to != from) {
261             memcpy(reg, dev->mem->regions + from, sizeof *reg);
262         }
263         prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
264         pmlast = range_get_last(start_addr, size);
265         urlast = range_get_last(reg->userspace_addr, reg->memory_size);
266         umlast = range_get_last(uaddr, size);
267 
268         /* check for overlapping regions: should never happen. */
269         assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
270         /* Not an adjacent or overlapping region - do not merge. */
271         if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
272             (pmlast + 1 != reg->guest_phys_addr ||
273              umlast + 1 != reg->userspace_addr)) {
274             continue;
275         }
276 
277         if (dev->vhost_ops->vhost_backend_can_merge &&
278             !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
279                                                      reg->userspace_addr,
280                                                      reg->memory_size)) {
281             continue;
282         }
283 
284         if (merged) {
285             --to;
286             assert(to >= 0);
287         } else {
288             merged = reg;
289         }
290         u = MIN(uaddr, reg->userspace_addr);
291         s = MIN(start_addr, reg->guest_phys_addr);
292         e = MAX(pmlast, prlast);
293         uaddr = merged->userspace_addr = u;
294         start_addr = merged->guest_phys_addr = s;
295         size = merged->memory_size = e - s + 1;
296         assert(merged->memory_size);
297     }
298 
299     if (!merged) {
300         struct vhost_memory_region *reg = dev->mem->regions + to;
301         memset(reg, 0, sizeof *reg);
302         reg->memory_size = size;
303         assert(reg->memory_size);
304         reg->guest_phys_addr = start_addr;
305         reg->userspace_addr = uaddr;
306         ++to;
307     }
308     assert(to <= dev->mem->nregions + 1);
309     dev->mem->nregions = to;
310 }
311 
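/* Compute the required log size in chunks.  The log is a bitmap with
 * one bit per VHOST_LOG_PAGE of guest memory, so one vhost_log_chunk_t
 * covers VHOST_LOG_CHUNK bytes (e.g. 256KiB for 4KiB pages and 64-bit
 * chunks).  The result must cover the highest guest-physical address
 * touched by any memory region or used ring. */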
312 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
313 {
314     uint64_t log_size = 0;
315     int i;
316     for (i = 0; i < dev->mem->nregions; ++i) {
317         struct vhost_memory_region *reg = dev->mem->regions + i;
318         uint64_t last = range_get_last(reg->guest_phys_addr,
319                                        reg->memory_size);
320         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
321     }
322     for (i = 0; i < dev->nvqs; ++i) {
323         struct vhost_virtqueue *vq = dev->vqs + i;
324         uint64_t last = vq->used_phys + vq->used_size - 1;
325         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
326     }
327     return log_size;
328 }
329 
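/* Allocate a dirty log.  When the log must be shared with another
 * process (e.g. a vhost-user backend), back it with a sealed memfd
 * whose descriptor is kept in log->fd; otherwise use plain heap
 * memory. */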
330 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
331 {
332     struct vhost_log *log;
333     uint64_t logsize = size * sizeof(*(log->log));
334     int fd = -1;
335 
336     log = g_new0(struct vhost_log, 1);
337     if (share) {
338         log->log = qemu_memfd_alloc("vhost-log", logsize,
339                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
340                                     &fd);
341         memset(log->log, 0, logsize);
342     } else {
343         log->log = g_malloc0(logsize);
344     }
345 
346     log->size = size;
347     log->refcnt = 1;
348     log->fd = fd;
349 
350     return log;
351 }
352 
353 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
354 {
355     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
356 
357     if (!log || log->size != size) {
358         log = vhost_log_alloc(size, share);
359         if (share) {
360             vhost_log_shm = log;
361         } else {
362             vhost_log = log;
363         }
364     } else {
365         ++log->refcnt;
366     }
367 
368     return log;
369 }
370 
371 static void vhost_log_put(struct vhost_dev *dev, bool sync)
372 {
373     struct vhost_log *log = dev->log;
374 
375     if (!log) {
376         return;
377     }
378     --log->refcnt;
379     if (log->refcnt == 0) {
380         /* Sync only the range covered by the old log */
381         if (dev->log_size && sync) {
382             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
383         }
384 
385         if (vhost_log == log) {
386             g_free(log->log);
387             vhost_log = NULL;
388         } else if (vhost_log_shm == log) {
389             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
390                             log->fd);
391             vhost_log_shm = NULL;
392         }
393 
394         g_free(log);
395     }
396     /* Clear these only after the final sync, which reads dev->log_size. */
397     dev->log = NULL;
398     dev->log_size = 0;
399 }
400 
401 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
402 {
403     return dev->vhost_ops->vhost_requires_shm_log &&
404            dev->vhost_ops->vhost_requires_shm_log(dev);
405 }
406 
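/* Switch the device over to a new log of the given size.  The old log
 * is released (and synced) only after the backend has been pointed at
 * the new one, so no dirty pages are lost in between. */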
407 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
408 {
409     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
410     uint64_t log_base = (uintptr_t)log->log;
411     int r;
412 
413     /* Inform the backend about the log switch; this must be done before
414        releasing the current log, to ensure no logging is lost. */
415     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
416     if (r < 0) {
417         VHOST_OPS_DEBUG("vhost_set_log_base failed");
418     }
419 
420     vhost_log_put(dev, true);
421     dev->log = log;
422     dev->log_size = size;
423 }
424 
425 static int vhost_dev_has_iommu(struct vhost_dev *dev)
426 {
427     VirtIODevice *vdev = dev->vdev;
428 
429     return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
430 }
431 
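/* When the device sits behind an IOMMU, ring addresses are guest IOVAs
 * that the backend translates through the IOTLB, so they are passed
 * through unmapped; otherwise they are guest-physical addresses and
 * are mapped here. */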
432 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
433                               hwaddr *plen, int is_write)
434 {
435     if (!vhost_dev_has_iommu(dev)) {
436         return cpu_physical_memory_map(addr, plen, is_write);
437     } else {
438         return (void *)(uintptr_t)addr;
439     }
440 }
441 
442 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
443                                hwaddr len, int is_write,
444                                hwaddr access_len)
445 {
446     if (!vhost_dev_has_iommu(dev)) {
447         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
448     }
449 }
450 
451 static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
452                                           void *part,
453                                           uint64_t part_addr,
454                                           uint64_t part_size,
455                                           uint64_t start_addr,
456                                           uint64_t size)
457 {
458     hwaddr l;
459     void *p;
460     int r = 0;
461 
462     if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
463         return 0;
464     }
465     l = part_size;
466     p = vhost_memory_map(dev, part_addr, &l, 1);
467     if (!p || l != part_size) {
468         r = -ENOMEM;
469     }
470     if (p != part) {
471         r = -EBUSY;
472     }
473     vhost_memory_unmap(dev, p, l, 0, 0);
474     return r;
475 }
476 
477 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
478                                       uint64_t start_addr,
479                                       uint64_t size)
480 {
481     int i, j;
482     int r = 0;
483     const char *part_name[] = {
484         "descriptor table",
485         "available ring",
486         "used ring"
487     };
488 
489     for (i = 0; i < dev->nvqs; ++i) {
490         struct vhost_virtqueue *vq = dev->vqs + i;
491 
492         j = 0;
493         r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
494                                            vq->desc_size, start_addr, size);
495         if (r) {
496             break;
497         }
498 
499         j++;
500         r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
501                                            vq->avail_size, start_addr, size);
502         if (r) {
503             break;
504         }
505 
506         j++;
507         r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
508                                            vq->used_size, start_addr, size);
509         if (r) {
510             break;
511         }
512     }
513 
514     if (r == -ENOMEM) {
515         error_report("Unable to map %s for ring %d", part_name[j], i);
516     } else if (r == -EBUSY) {
517         error_report("%s relocated for ring %d", part_name[j], i);
518     }
519     return r;
520 }
521 
522 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
523 						      uint64_t start_addr,
524 						      uint64_t size)
525 {
526     int i, n = dev->mem->nregions;
527     for (i = 0; i < n; ++i) {
528         struct vhost_memory_region *reg = dev->mem->regions + i;
529         if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
530                            start_addr, size)) {
531             return reg;
532         }
533     }
534     return NULL;
535 }
536 
537 static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
538                                  uint64_t start_addr,
539                                  uint64_t size,
540                                  uint64_t uaddr)
541 {
542     struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
543     uint64_t reglast;
544     uint64_t memlast;
545 
546     if (!reg) {
547         return true;
548     }
549 
550     reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
551     memlast = range_get_last(start_addr, size);
552 
553     /* Need to extend region? */
554     if (start_addr < reg->guest_phys_addr || memlast > reglast) {
555         return true;
556     }
557     /* userspace_addr changed? */
558     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
559 }
560 
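/* Translate a memory section add/remove into the flat region table in
 * dev->mem.  No-op changes are filtered out early; otherwise the
 * affected range is accumulated so that vhost_commit() can push a
 * single table update to the backend. */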
561 static void vhost_set_memory(MemoryListener *listener,
562                              MemoryRegionSection *section,
563                              bool add)
564 {
565     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
566                                          memory_listener);
567     hwaddr start_addr = section->offset_within_address_space;
568     ram_addr_t size = int128_get64(section->size);
569     bool log_dirty =
570         memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
571     int s = offsetof(struct vhost_memory, regions) +
572         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
573     void *ram;
574 
575     dev->mem = g_realloc(dev->mem, s);
576 
577     if (log_dirty) {
578         add = false;
579     }
580 
581     assert(size);
582 
583     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
584     ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
585     if (add) {
586         if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
587             /* Region exists with same address. Nothing to do. */
588             return;
589         }
590     } else {
591         if (!vhost_dev_find_reg(dev, start_addr, size)) {
592             /* Removing region that we don't access. Nothing to do. */
593             return;
594         }
595     }
596 
597     vhost_dev_unassign_memory(dev, start_addr, size);
598     if (add) {
599         /* Add given mapping, merging adjacent regions if any */
600         vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
601     } else {
602         /* Remove old mapping for this memory, if any. */
603         vhost_dev_unassign_memory(dev, start_addr, size);
604     }
605     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
606     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
607     dev->memory_changed = true;
608     used_memslots = dev->mem->nregions;
609 }
610 
611 static bool vhost_section(MemoryRegionSection *section)
612 {
613     return memory_region_is_ram(section->mr) &&
614         !memory_region_is_rom(section->mr);
615 }
616 
617 static void vhost_begin(MemoryListener *listener)
618 {
619     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
620                                          memory_listener);
621     dev->mem_changed_end_addr = 0;
622     dev->mem_changed_start_addr = -1;
623 }
624 
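/* Push the accumulated memory table to the backend.  With dirty
 * logging enabled, the log must be grown before the table update (so
 * every new region is covered from the start) and may only be shrunk
 * afterwards. */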
625 static void vhost_commit(MemoryListener *listener)
626 {
627     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
628                                          memory_listener);
629     hwaddr start_addr = 0;
630     ram_addr_t size = 0;
631     uint64_t log_size;
632     int r;
633 
634     if (!dev->memory_changed) {
635         return;
636     }
637     if (!dev->started) {
638         return;
639     }
640     if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
641         return;
642     }
643 
644     if (dev->started) {
645         start_addr = dev->mem_changed_start_addr;
646         size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
647 
648         r = vhost_verify_ring_mappings(dev, start_addr, size);
649         assert(r >= 0);
650     }
651 
652     if (!dev->log_enabled) {
653         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
654         if (r < 0) {
655             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
656         }
657         dev->memory_changed = false;
658         return;
659     }
660     log_size = vhost_get_log_size(dev);
661     /* We allocate an extra 4K bytes to the log
662      * to reduce the number of reallocations. */
663 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
664     /* To log more, must increase log size before table update. */
665     if (dev->log_size < log_size) {
666         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
667     }
668     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
669     if (r < 0) {
670         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
671     }
672     /* To log less, can only decrease log size after table update. */
673     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
674         vhost_dev_log_resize(dev, log_size);
675     }
676     dev->memory_changed = false;
677 }
678 
679 static void vhost_region_add(MemoryListener *listener,
680                              MemoryRegionSection *section)
681 {
682     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
683                                          memory_listener);
684 
685     if (!vhost_section(section)) {
686         return;
687     }
688 
689     ++dev->n_mem_sections;
690     dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
691                                 dev->n_mem_sections);
692     dev->mem_sections[dev->n_mem_sections - 1] = *section;
693     memory_region_ref(section->mr);
694     vhost_set_memory(listener, section, true);
695 }
696 
697 static void vhost_region_del(MemoryListener *listener,
698                              MemoryRegionSection *section)
699 {
700     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
701                                          memory_listener);
702     int i;
703 
704     if (!vhost_section(section)) {
705         return;
706     }
707 
708     vhost_set_memory(listener, section, false);
709     memory_region_unref(section->mr);
710     for (i = 0; i < dev->n_mem_sections; ++i) {
711         if (dev->mem_sections[i].offset_within_address_space
712             == section->offset_within_address_space) {
713             --dev->n_mem_sections;
714             memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
715                     (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
716             break;
717         }
718     }
719 }
720 
721 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
722 {
723     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
724     struct vhost_dev *hdev = iommu->hdev;
725     hwaddr iova = iotlb->iova + iommu->iommu_offset;
726 
727     if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev, iova,
728                                                        iotlb->addr_mask + 1)) {
729         error_report("Fail to invalidate device iotlb");
730     }
731 }
732 
733 static void vhost_iommu_region_add(MemoryListener *listener,
734                                    MemoryRegionSection *section)
735 {
736     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
737                                          iommu_listener);
738     struct vhost_iommu *iommu;
739     Int128 end;
740 
741     if (!memory_region_is_iommu(section->mr)) {
742         return;
743     }
744 
745     iommu = g_malloc0(sizeof(*iommu));
746     end = int128_add(int128_make64(section->offset_within_region),
747                      section->size);
748     end = int128_sub(end, int128_one());
749     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
750                         IOMMU_NOTIFIER_UNMAP,
751                         section->offset_within_region,
752                         int128_get64(end));
753     iommu->mr = section->mr;
754     iommu->iommu_offset = section->offset_within_address_space -
755                           section->offset_within_region;
756     iommu->hdev = dev;
757     memory_region_register_iommu_notifier(section->mr, &iommu->n);
758     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
759     /* TODO: can replay help performance here? */
760 }
761 
762 static void vhost_iommu_region_del(MemoryListener *listener,
763                                    MemoryRegionSection *section)
764 {
765     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
766                                          iommu_listener);
767     struct vhost_iommu *iommu;
768 
769     if (!memory_region_is_iommu(section->mr)) {
770         return;
771     }
772 
773     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
774         if (iommu->mr == section->mr &&
775             iommu->n.start == section->offset_within_region) {
776             memory_region_unregister_iommu_notifier(iommu->mr,
777                                                     &iommu->n);
778             QLIST_REMOVE(iommu, iommu_next);
779             g_free(iommu);
780             break;
781         }
782     }
783 }
784 
785 static void vhost_region_nop(MemoryListener *listener,
786                              MemoryRegionSection *section)
787 {
788 }
789 
790 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
791                                     struct vhost_virtqueue *vq,
792                                     unsigned idx, bool enable_log)
793 {
794     struct vhost_vring_addr addr = {
795         .index = idx,
796         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
797         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
798         .used_user_addr = (uint64_t)(unsigned long)vq->used,
799         .log_guest_addr = vq->used_phys,
800         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
801     };
802     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
803     if (r < 0) {
804         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
805         return -errno;
806     }
807     return 0;
808 }
809 
810 static int vhost_dev_set_features(struct vhost_dev *dev,
811                                   bool enable_log)
812 {
813     uint64_t features = dev->acked_features;
814     int r;
815     if (enable_log) {
816         features |= 0x1ULL << VHOST_F_LOG_ALL;
817     }
818     r = dev->vhost_ops->vhost_set_features(dev, features);
819     if (r < 0) {
820         VHOST_OPS_DEBUG("vhost_set_features failed");
821     }
822     return r < 0 ? -errno : 0;
823 }
824 
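/* Toggle dirty logging in the backend: renegotiate features with
 * VHOST_F_LOG_ALL set or cleared and update the VHOST_VRING_F_LOG flag
 * of every vring, rolling back to the previous state on failure. */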
825 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
826 {
827     int r, i, idx;
828     r = vhost_dev_set_features(dev, enable_log);
829     if (r < 0) {
830         goto err_features;
831     }
832     for (i = 0; i < dev->nvqs; ++i) {
833         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
834         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
835                                      enable_log);
836         if (r < 0) {
837             goto err_vq;
838         }
839     }
840     return 0;
841 err_vq:
842     for (; i >= 0; --i) {
843         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
844         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
845                                  dev->log_enabled);
846     }
847     vhost_dev_set_features(dev, dev->log_enabled);
848 err_features:
849     return r;
850 }
851 
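/* Called when migration turns dirty-page tracking on or off.  For a
 * running device this allocates or releases the log and toggles
 * logging in the backend; for a stopped device it only records the
 * desired state for the next start. */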
852 static int vhost_migration_log(MemoryListener *listener, int enable)
853 {
854     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
855                                          memory_listener);
856     int r;
857     if (!!enable == dev->log_enabled) {
858         return 0;
859     }
860     if (!dev->started) {
861         dev->log_enabled = enable;
862         return 0;
863     }
864     if (!enable) {
865         r = vhost_dev_set_log(dev, false);
866         if (r < 0) {
867             return r;
868         }
869         vhost_log_put(dev, false);
870     } else {
871         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
872         r = vhost_dev_set_log(dev, true);
873         if (r < 0) {
874             return r;
875         }
876     }
877     dev->log_enabled = enable;
878     return 0;
879 }
880 
881 static void vhost_log_global_start(MemoryListener *listener)
882 {
883     int r;
884 
885     r = vhost_migration_log(listener, true);
886     if (r < 0) {
887         abort();
888     }
889 }
890 
891 static void vhost_log_global_stop(MemoryListener *listener)
892 {
893     int r;
894 
895     r = vhost_migration_log(listener, false);
896     if (r < 0) {
897         abort();
898     }
899 }
900 
901 static void vhost_log_start(MemoryListener *listener,
902                             MemoryRegionSection *section,
903                             int old, int new)
904 {
905     /* FIXME: implement */
906 }
907 
908 static void vhost_log_stop(MemoryListener *listener,
909                            MemoryRegionSection *section,
910                            int old, int new)
911 {
912     /* FIXME: implement */
913 }
914 
915 /* The vhost driver natively knows how to handle the vrings of non
916  * cross-endian legacy devices and modern devices. Only legacy devices
917  * exposed to a bi-endian guest may require the vhost driver to use a
918  * specific endianness.
919  */
920 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
921 {
922     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
923         return false;
924     }
925 #ifdef HOST_WORDS_BIGENDIAN
926     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
927 #else
928     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
929 #endif
930 }
931 
932 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
933                                                    bool is_big_endian,
934                                                    int vhost_vq_index)
935 {
936     struct vhost_vring_state s = {
937         .index = vhost_vq_index,
938         .num = is_big_endian
939     };
940 
941     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
942         return 0;
943     }
944 
945     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
946     if (errno == ENOTTY) {
947         error_report("vhost does not support cross-endian");
948         return -ENOSYS;
949     }
950 
951     return -errno;
952 }
953 
954 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
955                                       uint64_t gpa, uint64_t *uaddr,
956                                       uint64_t *len)
957 {
958     int i;
959 
960     for (i = 0; i < hdev->mem->nregions; i++) {
961         struct vhost_memory_region *reg = hdev->mem->regions + i;
962 
963         if (gpa >= reg->guest_phys_addr &&
964             reg->guest_phys_addr + reg->memory_size > gpa) {
965             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
966             *len = reg->guest_phys_addr + reg->memory_size - gpa;
967             return 0;
968         }
969     }
970 
971     return -EFAULT;
972 }
973 
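/* Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space, look up the corresponding
 * backend user-space address, and push the entry back to the backend
 * as an IOTLB update. */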
974 void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
975 {
976     IOMMUTLBEntry iotlb;
977     uint64_t uaddr, len;
978 
979     rcu_read_lock();
980 
981     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
982                                           iova, write);
983     if (iotlb.target_as != NULL) {
984         if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
985                                        &uaddr, &len)) {
986             error_report("Fail to lookup the translated address "
987                          "%"PRIx64, iotlb.translated_addr);
988             goto out;
989         }
990 
991         len = MIN(iotlb.addr_mask + 1, len);
992         iova = iova & ~iotlb.addr_mask;
993 
994         if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
995                                                       len, iotlb.perm)) {
996             error_report("Fail to update device iotlb");
997             goto out;
998         }
999     }
1000 out:
1001     rcu_read_unlock();
1002 }
1003 
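/* Bring one virtqueue up in the backend: program the ring size and
 * base index, fix up legacy cross-endian vrings, map the descriptor,
 * avail and used rings, program the ring addresses, and hand the kick
 * (host notifier) eventfd to the backend. */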
1004 static int vhost_virtqueue_start(struct vhost_dev *dev,
1005                                 struct VirtIODevice *vdev,
1006                                 struct vhost_virtqueue *vq,
1007                                 unsigned idx)
1008 {
1009     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1010     VirtioBusState *vbus = VIRTIO_BUS(qbus);
1011     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1012     hwaddr s, l, a;
1013     int r;
1014     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1015     struct vhost_vring_file file = {
1016         .index = vhost_vq_index
1017     };
1018     struct vhost_vring_state state = {
1019         .index = vhost_vq_index
1020     };
1021     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1022 
1023 
1024     vq->num = state.num = virtio_queue_get_num(vdev, idx);
1025     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1026     if (r) {
1027         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
1028         return -errno;
1029     }
1030 
1031     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1032     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1033     if (r) {
1034         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
1035         return -errno;
1036     }
1037 
1038     if (vhost_needs_vring_endian(vdev)) {
1039         r = vhost_virtqueue_set_vring_endian_legacy(dev,
1040                                                     virtio_is_big_endian(vdev),
1041                                                     vhost_vq_index);
1042         if (r) {
1043             return -errno;
1044         }
1045     }
1046 
1047     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1048     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
1049     vq->desc = vhost_memory_map(dev, a, &l, 0);
1050     if (!vq->desc || l != s) {
1051         r = -ENOMEM;
1052         goto fail_alloc_desc;
1053     }
1054     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1055     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1056     vq->avail = vhost_memory_map(dev, a, &l, 0);
1057     if (!vq->avail || l != s) {
1058         r = -ENOMEM;
1059         goto fail_alloc_avail;
1060     }
1061     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1062     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1063     vq->used = vhost_memory_map(dev, a, &l, 1);
1064     if (!vq->used || l != s) {
1065         r = -ENOMEM;
1066         goto fail_alloc_used;
1067     }
1068 
1069     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1070     if (r < 0) {
1071         r = -errno;
1072         goto fail_alloc;
1073     }
1074 
1075     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1076     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1077     if (r) {
1078         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1079         r = -errno;
1080         goto fail_kick;
1081     }
1082 
1083     /* Clear and discard previous events if any. */
1084     event_notifier_test_and_clear(&vq->masked_notifier);
1085 
1086     /* Init vring in unmasked state, unless guest_notifier_mask
1087      * will do it later.
1088      */
1089     if (!vdev->use_guest_notifier_mask) {
1090         /* TODO: check and handle errors. */
1091         vhost_virtqueue_mask(dev, vdev, idx, false);
1092     }
1093 
1094     if (k->query_guest_notifiers &&
1095         k->query_guest_notifiers(qbus->parent) &&
1096         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1097         file.fd = -1;
1098         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1099         if (r) {
1100             goto fail_vector;
1101         }
1102     }
1103 
1104     return 0;
1105 
1106 fail_vector:
1107 fail_kick:
1108 fail_alloc:
1109     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1110                        0, 0);
1111 fail_alloc_used:
1112     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1113                        0, 0);
1114 fail_alloc_avail:
1115     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1116                        0, 0);
1117 fail_alloc_desc:
1118     return r;
1119 }
1120 
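/* Tear one virtqueue down: read the last avail index back from the
 * backend into the virtio device, restore native endianness for legacy
 * cross-endian vrings, and unmap the rings, marking the used ring
 * dirty since the backend may have written to it. */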
1121 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1122                                     struct VirtIODevice *vdev,
1123                                     struct vhost_virtqueue *vq,
1124                                     unsigned idx)
1125 {
1126     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1127     struct vhost_vring_state state = {
1128         .index = vhost_vq_index,
1129     };
1130     int r;
1131 
1132     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1133     if (r < 0) {
1134         VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
1135     } else {
1136         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1137     }
1138     virtio_queue_invalidate_signalled_used(vdev, idx);
1139     virtio_queue_update_used_idx(vdev, idx);
1140 
1141     /* In the cross-endian case, we need to reset the vring endianness
1142      * back to native, which legacy devices expect by default.
1143      */
1144     if (vhost_needs_vring_endian(vdev)) {
1145         vhost_virtqueue_set_vring_endian_legacy(dev,
1146                                                 !virtio_is_big_endian(vdev),
1147                                                 vhost_vq_index);
1148     }
1149 
1150     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1151                        1, virtio_queue_get_used_size(vdev, idx));
1152     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1153                        0, virtio_queue_get_avail_size(vdev, idx));
1154     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1155                        0, virtio_queue_get_desc_size(vdev, idx));
1156 }
1157 
1158 static void vhost_eventfd_add(MemoryListener *listener,
1159                               MemoryRegionSection *section,
1160                               bool match_data, uint64_t data, EventNotifier *e)
1161 {
1162 }
1163 
1164 static void vhost_eventfd_del(MemoryListener *listener,
1165                               MemoryRegionSection *section,
1166                               bool match_data, uint64_t data, EventNotifier *e)
1167 {
1168 }
1169 
1170 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1171                                                 int n, uint32_t timeout)
1172 {
1173     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1174     struct vhost_vring_state state = {
1175         .index = vhost_vq_index,
1176         .num = timeout,
1177     };
1178     int r;
1179 
1180     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1181         return -EINVAL;
1182     }
1183 
1184     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1185     if (r) {
1186         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1187         return r;
1188     }
1189 
1190     return 0;
1191 }
1192 
1193 static int vhost_virtqueue_init(struct vhost_dev *dev,
1194                                 struct vhost_virtqueue *vq, int n)
1195 {
1196     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1197     struct vhost_vring_file file = {
1198         .index = vhost_vq_index,
1199     };
1200     int r = event_notifier_init(&vq->masked_notifier, 0);
1201     if (r < 0) {
1202         return r;
1203     }
1204 
1205     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1206     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1207     if (r) {
1208         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1209         r = -errno;
1210         goto fail_call;
1211     }
1212 
1213     vq->dev = dev;
1214 
1215     return 0;
1216 fail_call:
1217     event_notifier_cleanup(&vq->masked_notifier);
1218     return r;
1219 }
1220 
1221 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1222 {
1223     event_notifier_cleanup(&vq->masked_notifier);
1224 }
1225 
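/* Initialize a vhost device: bind and init the backend, check its
 * memslot limit, take ownership, query its feature set, set up every
 * virtqueue, and register the memory listener.  A migration blocker is
 * installed if the backend cannot log dirty pages (VHOST_F_LOG_ALL
 * missing) or a required shared log cannot be allocated. */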
1226 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1227                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1228 {
1229     uint64_t features;
1230     int i, r, n_initialized_vqs = 0;
1231     Error *local_err = NULL;
1232 
1233     hdev->vdev = NULL;
1234     hdev->migration_blocker = NULL;
1235 
1236     r = vhost_set_backend_type(hdev, backend_type);
1237     assert(r >= 0);
1238 
1239     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1240     if (r < 0) {
1241         goto fail;
1242     }
1243 
1244     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1245         error_report("vhost backend memory slots limit is less"
1246                 " than current number of present memory slots");
1247         r = -1;
1248         goto fail;
1249     }
1250 
1251     r = hdev->vhost_ops->vhost_set_owner(hdev);
1252     if (r < 0) {
1253         VHOST_OPS_DEBUG("vhost_set_owner failed");
1254         goto fail;
1255     }
1256 
1257     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1258     if (r < 0) {
1259         VHOST_OPS_DEBUG("vhost_get_features failed");
1260         goto fail;
1261     }
1262 
1263     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1264         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1265         if (r < 0) {
1266             goto fail;
1267         }
1268     }
1269 
1270     if (busyloop_timeout) {
1271         for (i = 0; i < hdev->nvqs; ++i) {
1272             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1273                                                      busyloop_timeout);
1274             if (r < 0) {
1275                 goto fail_busyloop;
1276             }
1277         }
1278     }
1279 
1280     hdev->features = features;
1281 
1282     hdev->memory_listener = (MemoryListener) {
1283         .begin = vhost_begin,
1284         .commit = vhost_commit,
1285         .region_add = vhost_region_add,
1286         .region_del = vhost_region_del,
1287         .region_nop = vhost_region_nop,
1288         .log_start = vhost_log_start,
1289         .log_stop = vhost_log_stop,
1290         .log_sync = vhost_log_sync,
1291         .log_global_start = vhost_log_global_start,
1292         .log_global_stop = vhost_log_global_stop,
1293         .eventfd_add = vhost_eventfd_add,
1294         .eventfd_del = vhost_eventfd_del,
1295         .priority = 10
1296     };
1297 
1298     hdev->iommu_listener = (MemoryListener) {
1299         .region_add = vhost_iommu_region_add,
1300         .region_del = vhost_iommu_region_del,
1301     };
1302 
1303     if (hdev->migration_blocker == NULL) {
1304         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1305             error_setg(&hdev->migration_blocker,
1306                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1307         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
1308             error_setg(&hdev->migration_blocker,
1309                        "Migration disabled: failed to allocate shared memory");
1310         }
1311     }
1312 
1313     if (hdev->migration_blocker != NULL) {
1314         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1315         if (local_err) {
1316             error_report_err(local_err);
1317             error_free(hdev->migration_blocker);
1318             goto fail_busyloop;
1319         }
1320     }
1321 
1322     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1323     hdev->n_mem_sections = 0;
1324     hdev->mem_sections = NULL;
1325     hdev->log = NULL;
1326     hdev->log_size = 0;
1327     hdev->log_enabled = false;
1328     hdev->started = false;
1329     hdev->memory_changed = false;
1330     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1331     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1332     return 0;
1333 
1334 fail_busyloop:
1335     while (--i >= 0) {
1336         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1337     }
1338 fail:
1339     hdev->nvqs = n_initialized_vqs;
1340     vhost_dev_cleanup(hdev);
1341     return r;
1342 }
1343 
1344 void vhost_dev_cleanup(struct vhost_dev *hdev)
1345 {
1346     int i;
1347 
1348     for (i = 0; i < hdev->nvqs; ++i) {
1349         vhost_virtqueue_cleanup(hdev->vqs + i);
1350     }
1351     if (hdev->mem) {
1352         /* those are only safe after successful init */
1353         memory_listener_unregister(&hdev->memory_listener);
1354         QLIST_REMOVE(hdev, entry);
1355     }
1356     if (hdev->migration_blocker) {
1357         migrate_del_blocker(hdev->migration_blocker);
1358         error_free(hdev->migration_blocker);
1359     }
1360     g_free(hdev->mem);
1361     g_free(hdev->mem_sections);
1362     if (hdev->vhost_ops) {
1363         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1364     }
1365     assert(!hdev->log);
1366 
1367     memset(hdev, 0, sizeof(struct vhost_dev));
1368 }
1369 
1370 /* Stop processing guest IO notifications in qemu.
1371  * Start processing them in vhost in kernel.
1372  */
1373 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1374 {
1375     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1376     int i, r, e;
1377 
1378     /* We will pass the notifiers to the kernel, make sure that QEMU
1379      * doesn't interfere.
1380      */
1381     r = virtio_device_grab_ioeventfd(vdev);
1382     if (r < 0) {
1383         error_report("binding does not support host notifiers");
1384         goto fail;
1385     }
1386 
1387     for (i = 0; i < hdev->nvqs; ++i) {
1388         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1389                                          true);
1390         if (r < 0) {
1391             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1392             goto fail_vq;
1393         }
1394     }
1395 
1396     return 0;
1397 fail_vq:
1398     while (--i >= 0) {
1399         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1400                                          false);
1401         if (e < 0) {
1402             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1403         }
1404         assert (e >= 0);
1405     }
1406     virtio_device_release_ioeventfd(vdev);
1407 fail:
1408     return r;
1409 }
1410 
1411 /* Stop processing guest IO notifications in vhost.
1412  * Start processing them in qemu.
1413  * This might actually run the qemu handlers right away,
1414  * so virtio in qemu must be completely setup when this is called.
1415  */
1416 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1417 {
1418     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1419     int i, r;
1420 
1421     for (i = 0; i < hdev->nvqs; ++i) {
1422         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1423                                          false);
1424         if (r < 0) {
1425             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1426         }
1427         assert (r >= 0);
1428     }
1429     virtio_device_release_ioeventfd(vdev);
1430 }
1431 
1432 /* Test and clear event pending status.
1433  * Should be called after unmask to avoid losing events.
1434  */
1435 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1436 {
1437     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1438     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1439     return event_notifier_test_and_clear(&vq->masked_notifier);
1440 }
1441 
1442 /* Mask/unmask events from this vq. */
1443 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1444                          bool mask)
1445 {
1446     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1447     int r, index = n - hdev->vq_index;
1448     struct vhost_vring_file file;
1449 
1450     /* should only be called after backend is connected */
1451     assert(hdev->vhost_ops);
1452 
1453     if (mask) {
1454         assert(vdev->use_guest_notifier_mask);
1455         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1456     } else {
1457         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1458     }
1459 
1460     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1461     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1462     if (r < 0) {
1463         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1464     }
1465 }
1466 
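/* Mask out of "features" every bit listed in feature_bits that the
 * backend did not advertise, so the guest is only offered what vhost
 * can actually support. */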
1467 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1468                             uint64_t features)
1469 {
1470     const int *bit = feature_bits;
1471     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1472         uint64_t bit_mask = (1ULL << *bit);
1473         if (!(hdev->features & bit_mask)) {
1474             features &= ~bit_mask;
1475         }
1476         bit++;
1477     }
1478     return features;
1479 }
1480 
1481 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1482                         uint64_t features)
1483 {
1484     const int *bit = feature_bits;
1485     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1486         uint64_t bit_mask = (1ULL << *bit);
1487         if (features & bit_mask) {
1488             hdev->acked_features |= bit_mask;
1489         }
1490         bit++;
1491     }
1492 }
1493 
1494 /* Host notifiers must be enabled at this point. */
1495 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1496 {
1497     int i, r;
1498 
1499     /* should only be called after backend is connected */
1500     assert(hdev->vhost_ops);
1501 
1502     hdev->started = true;
1503     hdev->vdev = vdev;
1504 
1505     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1506     if (r < 0) {
1507         goto fail_features;
1508     }
1509 
1510     if (vhost_dev_has_iommu(hdev)) {
1511         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1512     }
1513 
1514     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1515     if (r < 0) {
1516         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1517         r = -errno;
1518         goto fail_mem;
1519     }
1520     for (i = 0; i < hdev->nvqs; ++i) {
1521         r = vhost_virtqueue_start(hdev,
1522                                   vdev,
1523                                   hdev->vqs + i,
1524                                   hdev->vq_index + i);
1525         if (r < 0) {
1526             goto fail_vq;
1527         }
1528     }
1529 
1530     if (hdev->log_enabled) {
1531         uint64_t log_base;
1532 
1533         hdev->log_size = vhost_get_log_size(hdev);
1534         hdev->log = vhost_log_get(hdev->log_size,
1535                                   vhost_dev_log_is_shared(hdev));
1536         log_base = (uintptr_t)hdev->log->log;
1537         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1538                                                 hdev->log_size ? log_base : 0,
1539                                                 hdev->log);
1540         if (r < 0) {
1541             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1542             r = -errno;
1543             goto fail_log;
1544         }
1545     }
1546 
1547     if (vhost_dev_has_iommu(hdev)) {
1548         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1549 
1550         /* Update used ring information for the IOTLB to work correctly;
1551          * the vhost-kernel code requires this. */
1552         for (i = 0; i < hdev->nvqs; ++i) {
1553             struct vhost_virtqueue *vq = hdev->vqs + i;
1554             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1555         }
1556     }
1557     return 0;
1558 fail_log:
1559     vhost_log_put(hdev, false);
1560 fail_vq:
1561     while (--i >= 0) {
1562         vhost_virtqueue_stop(hdev,
1563                              vdev,
1564                              hdev->vqs + i,
1565                              hdev->vq_index + i);
1566     }
1567     i = hdev->nvqs;
1568 
1569 fail_mem:
1570 fail_features:
1571 
1572     hdev->started = false;
1573     return r;
1574 }
1575 
1576 /* Host notifiers must be enabled at this point. */
1577 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1578 {
1579     int i;
1580 
1581     /* should only be called after backend is connected */
1582     assert(hdev->vhost_ops);
1583 
1584     for (i = 0; i < hdev->nvqs; ++i) {
1585         vhost_virtqueue_stop(hdev,
1586                              vdev,
1587                              hdev->vqs + i,
1588                              hdev->vq_index + i);
1589     }
1590 
1591     if (vhost_dev_has_iommu(hdev)) {
1592         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1593         memory_listener_unregister(&hdev->iommu_listener);
1594     }
1595     vhost_log_put(hdev, true);
1596     hdev->started = false;
1597     hdev->vdev = NULL;
1598 }
1599 
1600 int vhost_net_set_backend(struct vhost_dev *hdev,
1601                           struct vhost_vring_file *file)
1602 {
1603     if (hdev->vhost_ops->vhost_net_set_backend) {
1604         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1605     }
1606 
1607     return -1;
1608 }
1609