xref: /openbmc/qemu/hw/virtio/vhost.c (revision 79e42085)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include "standard-headers/linux/vhost_types.h"
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/blocker.h"
29 #include "sysemu/dma.h"
30 #include "trace.h"
31 
32 /* enabled until disconnected backend stabilizes */
33 #define _VHOST_DEBUG 1
34 
35 #ifdef _VHOST_DEBUG
36 #define VHOST_OPS_DEBUG(fmt, ...) \
37     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38                       strerror(errno), errno); } while (0)
39 #else
40 #define VHOST_OPS_DEBUG(fmt, ...) \
41     do { } while (0)
42 #endif
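/*
 * With _VHOST_DEBUG defined, each failing backend call reports its
 * message plus strerror(errno)/errno, e.g. (illustrative output only)
 * VHOST_OPS_DEBUG("vhost_set_vring_addr failed") would print
 * "vhost_set_vring_addr failed: Bad address (14)" when errno is EFAULT.
 */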
43 
44 static struct vhost_log *vhost_log;
45 static struct vhost_log *vhost_log_shm;
46 
47 static unsigned int used_memslots;
48 static QLIST_HEAD(, vhost_dev) vhost_devices =
49     QLIST_HEAD_INITIALIZER(vhost_devices);
50 
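/*
 * True if every registered vhost device's backend still has room for at
 * least one more memory slot, i.e. the smallest backend limit is above
 * the number of memslots currently in use.
 */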
51 bool vhost_has_free_slot(void)
52 {
53     unsigned int slots_limit = ~0U;
54     struct vhost_dev *hdev;
55 
56     QLIST_FOREACH(hdev, &vhost_devices, entry) {
57         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
58         slots_limit = MIN(slots_limit, r);
59     }
60     return slots_limit > used_memslots;
61 }
62 
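/*
 * Sync dirty bits from the vhost log into QEMU's dirty bitmap for the
 * part of [rfirst, rlast] that overlaps [mfirst, mlast].  Each set bit
 * in a log chunk marks one VHOST_LOG_PAGE-sized page of the section's
 * MemoryRegion dirty.
 */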
63 static void vhost_dev_sync_region(struct vhost_dev *dev,
64                                   MemoryRegionSection *section,
65                                   uint64_t mfirst, uint64_t mlast,
66                                   uint64_t rfirst, uint64_t rlast)
67 {
68     vhost_log_chunk_t *log = dev->log->log;
69 
70     uint64_t start = MAX(mfirst, rfirst);
71     uint64_t end = MIN(mlast, rlast);
72     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
73     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
74     uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
75 
76     if (end < start) {
77         return;
78     }
79     assert(end / VHOST_LOG_CHUNK < dev->log_size);
80     assert(start / VHOST_LOG_CHUNK < dev->log_size);
81 
82     for (; from < to; ++from) {
83         vhost_log_chunk_t log;
84         /* We first check with non-atomic: much cheaper,
85          * and we expect non-dirty to be the common case. */
86         if (!*from) {
87             addr += VHOST_LOG_CHUNK;
88             continue;
89         }
90         /* Data must be read atomically. We don't really need barrier semantics
91          * but it's easier to use atomic_* than roll our own. */
92         log = atomic_xchg(from, 0);
93         while (log) {
94             int bit = ctzl(log);
95             hwaddr page_addr;
96             hwaddr section_offset;
97             hwaddr mr_offset;
98             page_addr = addr + bit * VHOST_LOG_PAGE;
99             section_offset = page_addr - section->offset_within_address_space;
100             mr_offset = section_offset + section->offset_within_region;
101             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
102             log &= ~(0x1ull << bit);
103         }
104         addr += VHOST_LOG_CHUNK;
105     }
106 }
107 
108 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
109                                    MemoryRegionSection *section,
110                                    hwaddr first,
111                                    hwaddr last)
112 {
113     int i;
114     hwaddr start_addr;
115     hwaddr end_addr;
116 
117     if (!dev->log_enabled || !dev->started) {
118         return 0;
119     }
120     start_addr = section->offset_within_address_space;
121     end_addr = range_get_last(start_addr, int128_get64(section->size));
122     start_addr = MAX(first, start_addr);
123     end_addr = MIN(last, end_addr);
124 
125     for (i = 0; i < dev->mem->nregions; ++i) {
126         struct vhost_memory_region *reg = dev->mem->regions + i;
127         vhost_dev_sync_region(dev, section, start_addr, end_addr,
128                               reg->guest_phys_addr,
129                               range_get_last(reg->guest_phys_addr,
130                                              reg->memory_size));
131     }
132     for (i = 0; i < dev->nvqs; ++i) {
133         struct vhost_virtqueue *vq = dev->vqs + i;
134         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
135                               range_get_last(vq->used_phys, vq->used_size));
136     }
137     return 0;
138 }
139 
140 static void vhost_log_sync(MemoryListener *listener,
141                           MemoryRegionSection *section)
142 {
143     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
144                                          memory_listener);
145     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
146 }
147 
148 static void vhost_log_sync_range(struct vhost_dev *dev,
149                                  hwaddr first, hwaddr last)
150 {
151     int i;
152     /* FIXME: this is N^2 in number of sections */
153     for (i = 0; i < dev->n_mem_sections; ++i) {
154         MemoryRegionSection *section = &dev->mem_sections[i];
155         vhost_sync_dirty_bitmap(dev, section, first, last);
156     }
157 }
158 
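/*
 * Number of vhost_log_chunk_t entries the dirty log needs in order to
 * cover every guest memory region and every used ring of this device.
 */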
159 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
160 {
161     uint64_t log_size = 0;
162     int i;
163     for (i = 0; i < dev->mem->nregions; ++i) {
164         struct vhost_memory_region *reg = dev->mem->regions + i;
165         uint64_t last = range_get_last(reg->guest_phys_addr,
166                                        reg->memory_size);
167         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
168     }
169     for (i = 0; i < dev->nvqs; ++i) {
170         struct vhost_virtqueue *vq = dev->vqs + i;
171         uint64_t last = vq->used_phys + vq->used_size - 1;
172         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
173     }
174     return log_size;
175 }
176 
177 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
178 {
179     Error *err = NULL;
180     struct vhost_log *log;
181     uint64_t logsize = size * sizeof(*(log->log));
182     int fd = -1;
183 
184     log = g_new0(struct vhost_log, 1);
185     if (share) {
186         log->log = qemu_memfd_alloc("vhost-log", logsize,
187                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
188                                     &fd, &err);
189         if (err) {
190             error_report_err(err);
191             g_free(log);
192             return NULL;
193         }
194         memset(log->log, 0, logsize);
195     } else {
196         log->log = g_malloc0(logsize);
197     }
198 
199     log->size = size;
200     log->refcnt = 1;
201     log->fd = fd;
202 
203     return log;
204 }
205 
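/*
 * Return a reference to the cached global log (shared memfd-backed or
 * plain malloc'd, depending on the backend).  If the cached log has a
 * different size, a new one is allocated and becomes the cached log;
 * the old one is freed once its last user calls vhost_log_put().
 */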
206 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
207 {
208     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
209 
210     if (!log || log->size != size) {
211         log = vhost_log_alloc(size, share);
212         if (share) {
213             vhost_log_shm = log;
214         } else {
215             vhost_log = log;
216         }
217     } else {
218         ++log->refcnt;
219     }
220 
221     return log;
222 }
223 
224 static void vhost_log_put(struct vhost_dev *dev, bool sync)
225 {
226     struct vhost_log *log = dev->log;
227 
228     if (!log) {
229         return;
230     }
231 
232     --log->refcnt;
233     if (log->refcnt == 0) {
234         /* Sync only the range covered by the old log */
235         if (dev->log_size && sync) {
236             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
237         }
238 
239         if (vhost_log == log) {
240             g_free(log->log);
241             vhost_log = NULL;
242         } else if (vhost_log_shm == log) {
243             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
244                             log->fd);
245             vhost_log_shm = NULL;
246         }
247 
248         g_free(log);
249     }
250 
251     dev->log = NULL;
252     dev->log_size = 0;
253 }
254 
255 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
256 {
257     return dev->vhost_ops->vhost_requires_shm_log &&
258            dev->vhost_ops->vhost_requires_shm_log(dev);
259 }
260 
261 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
262 {
263     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
264     uint64_t log_base = (uintptr_t)log->log;
265     int r;
266 
267     /* Inform the backend of the log switch; this must be done before
268        releasing the current log, to ensure no logging is lost */
269     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
270     if (r < 0) {
271         VHOST_OPS_DEBUG("vhost_set_log_base failed");
272     }
273 
274     vhost_log_put(dev, true);
275     dev->log = log;
276     dev->log_size = size;
277 }
278 
279 static int vhost_dev_has_iommu(struct vhost_dev *dev)
280 {
281     VirtIODevice *vdev = dev->vdev;
282 
283     return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
284 }
285 
286 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
287                               hwaddr *plen, int is_write)
288 {
289     if (!vhost_dev_has_iommu(dev)) {
290         return cpu_physical_memory_map(addr, plen, is_write);
291     } else {
292         return (void *)(uintptr_t)addr;
293     }
294 }
295 
296 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
297                                hwaddr len, int is_write,
298                                hwaddr access_len)
299 {
300     if (!vhost_dev_has_iommu(dev)) {
301         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
302     }
303 }
304 
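/*
 * Check one ring part (descriptor table, avail or used ring) against a
 * guest memory region.  Returns 0 if the ring doesn't intersect the
 * region or is still mapped at the same HVA, -ENOMEM if only part of
 * the ring is covered, -EBUSY if the ring's backing memory has moved.
 */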
305 static int vhost_verify_ring_part_mapping(void *ring_hva,
306                                           uint64_t ring_gpa,
307                                           uint64_t ring_size,
308                                           void *reg_hva,
309                                           uint64_t reg_gpa,
310                                           uint64_t reg_size)
311 {
312     uint64_t hva_ring_offset;
313     uint64_t ring_last = range_get_last(ring_gpa, ring_size);
314     uint64_t reg_last = range_get_last(reg_gpa, reg_size);
315 
316     if (ring_last < reg_gpa || ring_gpa > reg_last) {
317         return 0;
318     }
319     /* check that the whole ring is mapped */
320     if (ring_last > reg_last) {
321         return -ENOMEM;
322     }
323     /* check that ring's MemoryRegion wasn't replaced */
324     hva_ring_offset = ring_gpa - reg_gpa;
325     if (ring_hva != reg_hva + hva_ring_offset) {
326         return -EBUSY;
327     }
328 
329     return 0;
330 }
331 
332 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
333                                       void *reg_hva,
334                                       uint64_t reg_gpa,
335                                       uint64_t reg_size)
336 {
337     int i, j;
338     int r = 0;
339     const char *part_name[] = {
340         "descriptor table",
341         "available ring",
342         "used ring"
343     };
344 
345     if (vhost_dev_has_iommu(dev)) {
346         return 0;
347     }
348 
349     for (i = 0; i < dev->nvqs; ++i) {
350         struct vhost_virtqueue *vq = dev->vqs + i;
351 
352         if (vq->desc_phys == 0) {
353             continue;
354         }
355 
356         j = 0;
357         r = vhost_verify_ring_part_mapping(
358                 vq->desc, vq->desc_phys, vq->desc_size,
359                 reg_hva, reg_gpa, reg_size);
360         if (r) {
361             break;
362         }
363 
364         j++;
365         r = vhost_verify_ring_part_mapping(
366                 vq->avail, vq->avail_phys, vq->avail_size,
367                 reg_hva, reg_gpa, reg_size);
368         if (r) {
369             break;
370         }
371 
372         j++;
373         r = vhost_verify_ring_part_mapping(
374                 vq->used, vq->used_phys, vq->used_size,
375                 reg_hva, reg_gpa, reg_size);
376         if (r) {
377             break;
378         }
379     }
380 
381     if (r == -ENOMEM) {
382         error_report("Unable to map %s for ring %d", part_name[j], i);
383     } else if (r == -EBUSY) {
384         error_report("%s relocated for ring %d", part_name[j], i);
385     }
386     return r;
387 }
388 
389 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
390 {
391     bool result;
392     bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
393                      ~(1 << DIRTY_MEMORY_MIGRATION);
394     result = memory_region_is_ram(section->mr) &&
395         !memory_region_is_rom(section->mr);
396 
397     /* Vhost doesn't handle any block which is doing dirty-tracking other
398      * than migration; this typically fires on VGA areas.
399      */
400     result &= !log_dirty;
401 
402     if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
403         result &=
404             dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
405     }
406 
407     trace_vhost_section(section->mr->name, result);
408     return result;
409 }
410 
411 static void vhost_begin(MemoryListener *listener)
412 {
413     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
414                                          memory_listener);
415     dev->tmp_sections = NULL;
416     dev->n_tmp_sections = 0;
417 }
418 
419 static void vhost_commit(MemoryListener *listener)
420 {
421     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
422                                          memory_listener);
423     MemoryRegionSection *old_sections;
424     int n_old_sections;
425     uint64_t log_size;
426     size_t regions_size;
427     int r;
428     int i;
429     bool changed = false;
430 
431     /* Note we can be called before the device is started, but then
432      * starting the device calls set_mem_table, so we need to have
433      * built the data structures.
434      */
435     old_sections = dev->mem_sections;
436     n_old_sections = dev->n_mem_sections;
437     dev->mem_sections = dev->tmp_sections;
438     dev->n_mem_sections = dev->n_tmp_sections;
439 
440     if (dev->n_mem_sections != n_old_sections) {
441         changed = true;
442     } else {
443         /* Same size, let's check the contents */
444         changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
445                          n_old_sections * sizeof(old_sections[0])) != 0;
446     }
447 
448     trace_vhost_commit(dev->started, changed);
449     if (!changed) {
450         goto out;
451     }
452 
453     /* Rebuild the regions list from the new sections list */
454     regions_size = offsetof(struct vhost_memory, regions) +
455                        dev->n_mem_sections * sizeof dev->mem->regions[0];
456     dev->mem = g_realloc(dev->mem, regions_size);
457     dev->mem->nregions = dev->n_mem_sections;
458     used_memslots = dev->mem->nregions;
459     for (i = 0; i < dev->n_mem_sections; i++) {
460         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
461         struct MemoryRegionSection *mrs = dev->mem_sections + i;
462 
463         cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
464         cur_vmr->memory_size     = int128_get64(mrs->size);
465         cur_vmr->userspace_addr  =
466             (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
467             mrs->offset_within_region;
468         cur_vmr->flags_padding   = 0;
469     }
470 
471     if (!dev->started) {
472         goto out;
473     }
474 
475     for (i = 0; i < dev->mem->nregions; i++) {
476         if (vhost_verify_ring_mappings(dev,
477                        (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
478                        dev->mem->regions[i].guest_phys_addr,
479                        dev->mem->regions[i].memory_size)) {
480             error_report("Verify ring failure on region %d", i);
481             abort();
482         }
483     }
484 
485     if (!dev->log_enabled) {
486         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
487         if (r < 0) {
488             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
489         }
490         goto out;
491     }
492     log_size = vhost_get_log_size(dev);
493     /* We allocate an extra 4K bytes of log,
494      * to reduce the number of reallocations. */
495 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
496     /* To log more, must increase log size before table update. */
497     if (dev->log_size < log_size) {
498         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
499     }
500     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
501     if (r < 0) {
502         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
503     }
504     /* To log less, can only decrease log size after table update. */
505     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
506         vhost_dev_log_resize(dev, log_size);
507     }
508 
509 out:
510     /* Deref the old list of sections; this must happen _after_ the
511      * vhost_set_mem_table call, to ensure the client isn't still using the
512      * section we're about to unref.
513      */
514     while (n_old_sections--) {
515         memory_region_unref(old_sections[n_old_sections].mr);
516     }
517     g_free(old_sections);
518     return;
519 }
520 
521 /* Adds the section data to the tmp_sections list.
522  * It relies on the listener calling us in memory address order,
523  * once for each region (via the _add and _nop methods), so that
524  * neighbouring sections can be joined.
525  */
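/*
 * Worked example of the page rounding below (hypothetical numbers): with
 * a 4K page size, a section with mrs_host = 0x7f0000001200 and
 * mrs_size = 0x2d00 becomes mrs_host = 0x7f0000001000, mrs_size = 0x3000,
 * and mrs_gpa moves back by the same 0x200 so the GPA<->HVA offset is
 * preserved.
 */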
526 static void vhost_region_add_section(struct vhost_dev *dev,
527                                      MemoryRegionSection *section)
528 {
529     bool need_add = true;
530     uint64_t mrs_size = int128_get64(section->size);
531     uint64_t mrs_gpa = section->offset_within_address_space;
532     uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
533                          section->offset_within_region;
534     RAMBlock *mrs_rb = section->mr->ram_block;
535     size_t mrs_page = qemu_ram_pagesize(mrs_rb);
536 
537     trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
538                                    mrs_host);
539 
540     /* Round the section to its page size */
541     /* First align the start down to a page boundary */
542     uint64_t alignage = mrs_host & (mrs_page - 1);
543     if (alignage) {
544         mrs_host -= alignage;
545         mrs_size += alignage;
546         mrs_gpa  -= alignage;
547     }
548     /* Now align the size up to a page boundary */
549     alignage = mrs_size & (mrs_page - 1);
550     if (alignage) {
551         mrs_size += mrs_page - alignage;
552     }
553     trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
554                                            mrs_host);
555 
556     if (dev->n_tmp_sections) {
557         /* Since we already have at least one section, let's see if
558          * this extends it; since we're scanning in order, we only
559          * have to look at the last one, and the FlatView that calls
560          * us shouldn't have overlaps.
561          */
562         MemoryRegionSection *prev_sec = dev->tmp_sections +
563                                                (dev->n_tmp_sections - 1);
564         uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
565         uint64_t prev_size = int128_get64(prev_sec->size);
566         uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
567         uint64_t prev_host_start =
568                         (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
569                         prev_sec->offset_within_region;
570         uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
571 
572         if (mrs_gpa <= (prev_gpa_end + 1)) {
573             /* OK, looks like overlapping/intersecting - it's possible that
574              * the rounding to page sizes has made them overlap, but they should
575              * match up in the same RAMBlock if they do.
576              */
577             if (mrs_gpa < prev_gpa_start) {
578                 error_report("%s: Section rounded to %"PRIx64
579                              " prior to previous %"PRIx64,
580                              __func__, mrs_gpa, prev_gpa_start);
581                 /* A way to cleanly fail here would be better */
582                 return;
583             }
584             /* Offset from the start of the previous GPA to this GPA */
585             size_t offset = mrs_gpa - prev_gpa_start;
586 
587             if (prev_host_start + offset == mrs_host &&
588                 section->mr == prev_sec->mr &&
589                 (!dev->vhost_ops->vhost_backend_can_merge ||
590                  dev->vhost_ops->vhost_backend_can_merge(dev,
591                     mrs_host, mrs_size,
592                     prev_host_start, prev_size))) {
593                 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
594                 need_add = false;
595                 prev_sec->offset_within_address_space =
596                     MIN(prev_gpa_start, mrs_gpa);
597                 prev_sec->offset_within_region =
598                     MIN(prev_host_start, mrs_host) -
599                     (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
600                 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
601                                                mrs_host));
602                 trace_vhost_region_add_section_merge(section->mr->name,
603                                         int128_get64(prev_sec->size),
604                                         prev_sec->offset_within_address_space,
605                                         prev_sec->offset_within_region);
606             } else {
607                 /* adjoining regions are fine, but overlapping ones with
608                  * different blocks/offsets shouldn't happen
609                  */
610                 if (mrs_gpa != prev_gpa_end + 1) {
611                     error_report("%s: Overlapping but not coherent sections "
612                                  "at %"PRIx64,
613                                  __func__, mrs_gpa);
614                     return;
615                 }
616             }
617         }
618     }
619 
620     if (need_add) {
621         ++dev->n_tmp_sections;
622         dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
623                                     dev->n_tmp_sections);
624         dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
625         /* The flatview isn't stable and we don't use it; making it NULL
626          * means we can memcmp the list.
627          */
628         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
629         memory_region_ref(section->mr);
630     }
631 }
632 
633 /* Used for both add and nop callbacks */
634 static void vhost_region_addnop(MemoryListener *listener,
635                                 MemoryRegionSection *section)
636 {
637     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
638                                          memory_listener);
639 
640     if (!vhost_section(dev, section)) {
641         return;
642     }
643     vhost_region_add_section(dev, section);
644 }
645 
646 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
647 {
648     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
649     struct vhost_dev *hdev = iommu->hdev;
650     hwaddr iova = iotlb->iova + iommu->iommu_offset;
651 
652     if (vhost_backend_invalidate_device_iotlb(hdev, iova,
653                                               iotlb->addr_mask + 1)) {
654         error_report("Fail to invalidate device iotlb");
655     }
656 }
657 
658 static void vhost_iommu_region_add(MemoryListener *listener,
659                                    MemoryRegionSection *section)
660 {
661     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
662                                          iommu_listener);
663     struct vhost_iommu *iommu;
664     Int128 end;
665     int iommu_idx;
666     IOMMUMemoryRegion *iommu_mr;
667 
668     if (!memory_region_is_iommu(section->mr)) {
669         return;
670     }
671 
672     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
673 
674     iommu = g_malloc0(sizeof(*iommu));
675     end = int128_add(int128_make64(section->offset_within_region),
676                      section->size);
677     end = int128_sub(end, int128_one());
678     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
679                                                    MEMTXATTRS_UNSPECIFIED);
680     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
681                         IOMMU_NOTIFIER_UNMAP,
682                         section->offset_within_region,
683                         int128_get64(end),
684                         iommu_idx);
685     iommu->mr = section->mr;
686     iommu->iommu_offset = section->offset_within_address_space -
687                           section->offset_within_region;
688     iommu->hdev = dev;
689     memory_region_register_iommu_notifier(section->mr, &iommu->n);
690     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
691     /* TODO: can replay help performance here? */
692 }
693 
694 static void vhost_iommu_region_del(MemoryListener *listener,
695                                    MemoryRegionSection *section)
696 {
697     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
698                                          iommu_listener);
699     struct vhost_iommu *iommu;
700 
701     if (!memory_region_is_iommu(section->mr)) {
702         return;
703     }
704 
705     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
706         if (iommu->mr == section->mr &&
707             iommu->n.start == section->offset_within_region) {
708             memory_region_unregister_iommu_notifier(iommu->mr,
709                                                     &iommu->n);
710             QLIST_REMOVE(iommu, iommu_next);
711             g_free(iommu);
712             break;
713         }
714     }
715 }
716 
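/*
 * Tell the backend where in userspace the descriptor table, avail ring
 * and used ring of virtqueue 'idx' live, and whether writes to the used
 * ring must be recorded in the dirty log (VHOST_VRING_F_LOG).
 */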
717 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
718                                     struct vhost_virtqueue *vq,
719                                     unsigned idx, bool enable_log)
720 {
721     struct vhost_vring_addr addr = {
722         .index = idx,
723         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
724         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
725         .used_user_addr = (uint64_t)(unsigned long)vq->used,
726         .log_guest_addr = vq->used_phys,
727         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
728     };
729     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
730     if (r < 0) {
731         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
732         return -errno;
733     }
734     return 0;
735 }
736 
737 static int vhost_dev_set_features(struct vhost_dev *dev,
738                                   bool enable_log)
739 {
740     uint64_t features = dev->acked_features;
741     int r;
742     if (enable_log) {
743         features |= 0x1ULL << VHOST_F_LOG_ALL;
744     }
745     r = dev->vhost_ops->vhost_set_features(dev, features);
746     if (r < 0) {
747         VHOST_OPS_DEBUG("vhost_set_features failed");
748     }
749     return r < 0 ? -errno : 0;
750 }
751 
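/*
 * Switch dirty logging on or off for the whole device: renegotiate the
 * features with VHOST_F_LOG_ALL set or cleared, then re-program every
 * vring address with the new log flag, rolling back on failure.
 */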
752 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
753 {
754     int r, i, idx;
755     r = vhost_dev_set_features(dev, enable_log);
756     if (r < 0) {
757         goto err_features;
758     }
759     for (i = 0; i < dev->nvqs; ++i) {
760         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
761         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
762                                      enable_log);
763         if (r < 0) {
764             goto err_vq;
765         }
766     }
767     return 0;
768 err_vq:
769     for (; i >= 0; --i) {
770         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
771         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
772                                  dev->log_enabled);
773     }
774     vhost_dev_set_features(dev, dev->log_enabled);
775 err_features:
776     return r;
777 }
778 
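/*
 * Called when migration turns global dirty logging on or off.  If the
 * device isn't started yet, only the requested state is recorded;
 * otherwise the log is resized (or released) and the backend is
 * reconfigured accordingly.
 */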
779 static int vhost_migration_log(MemoryListener *listener, int enable)
780 {
781     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
782                                          memory_listener);
783     int r;
784     if (!!enable == dev->log_enabled) {
785         return 0;
786     }
787     if (!dev->started) {
788         dev->log_enabled = enable;
789         return 0;
790     }
791     if (!enable) {
792         r = vhost_dev_set_log(dev, false);
793         if (r < 0) {
794             return r;
795         }
796         vhost_log_put(dev, false);
797     } else {
798         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
799         r = vhost_dev_set_log(dev, true);
800         if (r < 0) {
801             return r;
802         }
803     }
804     dev->log_enabled = enable;
805     return 0;
806 }
807 
808 static void vhost_log_global_start(MemoryListener *listener)
809 {
810     int r;
811 
812     r = vhost_migration_log(listener, true);
813     if (r < 0) {
814         abort();
815     }
816 }
817 
818 static void vhost_log_global_stop(MemoryListener *listener)
819 {
820     int r;
821 
822     r = vhost_migration_log(listener, false);
823     if (r < 0) {
824         abort();
825     }
826 }
827 
828 static void vhost_log_start(MemoryListener *listener,
829                             MemoryRegionSection *section,
830                             int old, int new)
831 {
832     /* FIXME: implement */
833 }
834 
835 static void vhost_log_stop(MemoryListener *listener,
836                            MemoryRegionSection *section,
837                            int old, int new)
838 {
839     /* FIXME: implement */
840 }
841 
842 /* The vhost driver natively knows how to handle the vrings of non
843  * cross-endian legacy devices and modern devices. Only legacy devices
844  * exposed to a bi-endian guest may require the vhost driver to use a
845  * specific endianness.
846  */
847 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
848 {
849     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
850         return false;
851     }
852 #ifdef HOST_WORDS_BIGENDIAN
853     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
854 #else
855     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
856 #endif
857 }
858 
859 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
860                                                    bool is_big_endian,
861                                                    int vhost_vq_index)
862 {
863     struct vhost_vring_state s = {
864         .index = vhost_vq_index,
865         .num = is_big_endian
866     };
867 
868     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
869         return 0;
870     }
871 
872     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
873     if (errno == ENOTTY) {
874         error_report("vhost does not support cross-endian");
875         return -ENOSYS;
876     }
877 
878     return -errno;
879 }
880 
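/*
 * Translate a guest physical address into the corresponding userspace
 * address from the vhost memory table; *len is set to the bytes left in
 * the containing region.  Returns -EFAULT if no region contains gpa.
 */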
881 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
882                                       uint64_t gpa, uint64_t *uaddr,
883                                       uint64_t *len)
884 {
885     int i;
886 
887     for (i = 0; i < hdev->mem->nregions; i++) {
888         struct vhost_memory_region *reg = hdev->mem->regions + i;
889 
890         if (gpa >= reg->guest_phys_addr &&
891             reg->guest_phys_addr + reg->memory_size > gpa) {
892             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
893             *len = reg->guest_phys_addr + reg->memory_size - gpa;
894             return 0;
895         }
896     }
897 
898     return -EFAULT;
899 }
900 
901 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
902 {
903     IOMMUTLBEntry iotlb;
904     uint64_t uaddr, len;
905     int ret = -EFAULT;
906 
907     rcu_read_lock();
908 
909     trace_vhost_iotlb_miss(dev, 1);
910 
911     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
912                                           iova, write,
913                                           MEMTXATTRS_UNSPECIFIED);
914     if (iotlb.target_as != NULL) {
915         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
916                                          &uaddr, &len);
917         if (ret) {
918             trace_vhost_iotlb_miss(dev, 3);
919             error_report("Fail to lookup the translated address "
920                          "%"PRIx64, iotlb.translated_addr);
921             goto out;
922         }
923 
924         len = MIN(iotlb.addr_mask + 1, len);
925         iova = iova & ~iotlb.addr_mask;
926 
927         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
928                                                 len, iotlb.perm);
929         if (ret) {
930             trace_vhost_iotlb_miss(dev, 4);
931             error_report("Fail to update device iotlb");
932             goto out;
933         }
934     }
935 
936     trace_vhost_iotlb_miss(dev, 2);
937 
938 out:
939     rcu_read_unlock();
940 
941     return ret;
942 }
943 
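/*
 * Map one virtqueue's rings and program its size, base index, addresses
 * and kick eventfd into the backend, honouring legacy cross-endian
 * setups and the device's dirty-log state.
 */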
944 static int vhost_virtqueue_start(struct vhost_dev *dev,
945                                 struct VirtIODevice *vdev,
946                                 struct vhost_virtqueue *vq,
947                                 unsigned idx)
948 {
949     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
950     VirtioBusState *vbus = VIRTIO_BUS(qbus);
951     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
952     hwaddr s, l, a;
953     int r;
954     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
955     struct vhost_vring_file file = {
956         .index = vhost_vq_index
957     };
958     struct vhost_vring_state state = {
959         .index = vhost_vq_index
960     };
961     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
962 
963     a = virtio_queue_get_desc_addr(vdev, idx);
964     if (a == 0) {
965         /* Queue might not be ready for start */
966         return 0;
967     }
968 
969     vq->num = state.num = virtio_queue_get_num(vdev, idx);
970     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
971     if (r) {
972         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
973         return -errno;
974     }
975 
976     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
977     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
978     if (r) {
979         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
980         return -errno;
981     }
982 
983     if (vhost_needs_vring_endian(vdev)) {
984         r = vhost_virtqueue_set_vring_endian_legacy(dev,
985                                                     virtio_is_big_endian(vdev),
986                                                     vhost_vq_index);
987         if (r) {
988             return -errno;
989         }
990     }
991 
992     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
993     vq->desc_phys = a;
994     vq->desc = vhost_memory_map(dev, a, &l, 0);
995     if (!vq->desc || l != s) {
996         r = -ENOMEM;
997         goto fail_alloc_desc;
998     }
999     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1000     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1001     vq->avail = vhost_memory_map(dev, a, &l, 0);
1002     if (!vq->avail || l != s) {
1003         r = -ENOMEM;
1004         goto fail_alloc_avail;
1005     }
1006     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1007     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1008     vq->used = vhost_memory_map(dev, a, &l, 1);
1009     if (!vq->used || l != s) {
1010         r = -ENOMEM;
1011         goto fail_alloc_used;
1012     }
1013 
1014     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1015     if (r < 0) {
1016         r = -errno;
1017         goto fail_alloc;
1018     }
1019 
1020     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1021     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1022     if (r) {
1023         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1024         r = -errno;
1025         goto fail_kick;
1026     }
1027 
1028     /* Clear and discard previous events if any. */
1029     event_notifier_test_and_clear(&vq->masked_notifier);
1030 
1031     /* Init vring in unmasked state, unless guest_notifier_mask
1032      * will do it later.
1033      */
1034     if (!vdev->use_guest_notifier_mask) {
1035         /* TODO: check and handle errors. */
1036         vhost_virtqueue_mask(dev, vdev, idx, false);
1037     }
1038 
1039     if (k->query_guest_notifiers &&
1040         k->query_guest_notifiers(qbus->parent) &&
1041         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1042         file.fd = -1;
1043         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1044         if (r) {
1045             goto fail_vector;
1046         }
1047     }
1048 
1049     return 0;
1050 
1051 fail_vector:
1052 fail_kick:
1053 fail_alloc:
1054     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1055                        0, 0);
1056 fail_alloc_used:
1057     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1058                        0, 0);
1059 fail_alloc_avail:
1060     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1061                        0, 0);
1062 fail_alloc_desc:
1063     return r;
1064 }
1065 
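/*
 * Stop one virtqueue: read the last avail index back from the backend
 * (falling back to the device's used idx if the backend is broken),
 * restore legacy vring endianness if needed and unmap the rings.
 */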
1066 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1067                                     struct VirtIODevice *vdev,
1068                                     struct vhost_virtqueue *vq,
1069                                     unsigned idx)
1070 {
1071     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1072     struct vhost_vring_state state = {
1073         .index = vhost_vq_index,
1074     };
1075     int r;
1076 
1077     if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1078         /* Don't stop the virtqueue which might not have been started */
1079         return;
1080     }
1081 
1082     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1083     if (r < 0) {
1084         VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
1085         /* Connection to the backend is broken, so let's sync internal
1086          * last avail idx to the device used idx.
1087          */
1088         virtio_queue_restore_last_avail_idx(vdev, idx);
1089     } else {
1090         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1091     }
1092     virtio_queue_invalidate_signalled_used(vdev, idx);
1093     virtio_queue_update_used_idx(vdev, idx);
1094 
1095     /* In the cross-endian case, we need to reset the vring endianness to
1096      * native, as legacy devices expect by default.
1097      */
1098     if (vhost_needs_vring_endian(vdev)) {
1099         vhost_virtqueue_set_vring_endian_legacy(dev,
1100                                                 !virtio_is_big_endian(vdev),
1101                                                 vhost_vq_index);
1102     }
1103 
1104     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1105                        1, virtio_queue_get_used_size(vdev, idx));
1106     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1107                        0, virtio_queue_get_avail_size(vdev, idx));
1108     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1109                        0, virtio_queue_get_desc_size(vdev, idx));
1110 }
1111 
1112 static void vhost_eventfd_add(MemoryListener *listener,
1113                               MemoryRegionSection *section,
1114                               bool match_data, uint64_t data, EventNotifier *e)
1115 {
1116 }
1117 
1118 static void vhost_eventfd_del(MemoryListener *listener,
1119                               MemoryRegionSection *section,
1120                               bool match_data, uint64_t data, EventNotifier *e)
1121 {
1122 }
1123 
1124 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1125                                                 int n, uint32_t timeout)
1126 {
1127     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1128     struct vhost_vring_state state = {
1129         .index = vhost_vq_index,
1130         .num = timeout,
1131     };
1132     int r;
1133 
1134     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1135         return -EINVAL;
1136     }
1137 
1138     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1139     if (r) {
1140         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1141         return r;
1142     }
1143 
1144     return 0;
1145 }
1146 
1147 static int vhost_virtqueue_init(struct vhost_dev *dev,
1148                                 struct vhost_virtqueue *vq, int n)
1149 {
1150     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1151     struct vhost_vring_file file = {
1152         .index = vhost_vq_index,
1153     };
1154     int r = event_notifier_init(&vq->masked_notifier, 0);
1155     if (r < 0) {
1156         return r;
1157     }
1158 
1159     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1160     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1161     if (r) {
1162         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1163         r = -errno;
1164         goto fail_call;
1165     }
1166 
1167     vq->dev = dev;
1168 
1169     return 0;
1170 fail_call:
1171     event_notifier_cleanup(&vq->masked_notifier);
1172     return r;
1173 }
1174 
1175 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1176 {
1177     event_notifier_cleanup(&vq->masked_notifier);
1178 }
1179 
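/*
 * One-time initialisation of a vhost device: select the backend, query
 * its features, init every virtqueue's masked notifier, register the
 * memory listener and, when dirty logging can't work, install a
 * migration blocker.
 */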
1180 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1181                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1182 {
1183     uint64_t features;
1184     int i, r, n_initialized_vqs = 0;
1185     Error *local_err = NULL;
1186 
1187     hdev->vdev = NULL;
1188     hdev->migration_blocker = NULL;
1189 
1190     r = vhost_set_backend_type(hdev, backend_type);
1191     assert(r >= 0);
1192 
1193     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1194     if (r < 0) {
1195         goto fail;
1196     }
1197 
1198     r = hdev->vhost_ops->vhost_set_owner(hdev);
1199     if (r < 0) {
1200         VHOST_OPS_DEBUG("vhost_set_owner failed");
1201         goto fail;
1202     }
1203 
1204     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1205     if (r < 0) {
1206         VHOST_OPS_DEBUG("vhost_get_features failed");
1207         goto fail;
1208     }
1209 
1210     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1211         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1212         if (r < 0) {
1213             goto fail;
1214         }
1215     }
1216 
1217     if (busyloop_timeout) {
1218         for (i = 0; i < hdev->nvqs; ++i) {
1219             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1220                                                      busyloop_timeout);
1221             if (r < 0) {
1222                 goto fail_busyloop;
1223             }
1224         }
1225     }
1226 
1227     hdev->features = features;
1228 
1229     hdev->memory_listener = (MemoryListener) {
1230         .begin = vhost_begin,
1231         .commit = vhost_commit,
1232         .region_add = vhost_region_addnop,
1233         .region_nop = vhost_region_addnop,
1234         .log_start = vhost_log_start,
1235         .log_stop = vhost_log_stop,
1236         .log_sync = vhost_log_sync,
1237         .log_global_start = vhost_log_global_start,
1238         .log_global_stop = vhost_log_global_stop,
1239         .eventfd_add = vhost_eventfd_add,
1240         .eventfd_del = vhost_eventfd_del,
1241         .priority = 10
1242     };
1243 
1244     hdev->iommu_listener = (MemoryListener) {
1245         .region_add = vhost_iommu_region_add,
1246         .region_del = vhost_iommu_region_del,
1247     };
1248 
1249     if (hdev->migration_blocker == NULL) {
1250         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1251             error_setg(&hdev->migration_blocker,
1252                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1253         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1254             error_setg(&hdev->migration_blocker,
1255                        "Migration disabled: failed to allocate shared memory");
1256         }
1257     }
1258 
1259     if (hdev->migration_blocker != NULL) {
1260         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1261         if (local_err) {
1262             error_report_err(local_err);
1263             error_free(hdev->migration_blocker);
1264             goto fail_busyloop;
1265         }
1266     }
1267 
1268     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1269     hdev->n_mem_sections = 0;
1270     hdev->mem_sections = NULL;
1271     hdev->log = NULL;
1272     hdev->log_size = 0;
1273     hdev->log_enabled = false;
1274     hdev->started = false;
1275     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1276     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1277 
1278     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1279         error_report("vhost backend memory slots limit is less"
1280                 " than current number of present memory slots");
1281         r = -1;
1282         if (busyloop_timeout) {
1283             goto fail_busyloop;
1284         } else {
1285             goto fail;
1286         }
1287     }
1288 
1289     return 0;
1290 
1291 fail_busyloop:
1292     while (--i >= 0) {
1293         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1294     }
1295 fail:
1296     hdev->nvqs = n_initialized_vqs;
1297     vhost_dev_cleanup(hdev);
1298     return r;
1299 }
1300 
1301 void vhost_dev_cleanup(struct vhost_dev *hdev)
1302 {
1303     int i;
1304 
1305     for (i = 0; i < hdev->nvqs; ++i) {
1306         vhost_virtqueue_cleanup(hdev->vqs + i);
1307     }
1308     if (hdev->mem) {
1309         /* those are only safe after successful init */
1310         memory_listener_unregister(&hdev->memory_listener);
1311         QLIST_REMOVE(hdev, entry);
1312     }
1313     if (hdev->migration_blocker) {
1314         migrate_del_blocker(hdev->migration_blocker);
1315         error_free(hdev->migration_blocker);
1316     }
1317     g_free(hdev->mem);
1318     g_free(hdev->mem_sections);
1319     if (hdev->vhost_ops) {
1320         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1321     }
1322     assert(!hdev->log);
1323 
1324     memset(hdev, 0, sizeof(struct vhost_dev));
1325 }
1326 
1327 /* Stop processing guest IO notifications in qemu.
1328  * Start processing them in vhost in the kernel.
1329  */
1330 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1331 {
1332     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1333     int i, r, e;
1334 
1335     /* We will pass the notifiers to the kernel; make sure that QEMU
1336      * doesn't interfere.
1337      */
1338     r = virtio_device_grab_ioeventfd(vdev);
1339     if (r < 0) {
1340         error_report("binding does not support host notifiers");
1341         goto fail;
1342     }
1343 
1344     for (i = 0; i < hdev->nvqs; ++i) {
1345         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1346                                          true);
1347         if (r < 0) {
1348             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1349             goto fail_vq;
1350         }
1351     }
1352 
1353     return 0;
1354 fail_vq:
1355     while (--i >= 0) {
1356         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1357                                          false);
1358         if (e < 0) {
1359             error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
1360         }
1361         assert(e >= 0);
1362         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1363     }
1364     virtio_device_release_ioeventfd(vdev);
1365 fail:
1366     return r;
1367 }
1368 
1369 /* Stop processing guest IO notifications in vhost.
1370  * Start processing them in qemu.
1371  * This might actually run the qemu handlers right away,
1372  * so virtio in qemu must be completely set up when this is called.
1373  */
1374 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1375 {
1376     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1377     int i, r;
1378 
1379     for (i = 0; i < hdev->nvqs; ++i) {
1380         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1381                                          false);
1382         if (r < 0) {
1383             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1384         }
1385         assert(r >= 0);
1386         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1387     }
1388     virtio_device_release_ioeventfd(vdev);
1389 }
1390 
1391 /* Test and clear event pending status.
1392  * Should be called after unmask to avoid losing events.
1393  */
1394 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1395 {
1396     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1397     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1398     return event_notifier_test_and_clear(&vq->masked_notifier);
1399 }
1400 
1401 /* Mask/unmask events from this vq. */
1402 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1403                          bool mask)
1404 {
1405     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1406     int r, index = n - hdev->vq_index;
1407     struct vhost_vring_file file;
1408 
1409     /* should only be called after backend is connected */
1410     assert(hdev->vhost_ops);
1411 
1412     if (mask) {
1413         assert(vdev->use_guest_notifier_mask);
1414         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1415     } else {
1416         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1417     }
1418 
1419     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1420     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1421     if (r < 0) {
1422         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1423     }
1424 }
1425 
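/*
 * vhost_get_features()/vhost_ack_features() walk a feature-bit list
 * terminated by VHOST_INVALID_FEATURE_BIT.  A minimal caller sketch
 * (the bit list here is hypothetical, for illustration only):
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 *     ...
 *     vhost_ack_features(hdev, example_feature_bits, features);
 */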
1426 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1427                             uint64_t features)
1428 {
1429     const int *bit = feature_bits;
1430     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1431         uint64_t bit_mask = (1ULL << *bit);
1432         if (!(hdev->features & bit_mask)) {
1433             features &= ~bit_mask;
1434         }
1435         bit++;
1436     }
1437     return features;
1438 }
1439 
1440 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1441                         uint64_t features)
1442 {
1443     const int *bit = feature_bits;
1444     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1445         uint64_t bit_mask = (1ULL << *bit);
1446         if (features & bit_mask) {
1447             hdev->acked_features |= bit_mask;
1448         }
1449         bit++;
1450     }
1451 }
1452 
1453 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1454                          uint32_t config_len)
1455 {
1456     assert(hdev->vhost_ops);
1457 
1458     if (hdev->vhost_ops->vhost_get_config) {
1459         return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1460     }
1461 
1462     return -1;
1463 }
1464 
1465 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1466                          uint32_t offset, uint32_t size, uint32_t flags)
1467 {
1468     assert(hdev->vhost_ops);
1469 
1470     if (hdev->vhost_ops->vhost_set_config) {
1471         return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1472                                                  size, flags);
1473     }
1474 
1475     return -1;
1476 }
1477 
1478 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1479                                    const VhostDevConfigOps *ops)
1480 {
1481     hdev->config_ops = ops;
1482 }
1483 
1484 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1485 {
1486     if (inflight->addr) {
1487         qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1488         inflight->addr = NULL;
1489         inflight->fd = -1;
1490     }
1491 }
1492 
1493 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1494                                      uint64_t new_size)
1495 {
1496     Error *err = NULL;
1497     int fd = -1;
1498     void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1499                                   F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1500                                   &fd, &err);
1501 
1502     if (err) {
1503         error_report_err(err);
1504         return -1;
1505     }
1506 
1507     vhost_dev_free_inflight(inflight);
1508     inflight->offset = 0;
1509     inflight->addr = addr;
1510     inflight->fd = fd;
1511     inflight->size = new_size;
1512 
1513     return 0;
1514 }
1515 
1516 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1517 {
1518     if (inflight->addr) {
1519         qemu_put_be64(f, inflight->size);
1520         qemu_put_be16(f, inflight->queue_size);
1521         qemu_put_buffer(f, inflight->addr, inflight->size);
1522     } else {
1523         qemu_put_be64(f, 0);
1524     }
1525 }
1526 
1527 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1528 {
1529     uint64_t size;
1530 
1531     size = qemu_get_be64(f);
1532     if (!size) {
1533         return 0;
1534     }
1535 
1536     if (inflight->size != size) {
1537         if (vhost_dev_resize_inflight(inflight, size)) {
1538             return -1;
1539         }
1540     }
1541     inflight->queue_size = qemu_get_be16(f);
1542 
1543     qemu_get_buffer(f, inflight->addr, size);
1544 
1545     return 0;
1546 }
1547 
1548 int vhost_dev_set_inflight(struct vhost_dev *dev,
1549                            struct vhost_inflight *inflight)
1550 {
1551     int r;
1552 
1553     if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1554         r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1555         if (r) {
1556             VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1557             return -errno;
1558         }
1559     }
1560 
1561     return 0;
1562 }
1563 
1564 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1565                            struct vhost_inflight *inflight)
1566 {
1567     int r;
1568 
1569     if (dev->vhost_ops->vhost_get_inflight_fd) {
1570         r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1571         if (r) {
1572             VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1573             return -errno;
1574         }
1575     }
1576 
1577     return 0;
1578 }
1579 
1580 /* Host notifiers must be enabled at this point. */
1581 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1582 {
1583     int i, r;
1584 
1585     /* should only be called after backend is connected */
1586     assert(hdev->vhost_ops);
1587 
1588     hdev->started = true;
1589     hdev->vdev = vdev;
1590 
1591     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1592     if (r < 0) {
1593         goto fail_features;
1594     }
1595 
1596     if (vhost_dev_has_iommu(hdev)) {
1597         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1598     }
1599 
1600     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1601     if (r < 0) {
1602         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1603         r = -errno;
1604         goto fail_mem;
1605     }
1606     for (i = 0; i < hdev->nvqs; ++i) {
1607         r = vhost_virtqueue_start(hdev,
1608                                   vdev,
1609                                   hdev->vqs + i,
1610                                   hdev->vq_index + i);
1611         if (r < 0) {
1612             goto fail_vq;
1613         }
1614     }
1615 
1616     if (hdev->log_enabled) {
1617         uint64_t log_base;
1618 
1619         hdev->log_size = vhost_get_log_size(hdev);
1620         hdev->log = vhost_log_get(hdev->log_size,
1621                                   vhost_dev_log_is_shared(hdev));
1622         log_base = (uintptr_t)hdev->log->log;
1623         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1624                                                 hdev->log_size ? log_base : 0,
1625                                                 hdev->log);
1626         if (r < 0) {
1627             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1628             r = -errno;
1629             goto fail_log;
1630         }
1631     }
1632 
1633     if (vhost_dev_has_iommu(hdev)) {
1634         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1635 
1636         /* Update used ring information for IOTLB to work correctly;
1637          * the vhost-kernel code requires this. */
1638         for (i = 0; i < hdev->nvqs; ++i) {
1639             struct vhost_virtqueue *vq = hdev->vqs + i;
1640             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1641         }
1642     }
1643     return 0;
1644 fail_log:
1645     vhost_log_put(hdev, false);
1646 fail_vq:
1647     while (--i >= 0) {
1648         vhost_virtqueue_stop(hdev,
1649                              vdev,
1650                              hdev->vqs + i,
1651                              hdev->vq_index + i);
1652     }
1653 
1654 fail_mem:
1655 fail_features:
1656 
1657     hdev->started = false;
1658     return r;
1659 }
1660 
1661 /* Host notifiers must be enabled at this point. */
1662 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1663 {
1664     int i;
1665 
1666     /* should only be called after backend is connected */
1667     assert(hdev->vhost_ops);
1668 
1669     for (i = 0; i < hdev->nvqs; ++i) {
1670         vhost_virtqueue_stop(hdev,
1671                              vdev,
1672                              hdev->vqs + i,
1673                              hdev->vq_index + i);
1674     }
1675 
1676     if (vhost_dev_has_iommu(hdev)) {
1677         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1678         memory_listener_unregister(&hdev->iommu_listener);
1679     }
1680     vhost_log_put(hdev, true);
1681     hdev->started = false;
1682     hdev->vdev = NULL;
1683 }
1684 
1685 int vhost_net_set_backend(struct vhost_dev *hdev,
1686                           struct vhost_vring_file *file)
1687 {
1688     if (hdev->vhost_ops->vhost_net_set_backend) {
1689         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1690     }
1691 
1692     return -1;
1693 }
1694