xref: /openbmc/qemu/hw/virtio/vhost.c (revision 6e790746)
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"

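/* Scan one stretch of the kernel's dirty log and propagate it into QEMU's
 * dirty bitmap.  The log is an array of vhost_log_chunk_t words; each set
 * bit marks one VHOST_LOG_PAGE page as dirty (constants from
 * hw/virtio/vhost.h).  Only the intersection of the mapped range
 * [mfirst, mlast] and the region of interest [rfirst, rlast] is scanned. */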
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

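/* Sync dirty pages for every vhost memory region and used ring that
 * intersects the given section, clipped to [first, last].  A no-op unless
 * the device is started with logging enabled. */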
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, section->size);
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

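/* MemoryListener log_sync callback: flush the kernel dirty log for one
 * section across the entire guest address range. */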
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

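/* Compute the dirty log size, in vhost_log_chunk_t units, needed to cover
 * the highest guest physical address in any memory region or used ring.
 * With the usual definitions (VHOST_LOG_PAGE = 0x1000, 64-bit chunks) one
 * chunk covers 256KB, so e.g. a region ending at 1GB needs 4096 chunks. */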
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

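/* Swap in a zeroed log of the given size.  The kernel is pointed at the
 * new log before the old one is synced and freed, so no writes are lost
 * during the switch. */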
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

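/* Verify that no virtqueue ring overlapping [start_addr, start_addr + size)
 * has changed its host mapping while the device is running: vhost in the
 * kernel keeps using the old userspace addresses, so a relocated ring
 * would be fatal. */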
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

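/* Return the first region in dev->mem overlapping the given range, or
 * NULL if there is none. */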
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

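/* Return true if the memory table must change to accommodate the given
 * mapping: either the range is not fully covered by an existing region,
 * or its userspace address no longer lines up with that region. */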
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

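/* Update dev->mem for a region that is being added or removed, then, if
 * the device is running, push the new table to the kernel with
 * VHOST_SET_MEM_TABLE, growing or shrinking the dirty log around the
 * update as required. */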
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

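/* Begin/commit are required MemoryListener callbacks but are empty here:
 * this listener applies each region change as it arrives rather than
 * batching changes into a transaction. */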
static void vhost_begin(MemoryListener *listener)
{
}

static void vhost_commit(MemoryListener *listener)
{
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

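/* Tell the kernel where a virtqueue's descriptor, avail and used rings
 * live in QEMU's address space, and whether used-ring writes must be
 * logged (VHOST_VRING_F_LOG) at the guest physical address in
 * log_guest_addr. */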
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

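/* Switch dirty logging on or off for the whole device: renegotiate
 * features, then update every virtqueue's ring addresses.  On failure,
 * queues already updated are rolled back to the previous logging state. */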
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

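/* Migration hook shared by log_global_start/stop: toggles device-wide
 * dirty logging.  Before the device is started only the flag is recorded;
 * afterwards the log is allocated or freed and the kernel reconfigured. */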
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

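/* Hand one virtqueue over to the kernel: program its size and base index,
 * map the descriptor, avail and used rings into QEMU and pass their
 * addresses down, then wire up the kick eventfd.  Mappings are unwound in
 * reverse order on failure. */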
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    /* vhost_virtqueue_set_addr() already returns -errno on failure. */
    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

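/* Take one virtqueue back from the kernel: fetch the last avail index via
 * VHOST_GET_VRING_BASE so virtio in QEMU resumes where vhost stopped,
 * then unmap the rings, marking only the used ring as written. */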
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

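/* One-time virtqueue setup: create the masked notifier and install it as
 * the kernel's call eventfd; vhost_virtqueue_mask() later switches
 * between this and the guest notifier. */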
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

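/* Open (or adopt) the vhost control fd, take ownership of it, query the
 * backend features, initialize per-queue notifiers, and register the
 * memory listener that keeps the kernel's memory table in sync. */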
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* VHOST_SET_LOG_BASE expects a pointer to the 64-bit base,
         * as in vhost_dev_log_resize() above. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}