xref: /openbmc/qemu/hw/virtio/virtio.c (revision 500eb6db)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "cpu.h"
17 #include "trace.h"
18 #include "exec/address-spaces.h"
19 #include "qemu/error-report.h"
20 #include "qemu/module.h"
21 #include "hw/virtio/virtio.h"
22 #include "qemu/atomic.h"
23 #include "hw/virtio/virtio-bus.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "sysemu/dma.h"
26 
27 /*
28  * The alignment to use between consumer and producer parts of vring.
29  * x86 pagesize again. This is the default, used by transports like PCI
30  * which don't provide a means for the guest to tell the host the alignment.
31  */
32 #define VIRTIO_PCI_VRING_ALIGN         4096
33 
34 typedef struct VRingDesc
35 {
36     uint64_t addr;
37     uint32_t len;
38     uint16_t flags;
39     uint16_t next;
40 } VRingDesc;
41 
42 typedef struct VRingAvail
43 {
44     uint16_t flags;
45     uint16_t idx;
46     uint16_t ring[0];
47 } VRingAvail;
48 
49 typedef struct VRingUsedElem
50 {
51     uint32_t id;
52     uint32_t len;
53 } VRingUsedElem;
54 
55 typedef struct VRingUsed
56 {
57     uint16_t flags;
58     uint16_t idx;
59     VRingUsedElem ring[0];
60 } VRingUsed;
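
/*
 * Layout sketch (illustrative, derived from the structures above): a split
 * virtqueue of N entries occupies guest memory as three regions:
 *
 *   desc:  N * 16 bytes              (VRingDesc table)
 *   avail: 4 + N * 2 [+ 2] bytes     (VRingAvail, plus used_event word)
 *   used:  4 + N * 8 [+ 2] bytes     (VRingUsed, plus avail_event word)
 *
 * The bracketed trailing words exist only when VIRTIO_RING_F_EVENT_IDX has
 * been negotiated (see event_size in virtio_init_region_cache() below).
 */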
61 
62 typedef struct VRingMemoryRegionCaches {
63     struct rcu_head rcu;
64     MemoryRegionCache desc;
65     MemoryRegionCache avail;
66     MemoryRegionCache used;
67 } VRingMemoryRegionCaches;
68 
69 typedef struct VRing
70 {
71     unsigned int num;
72     unsigned int num_default;
73     unsigned int align;
74     hwaddr desc;
75     hwaddr avail;
76     hwaddr used;
77     VRingMemoryRegionCaches *caches;
78 } VRing;
79 
80 struct VirtQueue
81 {
82     VRing vring;
83 
84     /* Next head to pop */
85     uint16_t last_avail_idx;
86 
87     /* Last avail_idx read from VQ. */
88     uint16_t shadow_avail_idx;
89 
90     uint16_t used_idx;
91 
92     /* Last used index value we have signalled on */
93     uint16_t signalled_used;
94 
95     /* Whether signalled_used is valid */
96     bool signalled_used_valid;
97 
98     /* Notification enabled? */
99     bool notification;
100 
101     uint16_t queue_index;
102 
103     unsigned int inuse;
104 
105     uint16_t vector;
106     VirtIOHandleOutput handle_output;
107     VirtIOHandleAIOOutput handle_aio_output;
108     VirtIODevice *vdev;
109     EventNotifier guest_notifier;
110     EventNotifier host_notifier;
111     QLIST_ENTRY(VirtQueue) node;
112 };
113 
114 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
115 {
116     if (!caches) {
117         return;
118     }
119 
120     address_space_cache_destroy(&caches->desc);
121     address_space_cache_destroy(&caches->avail);
122     address_space_cache_destroy(&caches->used);
123     g_free(caches);
124 }
125 
126 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
127 {
128     VRingMemoryRegionCaches *caches;
129 
130     caches = atomic_read(&vq->vring.caches);
131     atomic_rcu_set(&vq->vring.caches, NULL);
132     if (caches) {
133         call_rcu(caches, virtio_free_region_cache, rcu);
134     }
135 }
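
/*
 * Note: readers fetch the caches pointer under rcu_read_lock() (see
 * vring_get_region_caches() below), so the old caches cannot be freed
 * synchronously here; call_rcu() defers virtio_free_region_cache() until
 * every reader currently inside a critical section has left it.
 */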
136 
137 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
138 {
139     VirtQueue *vq = &vdev->vq[n];
140     VRingMemoryRegionCaches *old = vq->vring.caches;
141     VRingMemoryRegionCaches *new = NULL;
142     hwaddr addr, size;
143     int event_size;
144     int64_t len;
145 
146     event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
147 
148     addr = vq->vring.desc;
149     if (!addr) {
150         goto out_no_cache;
151     }
152     new = g_new0(VRingMemoryRegionCaches, 1);
153     size = virtio_queue_get_desc_size(vdev, n);
154     len = address_space_cache_init(&new->desc, vdev->dma_as,
155                                    addr, size, false);
156     if (len < size) {
157         virtio_error(vdev, "Cannot map desc");
158         goto err_desc;
159     }
160 
161     size = virtio_queue_get_used_size(vdev, n) + event_size;
162     len = address_space_cache_init(&new->used, vdev->dma_as,
163                                    vq->vring.used, size, true);
164     if (len < size) {
165         virtio_error(vdev, "Cannot map used");
166         goto err_used;
167     }
168 
169     size = virtio_queue_get_avail_size(vdev, n) + event_size;
170     len = address_space_cache_init(&new->avail, vdev->dma_as,
171                                    vq->vring.avail, size, false);
172     if (len < size) {
173         virtio_error(vdev, "Cannot map avail");
174         goto err_avail;
175     }
176 
177     atomic_rcu_set(&vq->vring.caches, new);
178     if (old) {
179         call_rcu(old, virtio_free_region_cache, rcu);
180     }
181     return;
182 
183 err_avail:
184     address_space_cache_destroy(&new->avail);
185 err_used:
186     address_space_cache_destroy(&new->used);
187 err_desc:
188     address_space_cache_destroy(&new->desc);
189 out_no_cache:
190     g_free(new);
191     virtio_virtqueue_reset_region_cache(vq);
192 }
193 
194 /* virt queue functions */
195 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
196 {
197     VRing *vring = &vdev->vq[n].vring;
198 
199     if (!vring->num || !vring->desc || !vring->align) {
200         /* not yet setup -> nothing to do */
201         return;
202     }
203     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
204     vring->used = vring_align(vring->avail +
205                               offsetof(VRingAvail, ring[vring->num]),
206                               vring->align);
207     virtio_init_region_cache(vdev, n);
208 }
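
/*
 * Worked example (illustrative): with num = 256 and the default 4096-byte
 * alignment, avail = desc + 256 * 16 = desc + 4096, and used starts at
 * vring_align(avail + 4 + 2 * 256, 4096), i.e. the 516-byte avail ring is
 * padded up to the next 4096-byte boundary.
 */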
209 
210 /* Called within rcu_read_lock().  */
211 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
212                             MemoryRegionCache *cache, int i)
213 {
214     address_space_read_cached(cache, i * sizeof(VRingDesc),
215                               desc, sizeof(VRingDesc));
216     virtio_tswap64s(vdev, &desc->addr);
217     virtio_tswap32s(vdev, &desc->len);
218     virtio_tswap16s(vdev, &desc->flags);
219     virtio_tswap16s(vdev, &desc->next);
220 }
221 
222 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
223 {
224     VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
225     assert(caches != NULL);
226     return caches;
227 }
228 /* Called within rcu_read_lock().  */
229 static inline uint16_t vring_avail_flags(VirtQueue *vq)
230 {
231     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
232     hwaddr pa = offsetof(VRingAvail, flags);
233     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
234 }
235 
236 /* Called within rcu_read_lock().  */
237 static inline uint16_t vring_avail_idx(VirtQueue *vq)
238 {
239     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
240     hwaddr pa = offsetof(VRingAvail, idx);
241     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
242     return vq->shadow_avail_idx;
243 }
244 
245 /* Called within rcu_read_lock().  */
246 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
247 {
248     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
249     hwaddr pa = offsetof(VRingAvail, ring[i]);
250     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
251 }
252 
253 /* Called within rcu_read_lock().  */
254 static inline uint16_t vring_get_used_event(VirtQueue *vq)
255 {
256     return vring_avail_ring(vq, vq->vring.num);
257 }
258 
259 /* Called within rcu_read_lock().  */
260 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
261                                     int i)
262 {
263     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
264     hwaddr pa = offsetof(VRingUsed, ring[i]);
265     virtio_tswap32s(vq->vdev, &uelem->id);
266     virtio_tswap32s(vq->vdev, &uelem->len);
267     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
268     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
269 }
270 
271 /* Called within rcu_read_lock().  */
272 static uint16_t vring_used_idx(VirtQueue *vq)
273 {
274     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
275     hwaddr pa = offsetof(VRingUsed, idx);
276     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
277 }
278 
279 /* Called within rcu_read_lock().  */
280 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
281 {
282     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
283     hwaddr pa = offsetof(VRingUsed, idx);
284     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
285     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
286     vq->used_idx = val;
287 }
288 
289 /* Called within rcu_read_lock().  */
290 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
291 {
292     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
293     VirtIODevice *vdev = vq->vdev;
294     hwaddr pa = offsetof(VRingUsed, flags);
295     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
296 
297     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
298     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
299 }
300 
301 /* Called within rcu_read_lock().  */
302 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
303 {
304     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
305     VirtIODevice *vdev = vq->vdev;
306     hwaddr pa = offsetof(VRingUsed, flags);
307     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
308 
309     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
310     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
311 }
312 
313 /* Called within rcu_read_lock().  */
314 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
315 {
316     VRingMemoryRegionCaches *caches;
317     hwaddr pa;
318     if (!vq->notification) {
319         return;
320     }
321 
322     caches = vring_get_region_caches(vq);
323     pa = offsetof(VRingUsed, ring[vq->vring.num]);
324     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
325     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
326 }
327 
328 void virtio_queue_set_notification(VirtQueue *vq, int enable)
329 {
330     vq->notification = enable;
331 
332     if (!vq->vring.desc) {
333         return;
334     }
335 
336     rcu_read_lock();
337     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
338         vring_set_avail_event(vq, vring_avail_idx(vq));
339     } else if (enable) {
340         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
341     } else {
342         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
343     }
344     if (enable) {
345         /* Expose avail event/used flags before caller checks the avail idx. */
346         smp_mb();
347     }
348     rcu_read_unlock();
349 }
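
/*
 * Usage sketch (illustrative; the my_dev_* names are hypothetical): device
 * handlers commonly disable notifications while draining the queue and
 * re-check after re-enabling them to close the race with the guest:
 *
 *     static void my_dev_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         do {
 *             virtio_queue_set_notification(vq, 0);
 *             while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
 *                 my_dev_process(vdev, elem);      // hypothetical helper
 *                 virtqueue_push(vq, elem, 0);
 *                 g_free(elem);
 *             }
 *             virtio_queue_set_notification(vq, 1);
 *         } while (!virtio_queue_empty(vq));
 *         virtio_notify(vdev, vq);
 *     }
 */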
350 
351 int virtio_queue_ready(VirtQueue *vq)
352 {
353     return vq->vring.avail != 0;
354 }
355 
356 /* Fetch avail_idx from VQ memory only when we really need to know if
357  * guest has added some buffers.
358  * Called within rcu_read_lock().  */
359 static int virtio_queue_empty_rcu(VirtQueue *vq)
360 {
361     if (unlikely(vq->vdev->broken)) {
362         return 1;
363     }
364 
365     if (unlikely(!vq->vring.avail)) {
366         return 1;
367     }
368 
369     if (vq->shadow_avail_idx != vq->last_avail_idx) {
370         return 0;
371     }
372 
373     return vring_avail_idx(vq) == vq->last_avail_idx;
374 }
375 
376 int virtio_queue_empty(VirtQueue *vq)
377 {
378     bool empty;
379 
380     if (unlikely(vq->vdev->broken)) {
381         return 1;
382     }
383 
384     if (unlikely(!vq->vring.avail)) {
385         return 1;
386     }
387 
388     if (vq->shadow_avail_idx != vq->last_avail_idx) {
389         return 0;
390     }
391 
392     rcu_read_lock();
393     empty = vring_avail_idx(vq) == vq->last_avail_idx;
394     rcu_read_unlock();
395     return empty;
396 }
397 
398 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
399                                unsigned int len)
400 {
401     AddressSpace *dma_as = vq->vdev->dma_as;
402     unsigned int offset;
403     int i;
404 
405     offset = 0;
406     for (i = 0; i < elem->in_num; i++) {
407         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
408 
409         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
410                          elem->in_sg[i].iov_len,
411                          DMA_DIRECTION_FROM_DEVICE, size);
412 
413         offset += size;
414     }
415 
416     for (i = 0; i < elem->out_num; i++)
417         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
418                          elem->out_sg[i].iov_len,
419                          DMA_DIRECTION_TO_DEVICE,
420                          elem->out_sg[i].iov_len);
421 }
422 
423 /* virtqueue_detach_element:
424  * @vq: The #VirtQueue
425  * @elem: The #VirtQueueElement
426  * @len: number of bytes written
427  *
428  * Detach the element from the virtqueue.  This function is suitable for device
429  * reset or other situations where a #VirtQueueElement is simply freed and will
430  * not be pushed or discarded.
431  */
432 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
433                               unsigned int len)
434 {
435     vq->inuse--;
436     virtqueue_unmap_sg(vq, elem, len);
437 }
438 
439 /* virtqueue_unpop:
440  * @vq: The #VirtQueue
441  * @elem: The #VirtQueueElement
442  * @len: number of bytes written
443  *
444  * Pretend the most recent element wasn't popped from the virtqueue.  The next
445  * call to virtqueue_pop() will refetch the element.
446  */
447 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
448                      unsigned int len)
449 {
450     vq->last_avail_idx--;
451     virtqueue_detach_element(vq, elem, len);
452 }
453 
454 /* virtqueue_rewind:
455  * @vq: The #VirtQueue
456  * @num: Number of elements to push back
457  *
458  * Pretend that elements weren't popped from the virtqueue.  The next
459  * virtqueue_pop() will refetch the oldest element.
460  *
461  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
462  *
463  * Returns: true on success, false if @num is greater than the number of in use
464  * elements.
465  */
466 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
467 {
468     if (num > vq->inuse) {
469         return false;
470     }
471     vq->last_avail_idx -= num;
472     vq->inuse -= num;
473     return true;
474 }
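
/*
 * Usage sketch (illustrative): a device that popped a batch of requests and
 * then hit a transient error can requeue the whole batch, e.g.
 *
 *     virtqueue_rewind(vq, n_popped);     // n_popped is hypothetical
 *
 * after which the next virtqueue_pop() refetches the oldest element.
 */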
475 
476 /* Called within rcu_read_lock().  */
477 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
478                     unsigned int len, unsigned int idx)
479 {
480     VRingUsedElem uelem;
481 
482     trace_virtqueue_fill(vq, elem, len, idx);
483 
484     virtqueue_unmap_sg(vq, elem, len);
485 
486     if (unlikely(vq->vdev->broken)) {
487         return;
488     }
489 
490     if (unlikely(!vq->vring.used)) {
491         return;
492     }
493 
494     idx = (idx + vq->used_idx) % vq->vring.num;
495 
496     uelem.id = elem->index;
497     uelem.len = len;
498     vring_used_write(vq, &uelem, idx);
499 }
500 
501 /* Called within rcu_read_lock().  */
502 void virtqueue_flush(VirtQueue *vq, unsigned int count)
503 {
504     uint16_t old, new;
505 
506     if (unlikely(vq->vdev->broken)) {
507         vq->inuse -= count;
508         return;
509     }
510 
511     if (unlikely(!vq->vring.used)) {
512         return;
513     }
514 
515     /* Make sure buffer is written before we update index. */
516     smp_wmb();
517     trace_virtqueue_flush(vq, count);
518     old = vq->used_idx;
519     new = old + count;
520     vring_used_idx_set(vq, new);
521     vq->inuse -= count;
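    /* If the used index has advanced past (wrapped over) the last value we
     * signalled on, the cached signalled_used is stale; invalidate it so
     * virtio_should_notify() errs on the side of signalling. */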
522     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
523         vq->signalled_used_valid = false;
524 }
525 
526 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
527                     unsigned int len)
528 {
529     rcu_read_lock();
530     virtqueue_fill(vq, elem, len, 0);
531     virtqueue_flush(vq, 1);
532     rcu_read_unlock();
533 }
534 
535 /* Called within rcu_read_lock().  */
536 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
537 {
538     uint16_t num_heads = vring_avail_idx(vq) - idx;
539 
540     /* Check it isn't doing very strange things with descriptor numbers. */
541     if (num_heads > vq->vring.num) {
542         virtio_error(vq->vdev, "Guest moved avail index from %u to %u",
543                      idx, vq->shadow_avail_idx);
544         return -EINVAL;
545     }
546     /* On success, callers read a descriptor at vq->last_avail_idx.
547      * Make sure descriptor read does not bypass avail index read. */
548     if (num_heads) {
549         smp_rmb();
550     }
551 
552     return num_heads;
553 }
554 
555 /* Called within rcu_read_lock().  */
556 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
557                                unsigned int *head)
558 {
559     /* Grab the next descriptor number they're advertising, and increment
560      * the index we've seen. */
561     *head = vring_avail_ring(vq, idx % vq->vring.num);
562 
563     /* If their number is silly, that's a fatal mistake. */
564     if (*head >= vq->vring.num) {
565         virtio_error(vq->vdev, "Guest says index %u is available", *head);
566         return false;
567     }
568 
569     return true;
570 }
571 
572 enum {
573     VIRTQUEUE_READ_DESC_ERROR = -1,
574     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
575     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
576 };
577 
578 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
579                                     MemoryRegionCache *desc_cache, unsigned int max,
580                                     unsigned int *next)
581 {
582     /* If this descriptor says it doesn't chain, we're done. */
583     if (!(desc->flags & VRING_DESC_F_NEXT)) {
584         return VIRTQUEUE_READ_DESC_DONE;
585     }
586 
587     /* Check they're not leading us off the end of the descriptor table. */
588     *next = desc->next;
589     /* Make sure compiler knows to grab that: we don't want it changing! */
590     smp_wmb();
591 
592     if (*next >= max) {
593         virtio_error(vdev, "Desc next is %u", *next);
594         return VIRTQUEUE_READ_DESC_ERROR;
595     }
596 
597     vring_desc_read(vdev, desc, desc_cache, *next);
598     return VIRTQUEUE_READ_DESC_MORE;
599 }
600 
601 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
602                                unsigned int *out_bytes,
603                                unsigned max_in_bytes, unsigned max_out_bytes)
604 {
605     VirtIODevice *vdev = vq->vdev;
606     unsigned int max, idx;
607     unsigned int total_bufs, in_total, out_total;
608     VRingMemoryRegionCaches *caches;
609     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
610     int64_t len = 0;
611     int rc;
612 
613     if (unlikely(!vq->vring.desc)) {
614         if (in_bytes) {
615             *in_bytes = 0;
616         }
617         if (out_bytes) {
618             *out_bytes = 0;
619         }
620         return;
621     }
622 
623     rcu_read_lock();
624     idx = vq->last_avail_idx;
625     total_bufs = in_total = out_total = 0;
626 
627     max = vq->vring.num;
628     caches = vring_get_region_caches(vq);
629     if (caches->desc.len < max * sizeof(VRingDesc)) {
630         virtio_error(vdev, "Cannot map descriptor ring");
631         goto err;
632     }
633 
634     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
635         MemoryRegionCache *desc_cache = &caches->desc;
636         unsigned int num_bufs;
637         VRingDesc desc;
638         unsigned int i;
639 
640         num_bufs = total_bufs;
641 
642         if (!virtqueue_get_head(vq, idx++, &i)) {
643             goto err;
644         }
645 
646         vring_desc_read(vdev, &desc, desc_cache, i);
647 
648         if (desc.flags & VRING_DESC_F_INDIRECT) {
649             if (!desc.len || (desc.len % sizeof(VRingDesc))) {
650                 virtio_error(vdev, "Invalid size for indirect buffer table");
651                 goto err;
652             }
653 
654             /* If we've got too many, that implies a descriptor loop. */
655             if (num_bufs >= max) {
656                 virtio_error(vdev, "Looped descriptor");
657                 goto err;
658             }
659 
660             /* loop over the indirect descriptor table */
661             len = address_space_cache_init(&indirect_desc_cache,
662                                            vdev->dma_as,
663                                            desc.addr, desc.len, false);
664             desc_cache = &indirect_desc_cache;
665             if (len < desc.len) {
666                 virtio_error(vdev, "Cannot map indirect buffer");
667                 goto err;
668             }
669 
670             max = desc.len / sizeof(VRingDesc);
671             num_bufs = i = 0;
672             vring_desc_read(vdev, &desc, desc_cache, i);
673         }
674 
675         do {
676             /* If we've got too many, that implies a descriptor loop. */
677             if (++num_bufs > max) {
678                 virtio_error(vdev, "Looped descriptor");
679                 goto err;
680             }
681 
682             if (desc.flags & VRING_DESC_F_WRITE) {
683                 in_total += desc.len;
684             } else {
685                 out_total += desc.len;
686             }
687             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
688                 goto done;
689             }
690 
691             rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
692         } while (rc == VIRTQUEUE_READ_DESC_MORE);
693 
694         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
695             goto err;
696         }
697 
698         if (desc_cache == &indirect_desc_cache) {
699             address_space_cache_destroy(&indirect_desc_cache);
700             total_bufs++;
701         } else {
702             total_bufs = num_bufs;
703         }
704     }
705 
706     if (rc < 0) {
707         goto err;
708     }
709 
710 done:
711     address_space_cache_destroy(&indirect_desc_cache);
712     if (in_bytes) {
713         *in_bytes = in_total;
714     }
715     if (out_bytes) {
716         *out_bytes = out_total;
717     }
718     rcu_read_unlock();
719     return;
720 
721 err:
722     in_total = out_total = 0;
723     goto done;
724 }
725 
726 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
727                           unsigned int out_bytes)
728 {
729     unsigned int in_total, out_total;
730 
731     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
732     return in_bytes <= in_total && out_bytes <= out_total;
733 }
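
/*
 * Usage sketch (illustrative; resp is a hypothetical reply structure): a
 * device can check for sufficient guest-writable space before popping:
 *
 *     if (!virtqueue_avail_bytes(vq, sizeof(resp), 0)) {
 *         return;     // not enough device-writable buffer space yet
 *     }
 */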
734 
735 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
736                                hwaddr *addr, struct iovec *iov,
737                                unsigned int max_num_sg, bool is_write,
738                                hwaddr pa, size_t sz)
739 {
740     bool ok = false;
741     unsigned num_sg = *p_num_sg;
742     assert(num_sg <= max_num_sg);
743 
744     if (!sz) {
745         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
746         goto out;
747     }
748 
749     while (sz) {
750         hwaddr len = sz;
751 
752         if (num_sg == max_num_sg) {
753             virtio_error(vdev, "virtio: too many write descriptors in "
754                                "indirect table");
755             goto out;
756         }
757 
758         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
759                                               is_write ?
760                                               DMA_DIRECTION_FROM_DEVICE :
761                                               DMA_DIRECTION_TO_DEVICE);
762         if (!iov[num_sg].iov_base) {
763             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
764             goto out;
765         }
766 
767         iov[num_sg].iov_len = len;
768         addr[num_sg] = pa;
769 
770         sz -= len;
771         pa += len;
772         num_sg++;
773     }
774     ok = true;
775 
776 out:
777     *p_num_sg = num_sg;
778     return ok;
779 }
780 
781 /* Only used by error code paths before we have a VirtQueueElement (therefore
782  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
783  * yet.
784  */
785 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
786                                     struct iovec *iov)
787 {
788     unsigned int i;
789 
790     for (i = 0; i < out_num + in_num; i++) {
791         int is_write = i >= out_num;
792 
793         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
794         iov++;
795     }
796 }
797 
798 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
799                                 hwaddr *addr, unsigned int num_sg,
800                                 int is_write)
801 {
802     unsigned int i;
803     hwaddr len;
804 
805     for (i = 0; i < num_sg; i++) {
806         len = sg[i].iov_len;
807         sg[i].iov_base = dma_memory_map(vdev->dma_as,
808                                         addr[i], &len, is_write ?
809                                         DMA_DIRECTION_FROM_DEVICE :
810                                         DMA_DIRECTION_TO_DEVICE);
811         if (!sg[i].iov_base) {
812             error_report("virtio: error trying to map MMIO memory");
813             exit(1);
814         }
815         if (len != sg[i].iov_len) {
816             error_report("virtio: unexpected memory split");
817             exit(1);
818         }
819     }
820 }
821 
822 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
823 {
824     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
825     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
826 }
827 
828 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
829 {
830     VirtQueueElement *elem;
831     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
832     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
833     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
834     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
835     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
836     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
837 
838     assert(sz >= sizeof(VirtQueueElement));
839     elem = g_malloc(out_sg_end);
840     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
841     elem->out_num = out_num;
842     elem->in_num = in_num;
843     elem->in_addr = (void *)elem + in_addr_ofs;
844     elem->out_addr = (void *)elem + out_addr_ofs;
845     elem->in_sg = (void *)elem + in_sg_ofs;
846     elem->out_sg = (void *)elem + out_sg_ofs;
847     return elem;
848 }
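
/*
 * Layout note (illustrative): the element and its four arrays share one
 * allocation, laid out as
 *
 *   [VirtQueueElement (sz bytes)][in_addr][out_addr][in_sg][out_sg]
 *
 * with alignment padding before in_addr and in_sg as computed above, so a
 * single g_free() releases everything.
 */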
849 
850 void *virtqueue_pop(VirtQueue *vq, size_t sz)
851 {
852     unsigned int i, head, max;
853     VRingMemoryRegionCaches *caches;
854     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
855     MemoryRegionCache *desc_cache;
856     int64_t len;
857     VirtIODevice *vdev = vq->vdev;
858     VirtQueueElement *elem = NULL;
859     unsigned out_num, in_num, elem_entries;
860     hwaddr addr[VIRTQUEUE_MAX_SIZE];
861     struct iovec iov[VIRTQUEUE_MAX_SIZE];
862     VRingDesc desc;
863     int rc;
864 
865     if (unlikely(vdev->broken)) {
866         return NULL;
867     }
868     rcu_read_lock();
869     if (virtio_queue_empty_rcu(vq)) {
870         goto done;
871     }
872     /* Needed after virtio_queue_empty(), see comment in
873      * virtqueue_num_heads(). */
874     smp_rmb();
875 
876     /* When we start there are neither input nor output buffers. */
877     out_num = in_num = elem_entries = 0;
878 
879     max = vq->vring.num;
880 
881     if (vq->inuse >= vq->vring.num) {
882         virtio_error(vdev, "Virtqueue size exceeded");
883         goto done;
884     }
885 
886     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
887         goto done;
888     }
889 
890     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
891         vring_set_avail_event(vq, vq->last_avail_idx);
892     }
893 
894     i = head;
895 
896     caches = vring_get_region_caches(vq);
897     if (caches->desc.len < max * sizeof(VRingDesc)) {
898         virtio_error(vdev, "Cannot map descriptor ring");
899         goto done;
900     }
901 
902     desc_cache = &caches->desc;
903     vring_desc_read(vdev, &desc, desc_cache, i);
904     if (desc.flags & VRING_DESC_F_INDIRECT) {
905         if (!desc.len || (desc.len % sizeof(VRingDesc))) {
906             virtio_error(vdev, "Invalid size for indirect buffer table");
907             goto done;
908         }
909 
910         /* loop over the indirect descriptor table */
911         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
912                                        desc.addr, desc.len, false);
913         desc_cache = &indirect_desc_cache;
914         if (len < desc.len) {
915             virtio_error(vdev, "Cannot map indirect buffer");
916             goto done;
917         }
918 
919         max = desc.len / sizeof(VRingDesc);
920         i = 0;
921         vring_desc_read(vdev, &desc, desc_cache, i);
922     }
923 
924     /* Collect all the descriptors */
925     do {
926         bool map_ok;
927 
928         if (desc.flags & VRING_DESC_F_WRITE) {
929             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
930                                         iov + out_num,
931                                         VIRTQUEUE_MAX_SIZE - out_num, true,
932                                         desc.addr, desc.len);
933         } else {
934             if (in_num) {
935                 virtio_error(vdev, "Incorrect order for descriptors");
936                 goto err_undo_map;
937             }
938             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
939                                         VIRTQUEUE_MAX_SIZE, false,
940                                         desc.addr, desc.len);
941         }
942         if (!map_ok) {
943             goto err_undo_map;
944         }
945 
946         /* If we've got too many, that implies a descriptor loop. */
947         if (++elem_entries > max) {
948             virtio_error(vdev, "Looped descriptor");
949             goto err_undo_map;
950         }
951 
952         rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
953     } while (rc == VIRTQUEUE_READ_DESC_MORE);
954 
955     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
956         goto err_undo_map;
957     }
958 
959     /* Now copy what we have collected and mapped */
960     elem = virtqueue_alloc_element(sz, out_num, in_num);
961     elem->index = head;
962     for (i = 0; i < out_num; i++) {
963         elem->out_addr[i] = addr[i];
964         elem->out_sg[i] = iov[i];
965     }
966     for (i = 0; i < in_num; i++) {
967         elem->in_addr[i] = addr[out_num + i];
968         elem->in_sg[i] = iov[out_num + i];
969     }
970 
971     vq->inuse++;
972 
973     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
974 done:
975     address_space_cache_destroy(&indirect_desc_cache);
976     rcu_read_unlock();
977 
978     return elem;
979 
980 err_undo_map:
981     virtqueue_undo_map_desc(out_num, in_num, iov);
982     goto done;
983 }
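
/*
 * Usage sketch (illustrative; my_req/my_resp are hypothetical): after a
 * successful pop, a device reads the request from out_sg and writes its
 * reply to in_sg, typically with the iov helpers from qemu/iov.h:
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(*elem));
 *     if (elem) {
 *         struct my_req req;
 *         struct my_resp resp;
 *
 *         iov_to_buf(elem->out_sg, elem->out_num, 0, &req, sizeof(req));
 *         // ... process the request, fill in resp ...
 *         iov_from_buf(elem->in_sg, elem->in_num, 0, &resp, sizeof(resp));
 *         virtqueue_push(vq, elem, sizeof(resp));
 *         virtio_notify(vdev, vq);
 *         g_free(elem);
 *     }
 */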
984 
985 /* virtqueue_drop_all:
986  * @vq: The #VirtQueue
987  * Drops all queued buffers and reports them to the guest
988  * as if they were done. Useful when buffers cannot be
989  * processed but must be returned to the guest.
990  */
991 unsigned int virtqueue_drop_all(VirtQueue *vq)
992 {
993     unsigned int dropped = 0;
994     VirtQueueElement elem = {};
995     VirtIODevice *vdev = vq->vdev;
996     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
997 
998     if (unlikely(vdev->broken)) {
999         return 0;
1000     }
1001 
1002     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1003         /* works similarly to virtqueue_pop but does not map buffers
1004          * and does not allocate any memory */
1005         smp_rmb();
1006         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1007             break;
1008         }
1009         vq->inuse++;
1010         vq->last_avail_idx++;
1011         if (fEventIdx) {
1012             vring_set_avail_event(vq, vq->last_avail_idx);
1013         }
1014         /* immediately push the element, nothing to unmap
1015          * as both in_num and out_num are set to 0 */
1016         virtqueue_push(vq, &elem, 0);
1017         dropped++;
1018     }
1019 
1020     return dropped;
1021 }
1022 
1023 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1024  * it is what QEMU has always done by mistake.  We can change it sooner
1025  * or later by bumping the version number of the affected vm states.
1026  * In the meantime, since the in-memory layout of VirtQueueElement
1027  * has changed, we need to marshal to and from the layout that was
1028  * used before the change.
1029  */
1030 typedef struct VirtQueueElementOld {
1031     unsigned int index;
1032     unsigned int out_num;
1033     unsigned int in_num;
1034     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1035     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1036     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1037     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1038 } VirtQueueElementOld;
1039 
1040 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1041 {
1042     VirtQueueElement *elem;
1043     VirtQueueElementOld data;
1044     int i;
1045 
1046     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1047 
1048     /* TODO: teach all callers that this can fail, and return failure instead
1049      * of asserting here.
1050      * This is just one thing (there are probably more) that must be
1051      * fixed before we can allow NDEBUG compilation.
1052      */
1053     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1054     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1055 
1056     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1057     elem->index = data.index;
1058 
1059     for (i = 0; i < elem->in_num; i++) {
1060         elem->in_addr[i] = data.in_addr[i];
1061     }
1062 
1063     for (i = 0; i < elem->out_num; i++) {
1064         elem->out_addr[i] = data.out_addr[i];
1065     }
1066 
1067     for (i = 0; i < elem->in_num; i++) {
1068         /* Base is overwritten by virtqueue_map.  */
1069         elem->in_sg[i].iov_base = 0;
1070         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1071     }
1072 
1073     for (i = 0; i < elem->out_num; i++) {
1074         /* Base is overwritten by virtqueue_map.  */
1075         elem->out_sg[i].iov_base = 0;
1076         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1077     }
1078 
1079     virtqueue_map(vdev, elem);
1080     return elem;
1081 }
1082 
1083 void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
1084 {
1085     VirtQueueElementOld data;
1086     int i;
1087 
1088     memset(&data, 0, sizeof(data));
1089     data.index = elem->index;
1090     data.in_num = elem->in_num;
1091     data.out_num = elem->out_num;
1092 
1093     for (i = 0; i < elem->in_num; i++) {
1094         data.in_addr[i] = elem->in_addr[i];
1095     }
1096 
1097     for (i = 0; i < elem->out_num; i++) {
1098         data.out_addr[i] = elem->out_addr[i];
1099     }
1100 
1101     for (i = 0; i < elem->in_num; i++) {
1102         /* Base is overwritten by virtqueue_map when loading.  Do not
1103          * save it, as it would leak the QEMU address space layout.  */
1104         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1105     }
1106 
1107     for (i = 0; i < elem->out_num; i++) {
1108         /* Do not save iov_base as above.  */
1109         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1110     }
1111     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1112 }
1113 
1114 /* virtio device */
1115 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1116 {
1117     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1118     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1119 
1120     if (unlikely(vdev->broken)) {
1121         return;
1122     }
1123 
1124     if (k->notify) {
1125         k->notify(qbus->parent, vector);
1126     }
1127 }
1128 
1129 void virtio_update_irq(VirtIODevice *vdev)
1130 {
1131     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1132 }
1133 
1134 static int virtio_validate_features(VirtIODevice *vdev)
1135 {
1136     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1137 
1138     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1139         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1140         return -EFAULT;
1141     }
1142 
1143     if (k->validate_features) {
1144         return k->validate_features(vdev);
1145     } else {
1146         return 0;
1147     }
1148 }
1149 
1150 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1151 {
1152     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1153     trace_virtio_set_status(vdev, val);
1154 
1155     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1156         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1157             val & VIRTIO_CONFIG_S_FEATURES_OK) {
1158             int ret = virtio_validate_features(vdev);
1159 
1160             if (ret) {
1161                 return ret;
1162             }
1163         }
1164     }
1165     vdev->started = val & VIRTIO_CONFIG_S_DRIVER_OK;
1166     if (unlikely(vdev->start_on_kick && vdev->started)) {
1167         vdev->start_on_kick = false;
1168     }
1169 
1170     if (k->set_status) {
1171         k->set_status(vdev, val);
1172     }
1173     vdev->status = val;
1174 
1175     return 0;
1176 }
1177 
1178 static enum virtio_device_endian virtio_default_endian(void)
1179 {
1180     if (target_words_bigendian()) {
1181         return VIRTIO_DEVICE_ENDIAN_BIG;
1182     } else {
1183         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1184     }
1185 }
1186 
1187 static enum virtio_device_endian virtio_current_cpu_endian(void)
1188 {
1189     CPUClass *cc = CPU_GET_CLASS(current_cpu);
1190 
1191     if (cc->virtio_is_big_endian(current_cpu)) {
1192         return VIRTIO_DEVICE_ENDIAN_BIG;
1193     } else {
1194         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1195     }
1196 }
1197 
1198 void virtio_reset(void *opaque)
1199 {
1200     VirtIODevice *vdev = opaque;
1201     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1202     int i;
1203 
1204     virtio_set_status(vdev, 0);
1205     if (current_cpu) {
1206         /* Guest initiated reset */
1207         vdev->device_endian = virtio_current_cpu_endian();
1208     } else {
1209         /* System reset */
1210         vdev->device_endian = virtio_default_endian();
1211     }
1212 
1213     if (k->reset) {
1214         k->reset(vdev);
1215     }
1216 
1217     vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) &&
1218                           !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1));
1219     vdev->started = false;
1220     vdev->broken = false;
1221     vdev->guest_features = 0;
1222     vdev->queue_sel = 0;
1223     vdev->status = 0;
1224     atomic_set(&vdev->isr, 0);
1225     vdev->config_vector = VIRTIO_NO_VECTOR;
1226     virtio_notify_vector(vdev, vdev->config_vector);
1227 
1228     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1229         vdev->vq[i].vring.desc = 0;
1230         vdev->vq[i].vring.avail = 0;
1231         vdev->vq[i].vring.used = 0;
1232         vdev->vq[i].last_avail_idx = 0;
1233         vdev->vq[i].shadow_avail_idx = 0;
1234         vdev->vq[i].used_idx = 0;
1235         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
1236         vdev->vq[i].signalled_used = 0;
1237         vdev->vq[i].signalled_used_valid = false;
1238         vdev->vq[i].notification = true;
1239         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
1240         vdev->vq[i].inuse = 0;
1241         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
1242     }
1243 }
1244 
1245 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
1246 {
1247     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1248     uint8_t val;
1249 
1250     if (addr + sizeof(val) > vdev->config_len) {
1251         return (uint32_t)-1;
1252     }
1253 
1254     k->get_config(vdev, vdev->config);
1255 
1256     val = ldub_p(vdev->config + addr);
1257     return val;
1258 }
1259 
1260 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
1261 {
1262     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1263     uint16_t val;
1264 
1265     if (addr + sizeof(val) > vdev->config_len) {
1266         return (uint32_t)-1;
1267     }
1268 
1269     k->get_config(vdev, vdev->config);
1270 
1271     val = lduw_p(vdev->config + addr);
1272     return val;
1273 }
1274 
1275 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
1276 {
1277     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1278     uint32_t val;
1279 
1280     if (addr + sizeof(val) > vdev->config_len) {
1281         return (uint32_t)-1;
1282     }
1283 
1284     k->get_config(vdev, vdev->config);
1285 
1286     val = ldl_p(vdev->config + addr);
1287     return val;
1288 }
1289 
1290 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1291 {
1292     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1293     uint8_t val = data;
1294 
1295     if (addr + sizeof(val) > vdev->config_len) {
1296         return;
1297     }
1298 
1299     stb_p(vdev->config + addr, val);
1300 
1301     if (k->set_config) {
1302         k->set_config(vdev, vdev->config);
1303     }
1304 }
1305 
1306 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1307 {
1308     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1309     uint16_t val = data;
1310 
1311     if (addr + sizeof(val) > vdev->config_len) {
1312         return;
1313     }
1314 
1315     stw_p(vdev->config + addr, val);
1316 
1317     if (k->set_config) {
1318         k->set_config(vdev, vdev->config);
1319     }
1320 }
1321 
1322 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1323 {
1324     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1325     uint32_t val = data;
1326 
1327     if (addr + sizeof(val) > vdev->config_len) {
1328         return;
1329     }
1330 
1331     stl_p(vdev->config + addr, val);
1332 
1333     if (k->set_config) {
1334         k->set_config(vdev, vdev->config);
1335     }
1336 }
1337 
1338 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
1339 {
1340     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1341     uint8_t val;
1342 
1343     if (addr + sizeof(val) > vdev->config_len) {
1344         return (uint32_t)-1;
1345     }
1346 
1347     k->get_config(vdev, vdev->config);
1348 
1349     val = ldub_p(vdev->config + addr);
1350     return val;
1351 }
1352 
1353 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
1354 {
1355     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1356     uint16_t val;
1357 
1358     if (addr + sizeof(val) > vdev->config_len) {
1359         return (uint32_t)-1;
1360     }
1361 
1362     k->get_config(vdev, vdev->config);
1363 
1364     val = lduw_le_p(vdev->config + addr);
1365     return val;
1366 }
1367 
1368 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
1369 {
1370     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1371     uint32_t val;
1372 
1373     if (addr + sizeof(val) > vdev->config_len) {
1374         return (uint32_t)-1;
1375     }
1376 
1377     k->get_config(vdev, vdev->config);
1378 
1379     val = ldl_le_p(vdev->config + addr);
1380     return val;
1381 }
1382 
1383 void virtio_config_modern_writeb(VirtIODevice *vdev,
1384                                  uint32_t addr, uint32_t data)
1385 {
1386     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1387     uint8_t val = data;
1388 
1389     if (addr + sizeof(val) > vdev->config_len) {
1390         return;
1391     }
1392 
1393     stb_p(vdev->config + addr, val);
1394 
1395     if (k->set_config) {
1396         k->set_config(vdev, vdev->config);
1397     }
1398 }
1399 
1400 void virtio_config_modern_writew(VirtIODevice *vdev,
1401                                  uint32_t addr, uint32_t data)
1402 {
1403     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1404     uint16_t val = data;
1405 
1406     if (addr + sizeof(val) > vdev->config_len) {
1407         return;
1408     }
1409 
1410     stw_le_p(vdev->config + addr, val);
1411 
1412     if (k->set_config) {
1413         k->set_config(vdev, vdev->config);
1414     }
1415 }
1416 
1417 void virtio_config_modern_writel(VirtIODevice *vdev,
1418                                  uint32_t addr, uint32_t data)
1419 {
1420     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1421     uint32_t val = data;
1422 
1423     if (addr + sizeof(val) > vdev->config_len) {
1424         return;
1425     }
1426 
1427     stl_le_p(vdev->config + addr, val);
1428 
1429     if (k->set_config) {
1430         k->set_config(vdev, vdev->config);
1431     }
1432 }
1433 
1434 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
1435 {
1436     if (!vdev->vq[n].vring.num) {
1437         return;
1438     }
1439     vdev->vq[n].vring.desc = addr;
1440     virtio_queue_update_rings(vdev, n);
1441 }
1442 
1443 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
1444 {
1445     return vdev->vq[n].vring.desc;
1446 }
1447 
1448 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
1449                             hwaddr avail, hwaddr used)
1450 {
1451     if (!vdev->vq[n].vring.num) {
1452         return;
1453     }
1454     vdev->vq[n].vring.desc = desc;
1455     vdev->vq[n].vring.avail = avail;
1456     vdev->vq[n].vring.used = used;
1457     virtio_init_region_cache(vdev, n);
1458 }
1459 
1460 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
1461 {
1462     /* Don't allow guest to flip queue between existent and
1463      * nonexistent states, or to set it to an invalid size.
1464      */
1465     if (!!num != !!vdev->vq[n].vring.num ||
1466         num > VIRTQUEUE_MAX_SIZE ||
1467         num < 0) {
1468         return;
1469     }
1470     vdev->vq[n].vring.num = num;
1471 }
1472 
1473 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
1474 {
1475     return QLIST_FIRST(&vdev->vector_queues[vector]);
1476 }
1477 
1478 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
1479 {
1480     return QLIST_NEXT(vq, node);
1481 }
1482 
1483 int virtio_queue_get_num(VirtIODevice *vdev, int n)
1484 {
1485     return vdev->vq[n].vring.num;
1486 }
1487 
1488 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
1489 {
1490     return vdev->vq[n].vring.num_default;
1491 }
1492 
1493 int virtio_get_num_queues(VirtIODevice *vdev)
1494 {
1495     int i;
1496 
1497     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1498         if (!virtio_queue_get_num(vdev, i)) {
1499             break;
1500         }
1501     }
1502 
1503     return i;
1504 }
1505 
1506 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
1507 {
1508     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1509     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1510 
1511     /* virtio-1 compliant devices cannot change the alignment */
1512     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1513         error_report("tried to modify queue alignment for virtio-1 device");
1514         return;
1515     }
1516     /* Check that the transport told us it was going to do this
1517      * (so a buggy transport will immediately assert rather than
1518      * silently failing to migrate this state)
1519      */
1520     assert(k->has_variable_vring_alignment);
1521 
1522     if (align) {
1523         vdev->vq[n].vring.align = align;
1524         virtio_queue_update_rings(vdev, n);
1525     }
1526 }
1527 
1528 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
1529 {
1530     bool ret = false;
1531 
1532     if (vq->vring.desc && vq->handle_aio_output) {
1533         VirtIODevice *vdev = vq->vdev;
1534 
1535         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1536         ret = vq->handle_aio_output(vdev, vq);
1537 
1538         if (unlikely(vdev->start_on_kick)) {
1539             vdev->started = true;
1540             vdev->start_on_kick = false;
1541         }
1542     }
1543 
1544     return ret;
1545 }
1546 
1547 static void virtio_queue_notify_vq(VirtQueue *vq)
1548 {
1549     if (vq->vring.desc && vq->handle_output) {
1550         VirtIODevice *vdev = vq->vdev;
1551 
1552         if (unlikely(vdev->broken)) {
1553             return;
1554         }
1555 
1556         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1557         vq->handle_output(vdev, vq);
1558 
1559         if (unlikely(vdev->start_on_kick)) {
1560             vdev->started = true;
1561             vdev->start_on_kick = false;
1562         }
1563     }
1564 }
1565 
1566 void virtio_queue_notify(VirtIODevice *vdev, int n)
1567 {
1568     VirtQueue *vq = &vdev->vq[n];
1569 
1570     if (unlikely(!vq->vring.desc || vdev->broken)) {
1571         return;
1572     }
1573 
1574     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1575     if (vq->handle_aio_output) {
1576         event_notifier_set(&vq->host_notifier);
1577     } else if (vq->handle_output) {
1578         vq->handle_output(vdev, vq);
1579     }
1580 
1581     if (unlikely(vdev->start_on_kick)) {
1582         vdev->started = true;
1583         vdev->start_on_kick = false;
1584     }
1585 }
1586 
1587 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
1588 {
1589     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
1590         VIRTIO_NO_VECTOR;
1591 }
1592 
1593 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
1594 {
1595     VirtQueue *vq = &vdev->vq[n];
1596 
1597     if (n < VIRTIO_QUEUE_MAX) {
1598         if (vdev->vector_queues &&
1599             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
1600             QLIST_REMOVE(vq, node);
1601         }
1602         vdev->vq[n].vector = vector;
1603         if (vdev->vector_queues &&
1604             vector != VIRTIO_NO_VECTOR) {
1605             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
1606         }
1607     }
1608 }
1609 
1610 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
1611                             VirtIOHandleOutput handle_output)
1612 {
1613     int i;
1614 
1615     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1616         if (vdev->vq[i].vring.num == 0)
1617             break;
1618     }
1619 
1620     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
1621         abort();
1622 
1623     vdev->vq[i].vring.num = queue_size;
1624     vdev->vq[i].vring.num_default = queue_size;
1625     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
1626     vdev->vq[i].handle_output = handle_output;
1627     vdev->vq[i].handle_aio_output = NULL;
1628 
1629     return &vdev->vq[i];
1630 }
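
/*
 * Usage sketch (illustrative; names hypothetical): devices create their
 * queues at realize time and keep the returned pointers:
 *
 *     s->rx_vq = virtio_add_queue(vdev, 256, my_dev_handle_rx);
 *     s->tx_vq = virtio_add_queue(vdev, 256, my_dev_handle_tx);
 */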
1631 
1632 void virtio_del_queue(VirtIODevice *vdev, int n)
1633 {
1634     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
1635         abort();
1636     }
1637 
1638     vdev->vq[n].vring.num = 0;
1639     vdev->vq[n].vring.num_default = 0;
1640     vdev->vq[n].handle_output = NULL;
1641     vdev->vq[n].handle_aio_output = NULL;
1642 }
1643 
1644 static void virtio_set_isr(VirtIODevice *vdev, int value)
1645 {
1646     uint8_t old = atomic_read(&vdev->isr);
1647 
1648     /* Do not write ISR if it does not change, so that its cacheline remains
1649      * shared in the common case where the guest does not read it.
1650      */
1651     if ((old & value) != value) {
1652         atomic_or(&vdev->isr, value);
1653     }
1654 }
1655 
1656 /* Called within rcu_read_lock().  */
1657 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
1658 {
1659     uint16_t old, new;
1660     bool v;
1661     /* We need to expose used array entries before checking used event. */
1662     smp_mb();
1663     /* Always notify when the queue is empty (if the feature was acknowledged) */
1664     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1665         !vq->inuse && virtio_queue_empty(vq)) {
1666         return true;
1667     }
1668 
1669     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1670         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1671     }
1672 
1673     v = vq->signalled_used_valid;
1674     vq->signalled_used_valid = true;
1675     old = vq->signalled_used;
1676     new = vq->signalled_used = vq->used_idx;
1677     return !v || vring_need_event(vring_get_used_event(vq), new, old);
1678 }
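
/*
 * Note (illustrative): vring_need_event() implements the event-idx test from
 * the virtio spec, signalling only when the newly published range crossed the
 * guest's used_event value:
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * For example, with old = 5, new = 8 and used_event = 6, 8 - 6 - 1 = 1 < 3,
 * so the guest is notified; with used_event = 9 it is not.
 */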
1679 
1680 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
1681 {
1682     bool should_notify;
1683     rcu_read_lock();
1684     should_notify = virtio_should_notify(vdev, vq);
1685     rcu_read_unlock();
1686 
1687     if (!should_notify) {
1688         return;
1689     }
1690 
1691     trace_virtio_notify_irqfd(vdev, vq);
1692 
1693     /*
1694      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
1695      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
1696      * incorrectly polling this bit during crashdump and hibernation
1697      * in MSI mode, causing a hang if this bit is never updated.
1698      * Recent releases of Windows do not really shut down, but rather
1699      * log out and hibernate to make the next startup faster.  Hence,
1700      * this manifested as a more serious hang during shutdown with
1701      *
1702      * Next driver release from 2016 fixed this problem, so working around it
1703      * is not a must, but it's easy to do so let's do it here.
1704      *
1705      * Note: it's safe to update ISR from any thread as it was switched
1706      * to an atomic operation.
1707      */
1708     virtio_set_isr(vq->vdev, 0x1);
1709     event_notifier_set(&vq->guest_notifier);
1710 }
1711 
1712 static void virtio_irq(VirtQueue *vq)
1713 {
1714     virtio_set_isr(vq->vdev, 0x1);
1715     virtio_notify_vector(vq->vdev, vq->vector);
1716 }
1717 
1718 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
1719 {
1720     bool should_notify;
1721     rcu_read_lock();
1722     should_notify = virtio_should_notify(vdev, vq);
1723     rcu_read_unlock();
1724 
1725     if (!should_notify) {
1726         return;
1727     }
1728 
1729     trace_virtio_notify(vdev, vq);
1730     virtio_irq(vq);
1731 }
1732 
1733 void virtio_notify_config(VirtIODevice *vdev)
1734 {
1735     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
1736         return;
1737 
1738     virtio_set_isr(vdev, 0x3);
1739     vdev->generation++;
1740     virtio_notify_vector(vdev, vdev->config_vector);
1741 }
1742 
1743 static bool virtio_device_endian_needed(void *opaque)
1744 {
1745     VirtIODevice *vdev = opaque;
1746 
1747     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
1748     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1749         return vdev->device_endian != virtio_default_endian();
1750     }
1751     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
1752     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
1753 }
1754 
1755 static bool virtio_64bit_features_needed(void *opaque)
1756 {
1757     VirtIODevice *vdev = opaque;
1758 
1759     return (vdev->host_features >> 32) != 0;
1760 }
1761 
1762 static bool virtio_virtqueue_needed(void *opaque)
1763 {
1764     VirtIODevice *vdev = opaque;
1765 
1766     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
1767 }
1768 
1769 static bool virtio_ringsize_needed(void *opaque)
1770 {
1771     VirtIODevice *vdev = opaque;
1772     int i;
1773 
1774     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1775         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
1776             return true;
1777         }
1778     }
1779     return false;
1780 }
1781 
1782 static bool virtio_extra_state_needed(void *opaque)
1783 {
1784     VirtIODevice *vdev = opaque;
1785     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1786     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1787 
1788     return k->has_extra_state &&
1789         k->has_extra_state(qbus->parent);
1790 }
1791 
1792 static bool virtio_broken_needed(void *opaque)
1793 {
1794     VirtIODevice *vdev = opaque;
1795 
1796     return vdev->broken;
1797 }
1798 
1799 static bool virtio_started_needed(void *opaque)
1800 {
1801     VirtIODevice *vdev = opaque;
1802 
1803     return vdev->started;
1804 }
1805 
1806 static const VMStateDescription vmstate_virtqueue = {
1807     .name = "virtqueue_state",
1808     .version_id = 1,
1809     .minimum_version_id = 1,
1810     .fields = (VMStateField[]) {
1811         VMSTATE_UINT64(vring.avail, struct VirtQueue),
1812         VMSTATE_UINT64(vring.used, struct VirtQueue),
1813         VMSTATE_END_OF_LIST()
1814     }
1815 };
1816 
1817 static const VMStateDescription vmstate_virtio_virtqueues = {
1818     .name = "virtio/virtqueues",
1819     .version_id = 1,
1820     .minimum_version_id = 1,
1821     .needed = &virtio_virtqueue_needed,
1822     .fields = (VMStateField[]) {
1823         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1824                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
1825         VMSTATE_END_OF_LIST()
1826     }
1827 };
1828 
1829 static const VMStateDescription vmstate_ringsize = {
1830     .name = "ringsize_state",
1831     .version_id = 1,
1832     .minimum_version_id = 1,
1833     .fields = (VMStateField[]) {
1834         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
1835         VMSTATE_END_OF_LIST()
1836     }
1837 };
1838 
1839 static const VMStateDescription vmstate_virtio_ringsize = {
1840     .name = "virtio/ringsize",
1841     .version_id = 1,
1842     .minimum_version_id = 1,
1843     .needed = &virtio_ringsize_needed,
1844     .fields = (VMStateField[]) {
1845         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1846                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
1847         VMSTATE_END_OF_LIST()
1848     }
1849 };
1850 
1851 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
1852                            const VMStateField *field)
1853 {
1854     VirtIODevice *vdev = pv;
1855     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1856     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1857 
1858     if (!k->load_extra_state) {
1859         return -1;
1860     } else {
1861         return k->load_extra_state(qbus->parent, f);
1862     }
1863 }
1864 
1865 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
1866                            const VMStateField *field, QJSON *vmdesc)
1867 {
1868     VirtIODevice *vdev = pv;
1869     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1870     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1871 
1872     k->save_extra_state(qbus->parent, f);
1873     return 0;
1874 }
1875 
1876 static const VMStateInfo vmstate_info_extra_state = {
1877     .name = "virtqueue_extra_state",
1878     .get = get_extra_state,
1879     .put = put_extra_state,
1880 };
1881 
1882 static const VMStateDescription vmstate_virtio_extra_state = {
1883     .name = "virtio/extra_state",
1884     .version_id = 1,
1885     .minimum_version_id = 1,
1886     .needed = &virtio_extra_state_needed,
1887     .fields = (VMStateField[]) {
1888         {
1889             .name         = "extra_state",
1890             .version_id   = 0,
1891             .field_exists = NULL,
1892             .size         = 0,
1893             .info         = &vmstate_info_extra_state,
1894             .flags        = VMS_SINGLE,
1895             .offset       = 0,
1896         },
1897         VMSTATE_END_OF_LIST()
1898     }
1899 };
1900 
1901 static const VMStateDescription vmstate_virtio_device_endian = {
1902     .name = "virtio/device_endian",
1903     .version_id = 1,
1904     .minimum_version_id = 1,
1905     .needed = &virtio_device_endian_needed,
1906     .fields = (VMStateField[]) {
1907         VMSTATE_UINT8(device_endian, VirtIODevice),
1908         VMSTATE_END_OF_LIST()
1909     }
1910 };
1911 
1912 static const VMStateDescription vmstate_virtio_64bit_features = {
1913     .name = "virtio/64bit_features",
1914     .version_id = 1,
1915     .minimum_version_id = 1,
1916     .needed = &virtio_64bit_features_needed,
1917     .fields = (VMStateField[]) {
1918         VMSTATE_UINT64(guest_features, VirtIODevice),
1919         VMSTATE_END_OF_LIST()
1920     }
1921 };
1922 
1923 static const VMStateDescription vmstate_virtio_broken = {
1924     .name = "virtio/broken",
1925     .version_id = 1,
1926     .minimum_version_id = 1,
1927     .needed = &virtio_broken_needed,
1928     .fields = (VMStateField[]) {
1929         VMSTATE_BOOL(broken, VirtIODevice),
1930         VMSTATE_END_OF_LIST()
1931     }
1932 };
1933 
1934 static const VMStateDescription vmstate_virtio_started = {
1935     .name = "virtio/started",
1936     .version_id = 1,
1937     .minimum_version_id = 1,
1938     .needed = &virtio_started_needed,
1939     .fields = (VMStateField[]) {
1940         VMSTATE_BOOL(started, VirtIODevice),
1941         VMSTATE_END_OF_LIST()
1942     }
1943 };
1944 
1945 static const VMStateDescription vmstate_virtio = {
1946     .name = "virtio",
1947     .version_id = 1,
1948     .minimum_version_id = 1,
1949     .minimum_version_id_old = 1,
1950     .fields = (VMStateField[]) {
1951         VMSTATE_END_OF_LIST()
1952     },
1953     .subsections = (const VMStateDescription*[]) {
1954         &vmstate_virtio_device_endian,
1955         &vmstate_virtio_64bit_features,
1956         &vmstate_virtio_virtqueues,
1957         &vmstate_virtio_ringsize,
1958         &vmstate_virtio_broken,
1959         &vmstate_virtio_extra_state,
1960         &vmstate_virtio_started,
1961         NULL
1962     }
1963 };
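
/*
 * Each subsection above reaches the wire only when its .needed callback
 * returns true on the source side, which keeps the stream loadable by
 * older QEMUs that lack the subsection.  A minimal sketch of adding
 * another optional field (the "foo" names are hypothetical):
 *
 *   static bool virtio_foo_needed(void *opaque)
 *   {
 *       VirtIODevice *vdev = opaque;
 *
 *       return vdev->foo != 0;
 *   }
 *
 *   static const VMStateDescription vmstate_virtio_foo = {
 *       .name = "virtio/foo",
 *       .version_id = 1,
 *       .minimum_version_id = 1,
 *       .needed = &virtio_foo_needed,
 *       .fields = (VMStateField[]) {
 *           VMSTATE_UINT32(foo, VirtIODevice),
 *           VMSTATE_END_OF_LIST()
 *       }
 *   };
 */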
1964 
1965 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
1966 {
1967     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1968     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1969     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1970     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
1971     int i;
1972 
1973     if (k->save_config) {
1974         k->save_config(qbus->parent, f);
1975     }
1976 
1977     qemu_put_8s(f, &vdev->status);
1978     qemu_put_8s(f, &vdev->isr);
1979     qemu_put_be16s(f, &vdev->queue_sel);
1980     qemu_put_be32s(f, &guest_features_lo);
1981     qemu_put_be32(f, vdev->config_len);
1982     qemu_put_buffer(f, vdev->config, vdev->config_len);
1983 
1984     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1985         if (vdev->vq[i].vring.num == 0)
1986             break;
1987     }
1988 
1989     qemu_put_be32(f, i);
1990 
1991     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1992         if (vdev->vq[i].vring.num == 0)
1993             break;
1994 
1995         qemu_put_be32(f, vdev->vq[i].vring.num);
1996         if (k->has_variable_vring_alignment) {
1997             qemu_put_be32(f, vdev->vq[i].vring.align);
1998         }
1999         /*
2000          * Save desc now; the rest of the ring addresses are saved in
2001          * subsections for VIRTIO-1 devices.
2002          */
2003         qemu_put_be64(f, vdev->vq[i].vring.desc);
2004         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2005         if (k->save_queue) {
2006             k->save_queue(qbus->parent, i, f);
2007         }
2008     }
2009 
2010     if (vdc->save != NULL) {
2011         vdc->save(vdev, f);
2012     }
2013 
2014     if (vdc->vmsd) {
2015         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2016         if (ret) {
2017             return ret;
2018         }
2019     }
2020 
2021     /* Subsections */
2022     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2023 }
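
/*
 * For reference, the stream layout produced above is: transport config
 * (if any), status, isr, queue_sel, the low 32 feature bits, config_len
 * followed by the raw config bytes, the count of in-use queues, then per
 * queue: vring.num, the alignment (only for transports with variable
 * vring alignment), the desc address and last_avail_idx plus any
 * transport per-queue state, and finally the device's own state and the
 * vmstate_virtio subsections.
 */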
2024 
2025 /* A wrapper for use as a VMState .put function */
2026 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2027                               const VMStateField *field, QJSON *vmdesc)
2028 {
2029     return virtio_save(VIRTIO_DEVICE(opaque), f);
2030 }
2031 
2032 /* A wrapper for use as a VMState .get function */
2033 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2034                              const VMStateField *field)
2035 {
2036     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2037     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2038 
2039     return virtio_load(vdev, f, dc->vmsd->version_id);
2040 }
2041 
2042 const VMStateInfo virtio_vmstate_info = {
2043     .name = "virtio",
2044     .get = virtio_device_get,
2045     .put = virtio_device_put,
2046 };
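
/*
 * Devices hook into this through the VMSTATE_VIRTIO_DEVICE field macro
 * from hw/virtio/virtio.h, which routes their vmsd through virtio_save()
 * and virtio_load() above.  A sketch modelled on virtio-blk:
 *
 *   static const VMStateDescription vmstate_virtio_blk = {
 *       .name = "virtio-blk",
 *       .minimum_version_id = 2,
 *       .version_id = 2,
 *       .fields = (VMStateField[]) {
 *           VMSTATE_VIRTIO_DEVICE,
 *           VMSTATE_END_OF_LIST()
 *       },
 *   };
 */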
2047 
2048 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2049 {
2050     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2051     bool bad = (val & ~(vdev->host_features)) != 0;
2052 
2053     val &= vdev->host_features;
2054     if (k->set_features) {
2055         k->set_features(vdev, val);
2056     }
2057     vdev->guest_features = val;
2058     return bad ? -1 : 0;
2059 }
2060 
2061 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2062 {
2063     int ret;
2064     /*
2065      * The driver must not attempt to set features after feature negotiation
2066      * has finished.
2067      */
2068     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2069         return -EINVAL;
2070     }
2071     ret = virtio_set_features_nocheck(vdev, val);
2072     if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2073         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2074         int i;
2075         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2076             if (vdev->vq[i].vring.num != 0) {
2077                 virtio_init_region_cache(vdev, i);
2078             }
2079         }
2080     }
2081     return ret;
2082 }
2083 
2084 size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
2085                                       uint64_t host_features)
2086 {
2087     size_t config_size = 0;
2088     int i;
2089 
2090     for (i = 0; feature_sizes[i].flags != 0; i++) {
2091         if (host_features & feature_sizes[i].flags) {
2092             config_size = MAX(feature_sizes[i].end, config_size);
2093         }
2094     }
2095 
2096     return config_size;
2097 }
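
/*
 * A sketch of a caller, modelled on virtio-blk (the feature bits and
 * config fields are illustrative; endof() comes from the same header as
 * VirtIOFeature):
 *
 *   static const VirtIOFeature feature_sizes[] = {
 *       {.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
 *        .end = endof(struct virtio_blk_config, discard_sector_alignment)},
 *       {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
 *        .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)},
 *       {}
 *   };
 *
 *   config_size = virtio_feature_get_config_size(feature_sizes,
 *                                                host_features);
 */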
2098 
2099 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2100 {
2101     int i, ret;
2102     int32_t config_len;
2103     uint32_t num;
2104     uint32_t features;
2105     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2106     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2107     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2108 
2109     /*
2110      * We poison the endianness to ensure it does not get used before
2111      * subsections have been loaded.
2112      */
2113     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2114 
2115     if (k->load_config) {
2116         ret = k->load_config(qbus->parent, f);
2117         if (ret)
2118             return ret;
2119     }
2120 
2121     qemu_get_8s(f, &vdev->status);
2122     qemu_get_8s(f, &vdev->isr);
2123     qemu_get_be16s(f, &vdev->queue_sel);
2124     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
2125         return -1;
2126     }
2127     qemu_get_be32s(f, &features);
2128 
2129     /*
2130      * Temporarily set the low bits of guest_features - needed by the
2131      * virtio-net load code, which tests for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
2132      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
2133      *
2134      * Note: devices should always test host features in the future - don't
2135      * create new dependencies like this.
2136      */
2137     vdev->guest_features = features;
2138 
2139     config_len = qemu_get_be32(f);
2140 
2141     /*
2142      * There are cases where the incoming config can be bigger or smaller
2143      * than what we have; so load what we have space for, and skip
2144      * any excess that's in the stream.
2145      */
2146     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
2147 
2148     while (config_len > vdev->config_len) {
2149         qemu_get_byte(f);
2150         config_len--;
2151     }
2152 
2153     num = qemu_get_be32(f);
2154 
2155     if (num > VIRTIO_QUEUE_MAX) {
2156         error_report("Invalid number of virtqueues: 0x%x", num);
2157         return -1;
2158     }
2159 
2160     for (i = 0; i < num; i++) {
2161         vdev->vq[i].vring.num = qemu_get_be32(f);
2162         if (k->has_variable_vring_alignment) {
2163             vdev->vq[i].vring.align = qemu_get_be32(f);
2164         }
2165         vdev->vq[i].vring.desc = qemu_get_be64(f);
2166         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
2167         vdev->vq[i].signalled_used_valid = false;
2168         vdev->vq[i].notification = true;
2169 
2170         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
2171             error_report("VQ %d address 0x0 "
2172                          "inconsistent with Host index 0x%x",
2173                          i, vdev->vq[i].last_avail_idx);
2174             return -1;
2175         }
2176         if (k->load_queue) {
2177             ret = k->load_queue(qbus->parent, i, f);
2178             if (ret)
2179                 return ret;
2180         }
2181     }
2182 
2183     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2184 
2185     if (vdc->load != NULL) {
2186         ret = vdc->load(vdev, f, version_id);
2187         if (ret) {
2188             return ret;
2189         }
2190     }
2191 
2192     if (vdc->vmsd) {
2193         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
2194         if (ret) {
2195             return ret;
2196         }
2197     }
2198 
2199     /* Subsections */
2200     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
2201     if (ret) {
2202         return ret;
2203     }
2204 
2205     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
2206         vdev->device_endian = virtio_default_endian();
2207     }
2208 
2209     if (virtio_64bit_features_needed(vdev)) {
2210         /*
2211          * Subsection load filled vdev->guest_features.  Run them
2212          * through virtio_set_features_nocheck() to sanity-check them
2213          * against host_features.
2214          */
2215         uint64_t features64 = vdev->guest_features;
2216         if (virtio_set_features_nocheck(vdev, features64) < 0) {
2217             error_report("Features 0x%" PRIx64 " unsupported. "
2218                          "Allowed features: 0x%" PRIx64,
2219                          features64, vdev->host_features);
2220             return -1;
2221         }
2222     } else {
2223         if (virtio_set_features_nocheck(vdev, features) < 0) {
2224             error_report("Features 0x%x unsupported. "
2225                          "Allowed features: 0x%" PRIx64,
2226                          features, vdev->host_features);
2227             return -1;
2228         }
2229     }
2230 
2231     rcu_read_lock();
2232     for (i = 0; i < num; i++) {
2233         if (vdev->vq[i].vring.desc) {
2234             uint16_t nheads;
2235 
2236             /*
2237              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
2238              * only the region cache needs to be set up.  Legacy devices need
2239              * to calculate used and avail ring addresses based on the desc
2240              * address.
2241              */
2242             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2243                 virtio_init_region_cache(vdev, i);
2244             } else {
2245                 virtio_queue_update_rings(vdev, i);
2246             }
2247 
2248             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
2249             /* Check it isn't doing strange things with descriptor numbers. */
2250             if (nheads > vdev->vq[i].vring.num) {
2251                 error_report("VQ %d size 0x%x Guest index 0x%x "
2252                              "inconsistent with Host index 0x%x: delta 0x%x",
2253                              i, vdev->vq[i].vring.num,
2254                              vring_avail_idx(&vdev->vq[i]),
2255                              vdev->vq[i].last_avail_idx, nheads);
2256                 return -1;
2257             }
2258             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
2259             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
2260 
2261             /*
2262              * Some devices migrate VirtQueueElements that have been popped
2263              * from the avail ring but not yet returned to the used ring.
2264              * Since max ring size < UINT16_MAX it's safe to use modulo
2265              * UINT16_MAX + 1 subtraction.
2266              */
2267             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
2268                                 vdev->vq[i].used_idx);
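            /*
             * Worked example: last_avail_idx has wrapped to 3 while
             * used_idx is still 0xfffe, so (uint16_t)(3 - 0xfffe) = 5
             * elements are in flight.
             */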
2269             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
2270                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
2271                              "used_idx 0x%x",
2272                              i, vdev->vq[i].vring.num,
2273                              vdev->vq[i].last_avail_idx,
2274                              vdev->vq[i].used_idx);
2275                 return -1;
2276             }
2277         }
2278     }
2279     rcu_read_unlock();
2280 
2281     return 0;
2282 }
2283 
2284 void virtio_cleanup(VirtIODevice *vdev)
2285 {
2286     qemu_del_vm_change_state_handler(vdev->vmstate);
2287 }
2288 
2289 static void virtio_vmstate_change(void *opaque, int running, RunState state)
2290 {
2291     VirtIODevice *vdev = opaque;
2292     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2293     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2294     bool backend_run = running && vdev->started;
2295     vdev->vm_running = running;
2296 
2297     if (backend_run) {
2298         virtio_set_status(vdev, vdev->status);
2299     }
2300 
2301     if (k->vmstate_change) {
2302         k->vmstate_change(qbus->parent, backend_run);
2303     }
2304 
2305     if (!backend_run) {
2306         virtio_set_status(vdev, vdev->status);
2307     }
2308 }
2309 
2310 void virtio_instance_init_common(Object *proxy_obj, void *data,
2311                                  size_t vdev_size, const char *vdev_name)
2312 {
2313     DeviceState *vdev = data;
2314 
2315     object_initialize_child(proxy_obj, "virtio-backend", vdev, vdev_size,
2316                             vdev_name, &error_abort, NULL);
2317     qdev_alias_all_properties(vdev, proxy_obj);
2318 }
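
/*
 * A proxy's instance_init typically wraps this helper, e.g. (a sketch
 * modelled on the virtio-pci proxies; the "foo" names are hypothetical):
 *
 *   static void virtio_foo_pci_instance_init(Object *obj)
 *   {
 *       VirtIOFooPCI *dev = VIRTIO_FOO_PCI(obj);
 *
 *       virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
 *                                   TYPE_VIRTIO_FOO);
 *   }
 */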
2319 
2320 void virtio_init(VirtIODevice *vdev, const char *name,
2321                  uint16_t device_id, size_t config_size)
2322 {
2323     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2324     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2325     int i;
2326     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
2327 
2328     if (nvectors) {
2329         vdev->vector_queues =
2330             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
2331     }
2332 
2333     vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) &&
2334                           !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1));
2335     vdev->started = false;
2336     vdev->device_id = device_id;
2337     vdev->status = 0;
2338     atomic_set(&vdev->isr, 0);
2339     vdev->queue_sel = 0;
2340     vdev->config_vector = VIRTIO_NO_VECTOR;
2341     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
2342     vdev->vm_running = runstate_is_running();
2343     vdev->broken = false;
2344     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2345         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
2346         vdev->vq[i].vdev = vdev;
2347         vdev->vq[i].queue_index = i;
2348     }
2349 
2350     vdev->name = name;
2351     vdev->config_len = config_size;
2352     if (vdev->config_len) {
2353         vdev->config = g_malloc0(config_size);
2354     } else {
2355         vdev->config = NULL;
2356     }
2357     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
2358                                                      vdev);
2359     vdev->device_endian = virtio_default_endian();
2360     vdev->use_guest_notifier_mask = true;
2361 }
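
/*
 * Realize-time usage sketch (hypothetical device; VIRTIO_ID_FOO, the
 * config struct and the handler name are illustrative):
 *
 *   virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
 *               sizeof(struct virtio_foo_config));
 *   foo->vq = virtio_add_queue(vdev, 128, virtio_foo_handle_output);
 *
 * virtio_add_queue(), defined earlier in this file, fills in the
 * vring.num and num_default slots left zeroed here.
 */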
2362 
2363 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
2364 {
2365     return vdev->vq[n].vring.desc;
2366 }
2367 
2368 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
2369 {
2370     return virtio_queue_get_desc_addr(vdev, n) != 0;
2371 }
2372 
2373 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
2374 {
2375     return vdev->vq[n].vring.avail;
2376 }
2377 
2378 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
2379 {
2380     return vdev->vq[n].vring.used;
2381 }
2382 
2383 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
2384 {
2385     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
2386 }
2387 
2388 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
2389 {
2390     return offsetof(VRingAvail, ring) +
2391         sizeof(uint16_t) * vdev->vq[n].vring.num;
2392 }
2393 
2394 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
2395 {
2396     return offsetof(VRingUsed, ring) +
2397         sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
2398 }
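
/*
 * Worked example for vring.num = 256: sizeof(VRingDesc) is 16, so the
 * descriptor table is 4096 bytes; the avail ring is 4 + 2 * 256 = 516
 * bytes and the used ring 4 + 8 * 256 = 2052 bytes (the 4-byte flags/idx
 * header plus the ring entries themselves).
 */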
2399 
2400 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
2401 {
2402     return vdev->vq[n].last_avail_idx;
2403 }
2404 
2405 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
2406 {
2407     vdev->vq[n].last_avail_idx = idx;
2408     vdev->vq[n].shadow_avail_idx = idx;
2409 }
2410 
2411 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
2412 {
2413     rcu_read_lock();
2414     if (vdev->vq[n].vring.desc) {
2415         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
2416         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
2417     }
2418     rcu_read_unlock();
2419 }
2420 
2421 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
2422 {
2423     rcu_read_lock();
2424     if (vdev->vq[n].vring.desc) {
2425         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
2426     }
2427     rcu_read_unlock();
2428 }
2429 
2430 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
2431 {
2432     vdev->vq[n].signalled_used_valid = false;
2433 }
2434 
2435 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
2436 {
2437     return vdev->vq + n;
2438 }
2439 
2440 uint16_t virtio_get_queue_index(VirtQueue *vq)
2441 {
2442     return vq->queue_index;
2443 }
2444 
2445 static void virtio_queue_guest_notifier_read(EventNotifier *n)
2446 {
2447     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
2448     if (event_notifier_test_and_clear(n)) {
2449         virtio_irq(vq);
2450     }
2451 }
2452 
2453 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
2454                                                 bool with_irqfd)
2455 {
2456     if (assign && !with_irqfd) {
2457         event_notifier_set_handler(&vq->guest_notifier,
2458                                    virtio_queue_guest_notifier_read);
2459     } else {
2460         event_notifier_set_handler(&vq->guest_notifier, NULL);
2461     }
2462     if (!assign) {
2463         /* Test and clear notifier before closing it,
2464          * in case poll callback didn't have time to run. */
2465         virtio_queue_guest_notifier_read(&vq->guest_notifier);
2466     }
2467 }
2468 
2469 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
2470 {
2471     return &vq->guest_notifier;
2472 }
2473 
2474 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
2475 {
2476     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2477     if (event_notifier_test_and_clear(n)) {
2478         virtio_queue_notify_aio_vq(vq);
2479     }
2480 }
2481 
2482 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
2483 {
2484     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2485 
2486     virtio_queue_set_notification(vq, 0);
2487 }
2488 
2489 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
2490 {
2491     EventNotifier *n = opaque;
2492     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2493     bool progress;
2494 
2495     if (!vq->vring.desc || virtio_queue_empty(vq)) {
2496         return false;
2497     }
2498 
2499     progress = virtio_queue_notify_aio_vq(vq);
2500 
2501     /* In case the handler function re-enabled notifications */
2502     virtio_queue_set_notification(vq, 0);
2503     return progress;
2504 }
2505 
2506 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
2507 {
2508     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2509 
2510     /* Caller polls once more after this to catch requests that race with us */
2511     virtio_queue_set_notification(vq, 1);
2512 }
2513 
2514 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
2515                                                 VirtIOHandleAIOOutput handle_output)
2516 {
2517     if (handle_output) {
2518         vq->handle_aio_output = handle_output;
2519         aio_set_event_notifier(ctx, &vq->host_notifier, true,
2520                                virtio_queue_host_notifier_aio_read,
2521                                virtio_queue_host_notifier_aio_poll);
2522         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
2523                                     virtio_queue_host_notifier_aio_poll_begin,
2524                                     virtio_queue_host_notifier_aio_poll_end);
2525     } else {
2526         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
2527         /* Test and clear notifier after disabling event,
2528          * in case poll callback didn't have time to run. */
2529         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
2530         vq->handle_aio_output = NULL;
2531     }
2532 }
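
/*
 * Usage sketch: a dataplane backend passes its IOThread's AioContext
 * here (the context and handler names are illustrative):
 *
 *   virtio_queue_aio_set_host_notifier_handler(vq, iothread_ctx,
 *                                              handle_output_aio);
 *
 * While the AioContext busy-polls, the poll_begin/poll_end callbacks
 * above keep guest notifications disabled, so the guest can skip the
 * ioeventfd write and new requests are picked up straight from the ring.
 */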
2533 
2534 void virtio_queue_host_notifier_read(EventNotifier *n)
2535 {
2536     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2537     if (event_notifier_test_and_clear(n)) {
2538         virtio_queue_notify_vq(vq);
2539     }
2540 }
2541 
2542 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
2543 {
2544     return &vq->host_notifier;
2545 }
2546 
2547 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
2548                                       MemoryRegion *mr, bool assign)
2549 {
2550     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2551     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2552 
2553     if (k->set_host_notifier_mr) {
2554         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
2555     }
2556 
2557     return -1;
2558 }
2559 
2560 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
2561 {
2562     g_free(vdev->bus_name);
2563     vdev->bus_name = g_strdup(bus_name);
2564 }
2565 
2566 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
2567 {
2568     va_list ap;
2569 
2570     va_start(ap, fmt);
2571     error_vreport(fmt, ap);
2572     va_end(ap);
2573 
2574     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2575         vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
2576         virtio_notify_config(vdev);
2577     }
2578 
2579     vdev->broken = true;
2580 }
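
/*
 * Usage sketch: the ring-parsing code earlier in this file reports
 * guest-triggered inconsistencies through this helper, e.g.:
 *
 *   virtio_error(vq->vdev, "Guest says index %u is available", head);
 *
 * which marks the device broken and, for VIRTIO 1.0 devices, asks the
 * guest to reset it via VIRTIO_CONFIG_S_NEEDS_RESET.
 */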
2581 
2582 static void virtio_memory_listener_commit(MemoryListener *listener)
2583 {
2584     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
2585     int i;
2586 
2587     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2588         if (vdev->vq[i].vring.num == 0) {
2589             break;
2590         }
2591         virtio_init_region_cache(vdev, i);
2592     }
2593 }
2594 
2595 static void virtio_device_realize(DeviceState *dev, Error **errp)
2596 {
2597     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2598     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2599     Error *err = NULL;
2600 
2601     /* Devices should either use vmsd or the load/save methods */
2602     assert(!vdc->vmsd || !vdc->load);
2603 
2604     if (vdc->realize != NULL) {
2605         vdc->realize(dev, &err);
2606         if (err != NULL) {
2607             error_propagate(errp, err);
2608             return;
2609         }
2610     }
2611 
2612     virtio_bus_device_plugged(vdev, &err);
2613     if (err != NULL) {
2614         error_propagate(errp, err);
2615         vdc->unrealize(dev, NULL);
2616         return;
2617     }
2618 
2619     vdev->listener.commit = virtio_memory_listener_commit;
2620     memory_listener_register(&vdev->listener, vdev->dma_as);
2621 }
2622 
2623 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
2624 {
2625     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2626     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2627     Error *err = NULL;
2628 
2629     virtio_bus_device_unplugged(vdev);
2630 
2631     if (vdc->unrealize != NULL) {
2632         vdc->unrealize(dev, &err);
2633         if (err != NULL) {
2634             error_propagate(errp, err);
2635             return;
2636         }
2637     }
2638 
2639     g_free(vdev->bus_name);
2640     vdev->bus_name = NULL;
2641 }
2642 
2643 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
2644 {
2645     int i;
2646     if (!vdev->vq) {
2647         return;
2648     }
2649 
2650     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2651         if (vdev->vq[i].vring.num == 0) {
2652             break;
2653         }
2654         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2655     }
2656     g_free(vdev->vq);
2657 }
2658 
2659 static void virtio_device_instance_finalize(Object *obj)
2660 {
2661     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
2662 
2663     memory_listener_unregister(&vdev->listener);
2664     virtio_device_free_virtqueues(vdev);
2665 
2666     g_free(vdev->config);
2667     g_free(vdev->vector_queues);
2668 }
2669 
2670 static Property virtio_properties[] = {
2671     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
2672     DEFINE_PROP_END_OF_LIST(),
2673 };
2674 
2675 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
2676 {
2677     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2678     int i, n, r, err;
2679 
2680     memory_region_transaction_begin();
2681     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2682         VirtQueue *vq = &vdev->vq[n];
2683         if (!virtio_queue_get_num(vdev, n)) {
2684             continue;
2685         }
2686         r = virtio_bus_set_host_notifier(qbus, n, true);
2687         if (r < 0) {
2688             err = r;
2689             goto assign_error;
2690         }
2691         event_notifier_set_handler(&vq->host_notifier,
2692                                    virtio_queue_host_notifier_read);
2693     }
2694 
2695     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2696         /* Kick right away to begin processing requests already in vring */
2697         VirtQueue *vq = &vdev->vq[n];
2698         if (!vq->vring.num) {
2699             continue;
2700         }
2701         event_notifier_set(&vq->host_notifier);
2702     }
2703     memory_region_transaction_commit();
2704     return 0;
2705 
2706 assign_error:
2707     i = n; /* save n for a second iteration after transaction is committed. */
2708     while (--n >= 0) {
2709         VirtQueue *vq = &vdev->vq[n];
2710         if (!virtio_queue_get_num(vdev, n)) {
2711             continue;
2712         }
2713 
2714         event_notifier_set_handler(&vq->host_notifier, NULL);
2715         r = virtio_bus_set_host_notifier(qbus, n, false);
2716         assert(r >= 0);
2717     }
2718     memory_region_transaction_commit();
2719 
2720     while (--i >= 0) {
2721         if (!virtio_queue_get_num(vdev, i)) {
2722             continue;
2723         }
2724         virtio_bus_cleanup_host_notifier(qbus, i);
2725     }
2726     return err;
2727 }
2728 
2729 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
2730 {
2731     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2732     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2733 
2734     return virtio_bus_start_ioeventfd(vbus);
2735 }
2736 
2737 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
2738 {
2739     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2740     int n, r;
2741 
2742     memory_region_transaction_begin();
2743     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2744         VirtQueue *vq = &vdev->vq[n];
2745 
2746         if (!virtio_queue_get_num(vdev, n)) {
2747             continue;
2748         }
2749         event_notifier_set_handler(&vq->host_notifier, NULL);
2750         r = virtio_bus_set_host_notifier(qbus, n, false);
2751         assert(r >= 0);
2752     }
2753     memory_region_transaction_commit();
2754 
2755     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2756         if (!virtio_queue_get_num(vdev, n)) {
2757             continue;
2758         }
2759         virtio_bus_cleanup_host_notifier(qbus, n);
2760     }
2761 }
2762 
2763 void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
2764 {
2765     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2766     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2767 
2768     virtio_bus_stop_ioeventfd(vbus);
2769 }
2770 
2771 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
2772 {
2773     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2774     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2775 
2776     return virtio_bus_grab_ioeventfd(vbus);
2777 }
2778 
2779 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
2780 {
2781     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2782     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2783 
2784     virtio_bus_release_ioeventfd(vbus);
2785 }
2786 
2787 static void virtio_device_class_init(ObjectClass *klass, void *data)
2788 {
2789     /* Set the default value here. */
2790     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2791     DeviceClass *dc = DEVICE_CLASS(klass);
2792 
2793     dc->realize = virtio_device_realize;
2794     dc->unrealize = virtio_device_unrealize;
2795     dc->bus_type = TYPE_VIRTIO_BUS;
2796     dc->props = virtio_properties;
2797     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
2798     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
2799 
2800     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
2801 }
2802 
2803 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
2804 {
2805     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2806     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2807 
2808     return virtio_bus_ioeventfd_enabled(vbus);
2809 }
2810 
2811 static const TypeInfo virtio_device_info = {
2812     .name = TYPE_VIRTIO_DEVICE,
2813     .parent = TYPE_DEVICE,
2814     .instance_size = sizeof(VirtIODevice),
2815     .class_init = virtio_device_class_init,
2816     .instance_finalize = virtio_device_instance_finalize,
2817     .abstract = true,
2818     .class_size = sizeof(VirtioDeviceClass),
2819 };
2820 
2821 static void virtio_register_types(void)
2822 {
2823     type_register_static(&virtio_device_info);
2824 }
2825 
2826 type_init(virtio_register_types)
2827