xref: /openbmc/qemu/hw/virtio/virtio.c (revision ca9759c2a92f528f256fef0e3922416f7bb47bf9)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu-common.h"
17 #include "cpu.h"
18 #include "trace.h"
19 #include "exec/address-spaces.h"
20 #include "qemu/error-report.h"
21 #include "hw/virtio/virtio.h"
22 #include "qemu/atomic.h"
23 #include "hw/virtio/virtio-bus.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "sysemu/dma.h"
26 
27 /*
28  * The alignment to use between consumer and producer parts of vring.
29  * The x86 page size. This is the default, used by transports like PCI
30  * which don't provide a means for the guest to tell the host the alignment.
31  */
32 #define VIRTIO_PCI_VRING_ALIGN         4096
33 
34 typedef struct VRingDesc
35 {
36     uint64_t addr;
37     uint32_t len;
38     uint16_t flags;
39     uint16_t next;
40 } VRingDesc;
41 
42 typedef struct VRingAvail
43 {
44     uint16_t flags;
45     uint16_t idx;
46     uint16_t ring[0];
47 } VRingAvail;
48 
49 typedef struct VRingUsedElem
50 {
51     uint32_t id;
52     uint32_t len;
53 } VRingUsedElem;
54 
55 typedef struct VRingUsed
56 {
57     uint16_t flags;
58     uint16_t idx;
59     VRingUsedElem ring[0];
60 } VRingUsed;
61 
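/*
 * A set of MemoryRegionCache entries covering the three vring areas of one
 * virtqueue.  The whole set is replaced atomically when the rings move, and
 * the old set is reclaimed through RCU, so readers holding the RCU read
 * lock keep using a consistent snapshot.
 */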
62 typedef struct VRingMemoryRegionCaches {
63     struct rcu_head rcu;
64     MemoryRegionCache desc;
65     MemoryRegionCache avail;
66     MemoryRegionCache used;
67 } VRingMemoryRegionCaches;
68 
69 typedef struct VRing
70 {
71     unsigned int num;
72     unsigned int num_default;
73     unsigned int align;
74     hwaddr desc;
75     hwaddr avail;
76     hwaddr used;
77     VRingMemoryRegionCaches *caches;
78 } VRing;
79 
80 struct VirtQueue
81 {
82     VRing vring;
83 
84     /* Next head to pop */
85     uint16_t last_avail_idx;
86 
87     /* Last avail_idx read from VQ. */
88     uint16_t shadow_avail_idx;
89 
90     uint16_t used_idx;
91 
92     /* Last used index value we have signalled on */
93     uint16_t signalled_used;
94 
95     /* True if the signalled_used value above is valid */
96     bool signalled_used_valid;
97 
98     /* Notification enabled? */
99     bool notification;
100 
101     uint16_t queue_index;
102 
103     unsigned int inuse;
104 
105     uint16_t vector;
106     VirtIOHandleOutput handle_output;
107     VirtIOHandleAIOOutput handle_aio_output;
108     VirtIODevice *vdev;
109     EventNotifier guest_notifier;
110     EventNotifier host_notifier;
111     QLIST_ENTRY(VirtQueue) node;
112 };
113 
114 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
115 {
116     if (!caches) {
117         return;
118     }
119 
120     address_space_cache_destroy(&caches->desc);
121     address_space_cache_destroy(&caches->avail);
122     address_space_cache_destroy(&caches->used);
123     g_free(caches);
124 }
125 
126 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
127 {
128     VRingMemoryRegionCaches *caches;
129 
130     caches = atomic_read(&vq->vring.caches);
131     atomic_rcu_set(&vq->vring.caches, NULL);
132     if (caches) {
133         call_rcu(caches, virtio_free_region_cache, rcu);
134     }
135 }
136 
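/*
 * Map the desc, used and avail rings of queue @n into MemoryRegionCaches so
 * the fast path can avoid repeated address_space lookups.  event_size
 * accounts for the trailing used_event/avail_event word that the rings grow
 * by when VIRTIO_RING_F_EVENT_IDX is negotiated.  On any mapping failure the
 * device is marked broken and the caches are torn down again.
 */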
137 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
138 {
139     VirtQueue *vq = &vdev->vq[n];
140     VRingMemoryRegionCaches *old = vq->vring.caches;
141     VRingMemoryRegionCaches *new = NULL;
142     hwaddr addr, size;
143     int event_size;
144     int64_t len;
145 
146     event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
147 
148     addr = vq->vring.desc;
149     if (!addr) {
150         goto out_no_cache;
151     }
152     new = g_new0(VRingMemoryRegionCaches, 1);
153     size = virtio_queue_get_desc_size(vdev, n);
154     len = address_space_cache_init(&new->desc, vdev->dma_as,
155                                    addr, size, false);
156     if (len < size) {
157         virtio_error(vdev, "Cannot map desc");
158         goto err_desc;
159     }
160 
161     size = virtio_queue_get_used_size(vdev, n) + event_size;
162     len = address_space_cache_init(&new->used, vdev->dma_as,
163                                    vq->vring.used, size, true);
164     if (len < size) {
165         virtio_error(vdev, "Cannot map used");
166         goto err_used;
167     }
168 
169     size = virtio_queue_get_avail_size(vdev, n) + event_size;
170     len = address_space_cache_init(&new->avail, vdev->dma_as,
171                                    vq->vring.avail, size, false);
172     if (len < size) {
173         virtio_error(vdev, "Cannot map avail");
174         goto err_avail;
175     }
176 
177     atomic_rcu_set(&vq->vring.caches, new);
178     if (old) {
179         call_rcu(old, virtio_free_region_cache, rcu);
180     }
181     return;
182 
183 err_avail:
184     address_space_cache_destroy(&new->avail);
185 err_used:
186     address_space_cache_destroy(&new->used);
187 err_desc:
188     address_space_cache_destroy(&new->desc);
189 out_no_cache:
190     g_free(new);
191     virtio_virtqueue_reset_region_cache(vq);
192 }
193 
194 /* virt queue functions */
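/*
 * In the legacy layout computed below, the three vring areas live
 * back-to-back in guest memory, with only the used ring aligned up.
 * As a worked example, a 256-entry queue with the default 4096-byte
 * alignment:
 *   desc:  256 * sizeof(VRingDesc)         = 4096 bytes at offset 0
 *   avail: offsetof(VRingAvail, ring[256]) = 4 + 512 = 516 bytes at 4096
 *   used:  vring_align(4096 + 516, 4096)   = offset 8192
 */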
195 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
196 {
197     VRing *vring = &vdev->vq[n].vring;
198 
199     if (!vring->num || !vring->desc || !vring->align) {
200         /* not yet setup -> nothing to do */
201         return;
202     }
203     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
204     vring->used = vring_align(vring->avail +
205                               offsetof(VRingAvail, ring[vring->num]),
206                               vring->align);
207     virtio_init_region_cache(vdev, n);
208 }
209 
210 /* Called within rcu_read_lock().  */
211 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
212                             MemoryRegionCache *cache, int i)
213 {
214     address_space_read_cached(cache, i * sizeof(VRingDesc),
215                               desc, sizeof(VRingDesc));
216     virtio_tswap64s(vdev, &desc->addr);
217     virtio_tswap32s(vdev, &desc->len);
218     virtio_tswap16s(vdev, &desc->flags);
219     virtio_tswap16s(vdev, &desc->next);
220 }
221 
222 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
223 {
224     VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
225     assert(caches != NULL);
226     return caches;
227 }

228 /* Called within rcu_read_lock().  */
229 static inline uint16_t vring_avail_flags(VirtQueue *vq)
230 {
231     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
232     hwaddr pa = offsetof(VRingAvail, flags);
233     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
234 }
235 
236 /* Called within rcu_read_lock().  */
237 static inline uint16_t vring_avail_idx(VirtQueue *vq)
238 {
239     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
240     hwaddr pa = offsetof(VRingAvail, idx);
241     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
242     return vq->shadow_avail_idx;
243 }
244 
245 /* Called within rcu_read_lock().  */
246 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
247 {
248     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
249     hwaddr pa = offsetof(VRingAvail, ring[i]);
250     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
251 }
252 
253 /* Called within rcu_read_lock().  */
254 static inline uint16_t vring_get_used_event(VirtQueue *vq)
255 {
256     return vring_avail_ring(vq, vq->vring.num);
257 }
258 
259 /* Called within rcu_read_lock().  */
260 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
261                                     int i)
262 {
263     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
264     hwaddr pa = offsetof(VRingUsed, ring[i]);
265     virtio_tswap32s(vq->vdev, &uelem->id);
266     virtio_tswap32s(vq->vdev, &uelem->len);
267     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
268     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
269 }
270 
271 /* Called within rcu_read_lock().  */
272 static uint16_t vring_used_idx(VirtQueue *vq)
273 {
274     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
275     hwaddr pa = offsetof(VRingUsed, idx);
276     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
277 }
278 
279 /* Called within rcu_read_lock().  */
280 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
281 {
282     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
283     hwaddr pa = offsetof(VRingUsed, idx);
284     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
285     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
286     vq->used_idx = val;
287 }
288 
289 /* Called within rcu_read_lock().  */
290 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
291 {
292     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
293     VirtIODevice *vdev = vq->vdev;
294     hwaddr pa = offsetof(VRingUsed, flags);
295     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
296 
297     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
298     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
299 }
300 
301 /* Called within rcu_read_lock().  */
302 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
303 {
304     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
305     VirtIODevice *vdev = vq->vdev;
306     hwaddr pa = offsetof(VRingUsed, flags);
307     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
308 
309     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
310     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
311 }
312 
313 /* Called within rcu_read_lock().  */
314 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
315 {
316     VRingMemoryRegionCaches *caches;
317     hwaddr pa;
318     if (!vq->notification) {
319         return;
320     }
321 
322     caches = vring_get_region_caches(vq);
323     pa = offsetof(VRingUsed, ring[vq->vring.num]);
324     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
325     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
326 }
327 
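/*
 * Tell the guest whether it should notify us about new buffers: without
 * EVENT_IDX this toggles VRING_USED_F_NO_NOTIFY in the used ring flags;
 * with EVENT_IDX it instead (re)publishes avail_event, which has the same
 * "please kick me from here on" effect.
 */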
328 void virtio_queue_set_notification(VirtQueue *vq, int enable)
329 {
330     vq->notification = enable;
331 
332     if (!vq->vring.desc) {
333         return;
334     }
335 
336     rcu_read_lock();
337     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
338         vring_set_avail_event(vq, vring_avail_idx(vq));
339     } else if (enable) {
340         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
341     } else {
342         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
343     }
344     if (enable) {
345         /* Expose avail event/used flags before caller checks the avail idx. */
346         smp_mb();
347     }
348     rcu_read_unlock();
349 }
350 
351 int virtio_queue_ready(VirtQueue *vq)
352 {
353     return vq->vring.avail != 0;
354 }
355 
356 /* Fetch avail_idx from VQ memory only when we really need to know whether
357  * the guest has added some buffers.
358  * Called within rcu_read_lock().  */
359 static int virtio_queue_empty_rcu(VirtQueue *vq)
360 {
361     if (unlikely(vq->vdev->broken)) {
362         return 1;
363     }
364 
365     if (unlikely(!vq->vring.avail)) {
366         return 1;
367     }
368 
369     if (vq->shadow_avail_idx != vq->last_avail_idx) {
370         return 0;
371     }
372 
373     return vring_avail_idx(vq) == vq->last_avail_idx;
374 }
375 
376 int virtio_queue_empty(VirtQueue *vq)
377 {
378     bool empty;
379 
380     if (unlikely(vq->vdev->broken)) {
381         return 1;
382     }
383 
384     if (unlikely(!vq->vring.avail)) {
385         return 1;
386     }
387 
388     if (vq->shadow_avail_idx != vq->last_avail_idx) {
389         return 0;
390     }
391 
392     rcu_read_lock();
393     empty = vring_avail_idx(vq) == vq->last_avail_idx;
394     rcu_read_unlock();
395     return empty;
396 }
397 
398 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
399                                unsigned int len)
400 {
401     AddressSpace *dma_as = vq->vdev->dma_as;
402     unsigned int offset;
403     int i;
404 
405     offset = 0;
406     for (i = 0; i < elem->in_num; i++) {
407         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
408 
409         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
410                          elem->in_sg[i].iov_len,
411                          DMA_DIRECTION_FROM_DEVICE, size);
412 
413         offset += size;
414     }
415 
416     for (i = 0; i < elem->out_num; i++) {
417         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
418                          elem->out_sg[i].iov_len, DMA_DIRECTION_TO_DEVICE,
419                          elem->out_sg[i].iov_len);
420     }
421 }
422 
423 /* virtqueue_detach_element:
424  * @vq: The #VirtQueue
425  * @elem: The #VirtQueueElement
426  * @len: number of bytes written
427  *
428  * Detach the element from the virtqueue.  This function is suitable for device
429  * reset or other situations where a #VirtQueueElement is simply freed and will
430  * not be pushed or discarded.
431  */
432 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
433                               unsigned int len)
434 {
435     vq->inuse--;
436     virtqueue_unmap_sg(vq, elem, len);
437 }
438 
439 /* virtqueue_unpop:
440  * @vq: The #VirtQueue
441  * @elem: The #VirtQueueElement
442  * @len: number of bytes written
443  *
444  * Pretend the most recent element wasn't popped from the virtqueue.  The next
445  * call to virtqueue_pop() will refetch the element.
446  */
447 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
448                      unsigned int len)
449 {
450     vq->last_avail_idx--;
451     virtqueue_detach_element(vq, elem, len);
452 }
453 
454 /* virtqueue_rewind:
455  * @vq: The #VirtQueue
456  * @num: Number of elements to push back
457  *
458  * Pretend that elements weren't popped from the virtqueue.  The next
459  * virtqueue_pop() will refetch the oldest element.
460  *
461  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
462  *
463  * Returns: true on success, false if @num is greater than the number of in use
464  * elements.
465  */
466 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
467 {
468     if (num > vq->inuse) {
469         return false;
470     }
471     vq->last_avail_idx -= num;
472     vq->inuse -= num;
473     return true;
474 }
475 
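/*
 * virtqueue_fill() writes a used ring entry @idx slots past used_idx
 * without publishing it; virtqueue_flush() then publishes @count entries at
 * once by advancing used_idx.  virtqueue_push() below is the common
 * fill-one/flush-one shorthand.
 */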
476 /* Called within rcu_read_lock().  */
477 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
478                     unsigned int len, unsigned int idx)
479 {
480     VRingUsedElem uelem;
481 
482     trace_virtqueue_fill(vq, elem, len, idx);
483 
484     virtqueue_unmap_sg(vq, elem, len);
485 
486     if (unlikely(vq->vdev->broken)) {
487         return;
488     }
489 
490     if (unlikely(!vq->vring.used)) {
491         return;
492     }
493 
494     idx = (idx + vq->used_idx) % vq->vring.num;
495 
496     uelem.id = elem->index;
497     uelem.len = len;
498     vring_used_write(vq, &uelem, idx);
499 }
500 
501 /* Called within rcu_read_lock().  */
502 void virtqueue_flush(VirtQueue *vq, unsigned int count)
503 {
504     uint16_t old, new;
505 
506     if (unlikely(vq->vdev->broken)) {
507         vq->inuse -= count;
508         return;
509     }
510 
511     if (unlikely(!vq->vring.used)) {
512         return;
513     }
514 
515     /* Make sure buffer is written before we update index. */
516     smp_wmb();
517     trace_virtqueue_flush(vq, count);
518     old = vq->used_idx;
519     new = old + count;
520     vring_used_idx_set(vq, new);
521     vq->inuse -= count;
522     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
523         vq->signalled_used_valid = false;
524 }
525 
526 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
527                     unsigned int len)
528 {
529     rcu_read_lock();
530     virtqueue_fill(vq, elem, len, 0);
531     virtqueue_flush(vq, 1);
532     rcu_read_unlock();
533 }
534 
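/*
 * avail_idx and last_avail_idx are free-running 16-bit counters, so the
 * subtraction below is correct even across index wrap-around; any result
 * larger than the ring size means the guest wrote garbage.
 */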
535 /* Called within rcu_read_lock().  */
536 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
537 {
538     uint16_t num_heads = vring_avail_idx(vq) - idx;
539 
540     /* Check it isn't doing very strange things with descriptor numbers. */
541     if (num_heads > vq->vring.num) {
542         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
543                      idx, vq->shadow_avail_idx);
544         return -EINVAL;
545     }
546     /* On success, callers read a descriptor at vq->last_avail_idx.
547      * Make sure descriptor read does not bypass avail index read. */
548     if (num_heads) {
549         smp_rmb();
550     }
551 
552     return num_heads;
553 }
554 
555 /* Called within rcu_read_lock().  */
556 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
557                                unsigned int *head)
558 {
559     /* Grab the next descriptor number they're advertising, and increment
560      * the index we've seen. */
561     *head = vring_avail_ring(vq, idx % vq->vring.num);
562 
563     /* If their number is silly, that's a fatal mistake. */
564     if (*head >= vq->vring.num) {
565         virtio_error(vq->vdev, "Guest says index %u is available", *head);
566         return false;
567     }
568 
569     return true;
570 }
571 
572 enum {
573     VIRTQUEUE_READ_DESC_ERROR = -1,
574     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
575     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
576 };
577 
578 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
579                                     MemoryRegionCache *desc_cache, unsigned int max,
580                                     unsigned int *next)
581 {
582     /* If this descriptor says it doesn't chain, we're done. */
583     if (!(desc->flags & VRING_DESC_F_NEXT)) {
584         return VIRTQUEUE_READ_DESC_DONE;
585     }
586 
587     /* Check they're not leading us off the end of the descriptor table. */
588     *next = desc->next;
589     /* Make sure compiler knows to grab that: we don't want it changing! */
590     smp_wmb();
591 
592     if (*next >= max) {
593         virtio_error(vdev, "Desc next is %u", *next);
594         return VIRTQUEUE_READ_DESC_ERROR;
595     }
596 
597     vring_desc_read(vdev, desc, desc_cache, *next);
598     return VIRTQUEUE_READ_DESC_MORE;
599 }
600 
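/*
 * Walk the available ring without popping anything and total up how many
 * device-readable (out) and device-writable (in) bytes are queued, stopping
 * early once both max_in_bytes and max_out_bytes are satisfied.
 */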
601 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
602                                unsigned int *out_bytes,
603                                unsigned max_in_bytes, unsigned max_out_bytes)
604 {
605     VirtIODevice *vdev = vq->vdev;
606     unsigned int max, idx;
607     unsigned int total_bufs, in_total, out_total;
608     VRingMemoryRegionCaches *caches;
609     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
610     int64_t len = 0;
611     int rc;
612 
613     if (unlikely(!vq->vring.desc)) {
614         if (in_bytes) {
615             *in_bytes = 0;
616         }
617         if (out_bytes) {
618             *out_bytes = 0;
619         }
620         return;
621     }
622 
623     rcu_read_lock();
624     idx = vq->last_avail_idx;
625     total_bufs = in_total = out_total = 0;
626 
627     max = vq->vring.num;
628     caches = vring_get_region_caches(vq);
629     if (caches->desc.len < max * sizeof(VRingDesc)) {
630         virtio_error(vdev, "Cannot map descriptor ring");
631         goto err;
632     }
633 
634     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
635         MemoryRegionCache *desc_cache = &caches->desc;
636         unsigned int num_bufs;
637         VRingDesc desc;
638         unsigned int i;
639 
640         num_bufs = total_bufs;
641 
642         if (!virtqueue_get_head(vq, idx++, &i)) {
643             goto err;
644         }
645 
646         vring_desc_read(vdev, &desc, desc_cache, i);
647 
648         if (desc.flags & VRING_DESC_F_INDIRECT) {
649             if (desc.len % sizeof(VRingDesc)) {
650                 virtio_error(vdev, "Invalid size for indirect buffer table");
651                 goto err;
652             }
653 
654             /* If we've got too many, that implies a descriptor loop. */
655             if (num_bufs >= max) {
656                 virtio_error(vdev, "Looped descriptor");
657                 goto err;
658             }
659 
660             /* loop over the indirect descriptor table */
661             len = address_space_cache_init(&indirect_desc_cache,
662                                            vdev->dma_as,
663                                            desc.addr, desc.len, false);
664             desc_cache = &indirect_desc_cache;
665             if (len < desc.len) {
666                 virtio_error(vdev, "Cannot map indirect buffer");
667                 goto err;
668             }
669 
670             max = desc.len / sizeof(VRingDesc);
671             num_bufs = i = 0;
672             vring_desc_read(vdev, &desc, desc_cache, i);
673         }
674 
675         do {
676             /* If we've got too many, that implies a descriptor loop. */
677             if (++num_bufs > max) {
678                 virtio_error(vdev, "Looped descriptor");
679                 goto err;
680             }
681 
682             if (desc.flags & VRING_DESC_F_WRITE) {
683                 in_total += desc.len;
684             } else {
685                 out_total += desc.len;
686             }
687             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
688                 goto done;
689             }
690 
691             rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
692         } while (rc == VIRTQUEUE_READ_DESC_MORE);
693 
694         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
695             goto err;
696         }
697 
698         if (desc_cache == &indirect_desc_cache) {
699             address_space_cache_destroy(&indirect_desc_cache);
700             total_bufs++;
701         } else {
702             total_bufs = num_bufs;
703         }
704     }
705 
706     if (rc < 0) {
707         goto err;
708     }
709 
710 done:
711     address_space_cache_destroy(&indirect_desc_cache);
712     if (in_bytes) {
713         *in_bytes = in_total;
714     }
715     if (out_bytes) {
716         *out_bytes = out_total;
717     }
718     rcu_read_unlock();
719     return;
720 
721 err:
722     in_total = out_total = 0;
723     goto done;
724 }
725 
726 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
727                           unsigned int out_bytes)
728 {
729     unsigned int in_total, out_total;
730 
731     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
732     return in_bytes <= in_total && out_bytes <= out_total;
733 }
734 
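/*
 * Map one descriptor's buffer into the iov[] array.  dma_memory_map() may
 * map less than was asked for, so a single descriptor can consume several
 * iov entries; the loop keeps mapping until the whole buffer is covered or
 * iov space runs out.
 */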
735 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
736                                hwaddr *addr, struct iovec *iov,
737                                unsigned int max_num_sg, bool is_write,
738                                hwaddr pa, size_t sz)
739 {
740     bool ok = false;
741     unsigned num_sg = *p_num_sg;
742     assert(num_sg <= max_num_sg);
743 
744     if (!sz) {
745         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
746         goto out;
747     }
748 
749     while (sz) {
750         hwaddr len = sz;
751 
752         if (num_sg == max_num_sg) {
753             virtio_error(vdev, "virtio: too many descriptors in "
754                                "descriptor chain");
755             goto out;
756         }
757 
758         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
759                                               is_write ?
760                                               DMA_DIRECTION_FROM_DEVICE :
761                                               DMA_DIRECTION_TO_DEVICE);
762         if (!iov[num_sg].iov_base) {
763             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
764             goto out;
765         }
766 
767         iov[num_sg].iov_len = len;
768         addr[num_sg] = pa;
769 
770         sz -= len;
771         pa += len;
772         num_sg++;
773     }
774     ok = true;
775 
776 out:
777     *p_num_sg = num_sg;
778     return ok;
779 }
780 
781 /* Only used by error code paths before we have a VirtQueueElement (therefore
782  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
783  * yet.
784  */
785 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
786                                     struct iovec *iov)
787 {
788     unsigned int i;
789 
790     for (i = 0; i < out_num + in_num; i++) {
791         int is_write = i >= out_num;
792 
793         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
794         iov++;
795     }
796 }
797 
798 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
799                                 hwaddr *addr, unsigned int *num_sg,
800                                 int is_write)
801 {
802     unsigned int i;
803     hwaddr len;
804 
805     for (i = 0; i < *num_sg; i++) {
806         len = sg[i].iov_len;
807         sg[i].iov_base = dma_memory_map(vdev->dma_as,
808                                         addr[i], &len, is_write ?
809                                         DMA_DIRECTION_FROM_DEVICE :
810                                         DMA_DIRECTION_TO_DEVICE);
811         if (!sg[i].iov_base) {
812             error_report("virtio: error trying to map MMIO memory");
813             exit(1);
814         }
815         if (len != sg[i].iov_len) {
816             error_report("virtio: unexpected memory split");
817             exit(1);
818         }
819     }
820 }
821 
822 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
823 {
824     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
825     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
826 }
827 
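/*
 * A VirtQueueElement and its four variable-sized arrays are carved out of a
 * single allocation: @sz bytes for the (possibly device-specific) element
 * struct, then in_addr[], out_addr[], in_sg[] and out_sg[], each aligned
 * for its type.  This keeps pop/push to one g_malloc()/g_free() pair.
 */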
828 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
829 {
830     VirtQueueElement *elem;
831     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
832     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
833     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
834     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
835     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
836     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
837 
838     assert(sz >= sizeof(VirtQueueElement));
839     elem = g_malloc(out_sg_end);
840     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
841     elem->out_num = out_num;
842     elem->in_num = in_num;
843     elem->in_addr = (void *)elem + in_addr_ofs;
844     elem->out_addr = (void *)elem + out_addr_ofs;
845     elem->in_sg = (void *)elem + in_sg_ofs;
846     elem->out_sg = (void *)elem + out_sg_ofs;
847     return elem;
848 }
849 
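/*
 * Pop the next available element: read the head index, walk the (possibly
 * indirect) descriptor chain, map every buffer, and return a heap-allocated
 * VirtQueueElement that the caller eventually hands back with
 * virtqueue_push() (or detaches) and then g_free()s.  Returns NULL if the
 * queue is empty or the device is broken.
 */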
850 void *virtqueue_pop(VirtQueue *vq, size_t sz)
851 {
852     unsigned int i, head, max;
853     VRingMemoryRegionCaches *caches;
854     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
855     MemoryRegionCache *desc_cache;
856     int64_t len;
857     VirtIODevice *vdev = vq->vdev;
858     VirtQueueElement *elem = NULL;
859     unsigned out_num, in_num, elem_entries;
860     hwaddr addr[VIRTQUEUE_MAX_SIZE];
861     struct iovec iov[VIRTQUEUE_MAX_SIZE];
862     VRingDesc desc;
863     int rc;
864 
865     if (unlikely(vdev->broken)) {
866         return NULL;
867     }
868     rcu_read_lock();
869     if (virtio_queue_empty_rcu(vq)) {
870         goto done;
871     }
872     /* Needed after virtio_queue_empty(), see comment in
873      * virtqueue_num_heads(). */
874     smp_rmb();
875 
876     /* When we start there are no input or output buffers. */
877     out_num = in_num = elem_entries = 0;
878 
879     max = vq->vring.num;
880 
881     if (vq->inuse >= vq->vring.num) {
882         virtio_error(vdev, "Virtqueue size exceeded");
883         goto done;
884     }
885 
886     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
887         goto done;
888     }
889 
890     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
891         vring_set_avail_event(vq, vq->last_avail_idx);
892     }
893 
894     i = head;
895 
896     caches = vring_get_region_caches(vq);
897     if (caches->desc.len < max * sizeof(VRingDesc)) {
898         virtio_error(vdev, "Cannot map descriptor ring");
899         goto done;
900     }
901 
902     desc_cache = &caches->desc;
903     vring_desc_read(vdev, &desc, desc_cache, i);
904     if (desc.flags & VRING_DESC_F_INDIRECT) {
905         if (desc.len % sizeof(VRingDesc)) {
906             virtio_error(vdev, "Invalid size for indirect buffer table");
907             goto done;
908         }
909 
910         /* loop over the indirect descriptor table */
911         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
912                                        desc.addr, desc.len, false);
913         desc_cache = &indirect_desc_cache;
914         if (len < desc.len) {
915             virtio_error(vdev, "Cannot map indirect buffer");
916             goto done;
917         }
918 
919         max = desc.len / sizeof(VRingDesc);
920         i = 0;
921         vring_desc_read(vdev, &desc, desc_cache, i);
922     }
923 
924     /* Collect all the descriptors */
925     do {
926         bool map_ok;
927 
928         if (desc.flags & VRING_DESC_F_WRITE) {
929             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
930                                         iov + out_num,
931                                         VIRTQUEUE_MAX_SIZE - out_num, true,
932                                         desc.addr, desc.len);
933         } else {
934             if (in_num) {
935                 virtio_error(vdev, "Incorrect order for descriptors");
936                 goto err_undo_map;
937             }
938             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
939                                         VIRTQUEUE_MAX_SIZE, false,
940                                         desc.addr, desc.len);
941         }
942         if (!map_ok) {
943             goto err_undo_map;
944         }
945 
946         /* If we've got too many, that implies a descriptor loop. */
947         if (++elem_entries > max) {
948             virtio_error(vdev, "Looped descriptor");
949             goto err_undo_map;
950         }
951 
952         rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
953     } while (rc == VIRTQUEUE_READ_DESC_MORE);
954 
955     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
956         goto err_undo_map;
957     }
958 
959     /* Now copy what we have collected and mapped */
960     elem = virtqueue_alloc_element(sz, out_num, in_num);
961     elem->index = head;
962     for (i = 0; i < out_num; i++) {
963         elem->out_addr[i] = addr[i];
964         elem->out_sg[i] = iov[i];
965     }
966     for (i = 0; i < in_num; i++) {
967         elem->in_addr[i] = addr[out_num + i];
968         elem->in_sg[i] = iov[out_num + i];
969     }
970 
971     vq->inuse++;
972 
973     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
974 done:
975     address_space_cache_destroy(&indirect_desc_cache);
976     rcu_read_unlock();
977 
978     return elem;
979 
980 err_undo_map:
981     virtqueue_undo_map_desc(out_num, in_num, iov);
982     goto done;
983 }
984 
985 /* virtqueue_drop_all:
986  * @vq: The #VirtQueue
987  * Drops all queued buffers and indicates them to the guest
988  * as if they are done. Useful when buffers can not be
989  * processed but must be returned to the guest.
990  */
991 unsigned int virtqueue_drop_all(VirtQueue *vq)
992 {
993     unsigned int dropped = 0;
994     VirtQueueElement elem = {};
995     VirtIODevice *vdev = vq->vdev;
996     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
997 
998     if (unlikely(vdev->broken)) {
999         return 0;
1000     }
1001 
1002     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1003         /* This works like virtqueue_pop() but neither maps buffers
1004          * nor allocates any memory. */
1005         smp_rmb();
1006         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1007             break;
1008         }
1009         vq->inuse++;
1010         vq->last_avail_idx++;
1011         if (fEventIdx) {
1012             vring_set_avail_event(vq, vq->last_avail_idx);
1013         }
1014         /* immediately push the element, nothing to unmap
1015          * as both in_num and out_num are set to 0 */
1016         virtqueue_push(vq, &elem, 0);
1017         dropped++;
1018     }
1019 
1020     return dropped;
1021 }
1022 
1023 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1024  * it is what QEMU has always done by mistake.  We can change it sooner
1025  * or later by bumping the version number of the affected vm states.
1026  * In the meantime, since the in-memory layout of VirtQueueElement
1027  * has changed, we need to marshal to and from the layout that was
1028  * used before the change.
1029  */
1030 typedef struct VirtQueueElementOld {
1031     unsigned int index;
1032     unsigned int out_num;
1033     unsigned int in_num;
1034     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1035     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1036     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1037     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1038 } VirtQueueElementOld;
1039 
1040 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1041 {
1042     VirtQueueElement *elem;
1043     VirtQueueElementOld data;
1044     int i;
1045 
1046     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1047 
1048     /* TODO: teach all callers that this can fail, and return failure instead
1049      * of asserting here.
1050      * This is just one thing (there are probably more) that must be
1051      * fixed before we can allow NDEBUG compilation.
1052      */
1053     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1054     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1055 
1056     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1057     elem->index = data.index;
1058 
1059     for (i = 0; i < elem->in_num; i++) {
1060         elem->in_addr[i] = data.in_addr[i];
1061     }
1062 
1063     for (i = 0; i < elem->out_num; i++) {
1064         elem->out_addr[i] = data.out_addr[i];
1065     }
1066 
1067     for (i = 0; i < elem->in_num; i++) {
1068         /* Base is overwritten by virtqueue_map.  */
1069         elem->in_sg[i].iov_base = 0;
1070         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1071     }
1072 
1073     for (i = 0; i < elem->out_num; i++) {
1074         /* Base is overwritten by virtqueue_map.  */
1075         elem->out_sg[i].iov_base = 0;
1076         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1077     }
1078 
1079     virtqueue_map(vdev, elem);
1080     return elem;
1081 }
1082 
1083 void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
1084 {
1085     VirtQueueElementOld data;
1086     int i;
1087 
1088     memset(&data, 0, sizeof(data));
1089     data.index = elem->index;
1090     data.in_num = elem->in_num;
1091     data.out_num = elem->out_num;
1092 
1093     for (i = 0; i < elem->in_num; i++) {
1094         data.in_addr[i] = elem->in_addr[i];
1095     }
1096 
1097     for (i = 0; i < elem->out_num; i++) {
1098         data.out_addr[i] = elem->out_addr[i];
1099     }
1100 
1101     for (i = 0; i < elem->in_num; i++) {
1102         /* Base is overwritten by virtqueue_map when loading.  Do not
1103          * save it, as it would leak the QEMU address space layout.  */
1104         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1105     }
1106 
1107     for (i = 0; i < elem->out_num; i++) {
1108         /* Do not save iov_base as above.  */
1109         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1110     }
1111     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1112 }
1113 
1114 /* virtio device */
1115 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1116 {
1117     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1118     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1119 
1120     if (unlikely(vdev->broken)) {
1121         return;
1122     }
1123 
1124     if (k->notify) {
1125         k->notify(qbus->parent, vector);
1126     }
1127 }
1128 
1129 void virtio_update_irq(VirtIODevice *vdev)
1130 {
1131     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1132 }
1133 
1134 static int virtio_validate_features(VirtIODevice *vdev)
1135 {
1136     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1137 
1138     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1139         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1140         return -EFAULT;
1141     }
1142 
1143     if (k->validate_features) {
1144         return k->validate_features(vdev);
1145     } else {
1146         return 0;
1147     }
1148 }
1149 
1150 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1151 {
1152     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1153     trace_virtio_set_status(vdev, val);
1154 
1155     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1156         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1157             val & VIRTIO_CONFIG_S_FEATURES_OK) {
1158             int ret = virtio_validate_features(vdev);
1159 
1160             if (ret) {
1161                 return ret;
1162             }
1163         }
1164     }
1165     if (k->set_status) {
1166         k->set_status(vdev, val);
1167     }
1168     vdev->status = val;
1169     return 0;
1170 }
1171 
1172 static enum virtio_device_endian virtio_default_endian(void)
1173 {
1174     if (target_words_bigendian()) {
1175         return VIRTIO_DEVICE_ENDIAN_BIG;
1176     } else {
1177         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1178     }
1179 }
1180 
1181 static enum virtio_device_endian virtio_current_cpu_endian(void)
1182 {
1183     CPUClass *cc = CPU_GET_CLASS(current_cpu);
1184 
1185     if (cc->virtio_is_big_endian(current_cpu)) {
1186         return VIRTIO_DEVICE_ENDIAN_BIG;
1187     } else {
1188         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1189     }
1190 }
1191 
1192 void virtio_reset(void *opaque)
1193 {
1194     VirtIODevice *vdev = opaque;
1195     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1196     int i;
1197 
1198     virtio_set_status(vdev, 0);
1199     if (current_cpu) {
1200         /* Guest initiated reset */
1201         vdev->device_endian = virtio_current_cpu_endian();
1202     } else {
1203         /* System reset */
1204         vdev->device_endian = virtio_default_endian();
1205     }
1206 
1207     if (k->reset) {
1208         k->reset(vdev);
1209     }
1210 
1211     vdev->broken = false;
1212     vdev->guest_features = 0;
1213     vdev->queue_sel = 0;
1214     vdev->status = 0;
1215     atomic_set(&vdev->isr, 0);
1216     vdev->config_vector = VIRTIO_NO_VECTOR;
1217     virtio_notify_vector(vdev, vdev->config_vector);
1218 
1219     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1220         vdev->vq[i].vring.desc = 0;
1221         vdev->vq[i].vring.avail = 0;
1222         vdev->vq[i].vring.used = 0;
1223         vdev->vq[i].last_avail_idx = 0;
1224         vdev->vq[i].shadow_avail_idx = 0;
1225         vdev->vq[i].used_idx = 0;
1226         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
1227         vdev->vq[i].signalled_used = 0;
1228         vdev->vq[i].signalled_used_valid = false;
1229         vdev->vq[i].notification = true;
1230         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
1231         vdev->vq[i].inuse = 0;
1232         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
1233     }
1234 }
1235 
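/*
 * Legacy config space accessors: these use the target's natural byte order
 * (ldub_p/lduw_p/ldl_p and the matching stores), whereas the _modern_
 * variants further down are fixed little-endian as VIRTIO 1.0 requires.
 */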
1236 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
1237 {
1238     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1239     uint8_t val;
1240 
1241     if (addr + sizeof(val) > vdev->config_len) {
1242         return (uint32_t)-1;
1243     }
1244 
1245     k->get_config(vdev, vdev->config);
1246 
1247     val = ldub_p(vdev->config + addr);
1248     return val;
1249 }
1250 
1251 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
1252 {
1253     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1254     uint16_t val;
1255 
1256     if (addr + sizeof(val) > vdev->config_len) {
1257         return (uint32_t)-1;
1258     }
1259 
1260     k->get_config(vdev, vdev->config);
1261 
1262     val = lduw_p(vdev->config + addr);
1263     return val;
1264 }
1265 
1266 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
1267 {
1268     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1269     uint32_t val;
1270 
1271     if (addr + sizeof(val) > vdev->config_len) {
1272         return (uint32_t)-1;
1273     }
1274 
1275     k->get_config(vdev, vdev->config);
1276 
1277     val = ldl_p(vdev->config + addr);
1278     return val;
1279 }
1280 
1281 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1282 {
1283     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1284     uint8_t val = data;
1285 
1286     if (addr + sizeof(val) > vdev->config_len) {
1287         return;
1288     }
1289 
1290     stb_p(vdev->config + addr, val);
1291 
1292     if (k->set_config) {
1293         k->set_config(vdev, vdev->config);
1294     }
1295 }
1296 
1297 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1298 {
1299     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1300     uint16_t val = data;
1301 
1302     if (addr + sizeof(val) > vdev->config_len) {
1303         return;
1304     }
1305 
1306     stw_p(vdev->config + addr, val);
1307 
1308     if (k->set_config) {
1309         k->set_config(vdev, vdev->config);
1310     }
1311 }
1312 
1313 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1314 {
1315     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1316     uint32_t val = data;
1317 
1318     if (addr + sizeof(val) > vdev->config_len) {
1319         return;
1320     }
1321 
1322     stl_p(vdev->config + addr, val);
1323 
1324     if (k->set_config) {
1325         k->set_config(vdev, vdev->config);
1326     }
1327 }
1328 
1329 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
1330 {
1331     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1332     uint8_t val;
1333 
1334     if (addr + sizeof(val) > vdev->config_len) {
1335         return (uint32_t)-1;
1336     }
1337 
1338     k->get_config(vdev, vdev->config);
1339 
1340     val = ldub_p(vdev->config + addr);
1341     return val;
1342 }
1343 
1344 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
1345 {
1346     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1347     uint16_t val;
1348 
1349     if (addr + sizeof(val) > vdev->config_len) {
1350         return (uint32_t)-1;
1351     }
1352 
1353     k->get_config(vdev, vdev->config);
1354 
1355     val = lduw_le_p(vdev->config + addr);
1356     return val;
1357 }
1358 
1359 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
1360 {
1361     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1362     uint32_t val;
1363 
1364     if (addr + sizeof(val) > vdev->config_len) {
1365         return (uint32_t)-1;
1366     }
1367 
1368     k->get_config(vdev, vdev->config);
1369 
1370     val = ldl_le_p(vdev->config + addr);
1371     return val;
1372 }
1373 
1374 void virtio_config_modern_writeb(VirtIODevice *vdev,
1375                                  uint32_t addr, uint32_t data)
1376 {
1377     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1378     uint8_t val = data;
1379 
1380     if (addr + sizeof(val) > vdev->config_len) {
1381         return;
1382     }
1383 
1384     stb_p(vdev->config + addr, val);
1385 
1386     if (k->set_config) {
1387         k->set_config(vdev, vdev->config);
1388     }
1389 }
1390 
1391 void virtio_config_modern_writew(VirtIODevice *vdev,
1392                                  uint32_t addr, uint32_t data)
1393 {
1394     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1395     uint16_t val = data;
1396 
1397     if (addr + sizeof(val) > vdev->config_len) {
1398         return;
1399     }
1400 
1401     stw_le_p(vdev->config + addr, val);
1402 
1403     if (k->set_config) {
1404         k->set_config(vdev, vdev->config);
1405     }
1406 }
1407 
1408 void virtio_config_modern_writel(VirtIODevice *vdev,
1409                                  uint32_t addr, uint32_t data)
1410 {
1411     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1412     uint32_t val = data;
1413 
1414     if (addr + sizeof(val) > vdev->config_len) {
1415         return;
1416     }
1417 
1418     stl_le_p(vdev->config + addr, val);
1419 
1420     if (k->set_config) {
1421         k->set_config(vdev, vdev->config);
1422     }
1423 }
1424 
1425 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
1426 {
1427     if (!vdev->vq[n].vring.num) {
1428         return;
1429     }
1430     vdev->vq[n].vring.desc = addr;
1431     virtio_queue_update_rings(vdev, n);
1432 }
1433 
1434 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
1435 {
1436     return vdev->vq[n].vring.desc;
1437 }
1438 
1439 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
1440                             hwaddr avail, hwaddr used)
1441 {
1442     if (!vdev->vq[n].vring.num) {
1443         return;
1444     }
1445     vdev->vq[n].vring.desc = desc;
1446     vdev->vq[n].vring.avail = avail;
1447     vdev->vq[n].vring.used = used;
1448     virtio_init_region_cache(vdev, n);
1449 }
1450 
1451 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
1452 {
1453     /* Don't allow guest to flip queue between existent and
1454      * nonexistent states, or to set it to an invalid size.
1455      */
1456     if (!!num != !!vdev->vq[n].vring.num ||
1457         num > VIRTQUEUE_MAX_SIZE ||
1458         num < 0) {
1459         return;
1460     }
1461     vdev->vq[n].vring.num = num;
1462 }
1463 
1464 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
1465 {
1466     return QLIST_FIRST(&vdev->vector_queues[vector]);
1467 }
1468 
1469 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
1470 {
1471     return QLIST_NEXT(vq, node);
1472 }
1473 
1474 int virtio_queue_get_num(VirtIODevice *vdev, int n)
1475 {
1476     return vdev->vq[n].vring.num;
1477 }
1478 
1479 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
1480 {
1481     return vdev->vq[n].vring.num_default;
1482 }
1483 
1484 int virtio_get_num_queues(VirtIODevice *vdev)
1485 {
1486     int i;
1487 
1488     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1489         if (!virtio_queue_get_num(vdev, i)) {
1490             break;
1491         }
1492     }
1493 
1494     return i;
1495 }
1496 
1497 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
1498 {
1499     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1500     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1501 
1502     /* virtio-1 compliant devices cannot change the alignment */
1503     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1504         error_report("tried to modify queue alignment for virtio-1 device");
1505         return;
1506     }
1507     /* Check that the transport told us it was going to do this
1508      * (so a buggy transport will immediately assert rather than
1509      * silently failing to migrate this state)
1510      */
1511     assert(k->has_variable_vring_alignment);
1512 
1513     if (align) {
1514         vdev->vq[n].vring.align = align;
1515         virtio_queue_update_rings(vdev, n);
1516     }
1517 }
1518 
1519 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
1520 {
1521     if (vq->vring.desc && vq->handle_aio_output) {
1522         VirtIODevice *vdev = vq->vdev;
1523 
1524         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1525         return vq->handle_aio_output(vdev, vq);
1526     }
1527 
1528     return false;
1529 }
1530 
1531 static void virtio_queue_notify_vq(VirtQueue *vq)
1532 {
1533     if (vq->vring.desc && vq->handle_output) {
1534         VirtIODevice *vdev = vq->vdev;
1535 
1536         if (unlikely(vdev->broken)) {
1537             return;
1538         }
1539 
1540         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1541         vq->handle_output(vdev, vq);
1542     }
1543 }
1544 
1545 void virtio_queue_notify(VirtIODevice *vdev, int n)
1546 {
1547     VirtQueue *vq = &vdev->vq[n];
1548 
1549     if (unlikely(!vq->vring.desc || vdev->broken)) {
1550         return;
1551     }
1552 
1553     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1554     if (vq->handle_aio_output) {
1555         event_notifier_set(&vq->host_notifier);
1556     } else if (vq->handle_output) {
1557         vq->handle_output(vdev, vq);
1558     }
1559 }
1560 
1561 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
1562 {
1563     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
1564         VIRTIO_NO_VECTOR;
1565 }
1566 
1567 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
1568 {
1569     VirtQueue *vq = &vdev->vq[n];
1570 
1571     if (n < VIRTIO_QUEUE_MAX) {
1572         if (vdev->vector_queues &&
1573             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
1574             QLIST_REMOVE(vq, node);
1575         }
1576         vdev->vq[n].vector = vector;
1577         if (vdev->vector_queues &&
1578             vector != VIRTIO_NO_VECTOR) {
1579             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
1580         }
1581     }
1582 }
1583 
1584 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
1585                             VirtIOHandleOutput handle_output)
1586 {
1587     int i;
1588 
1589     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1590         if (vdev->vq[i].vring.num == 0)
1591             break;
1592     }
1593 
1594     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
1595         abort();
1596 
1597     vdev->vq[i].vring.num = queue_size;
1598     vdev->vq[i].vring.num_default = queue_size;
1599     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
1600     vdev->vq[i].handle_output = handle_output;
1601     vdev->vq[i].handle_aio_output = NULL;
1602 
1603     return &vdev->vq[i];
1604 }
1605 
1606 void virtio_del_queue(VirtIODevice *vdev, int n)
1607 {
1608     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
1609         abort();
1610     }
1611 
1612     vdev->vq[n].vring.num = 0;
1613     vdev->vq[n].vring.num_default = 0;
1614 }
1615 
1616 static void virtio_set_isr(VirtIODevice *vdev, int value)
1617 {
1618     uint8_t old = atomic_read(&vdev->isr);
1619 
1620     /* Do not write ISR if it does not change, so that its cacheline remains
1621      * shared in the common case where the guest does not read it.
1622      */
1623     if ((old & value) != value) {
1624         atomic_or(&vdev->isr, value);
1625     }
1626 }
1627 
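/*
 * With VIRTIO_RING_F_EVENT_IDX the guest publishes a used_event index and
 * only wants an interrupt once used_idx moves past it.  vring_need_event()
 * computes (uint16_t)(new - event - 1) < (uint16_t)(new - old); e.g. with
 * old = 10, new = 12: used_event = 10 gives 1 < 2, so notify, while
 * used_event = 12 gives 65535 < 2, so the notification is suppressed.
 */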
1628 /* Called within rcu_read_lock().  */
1629 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
1630 {
1631     uint16_t old, new;
1632     bool v;
1633     /* We need to expose used array entries before checking used event. */
1634     smp_mb();
1635     /* Always notify when queue is empty (if the feature was acknowledged) */
1636     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1637         !vq->inuse && virtio_queue_empty(vq)) {
1638         return true;
1639     }
1640 
1641     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1642         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1643     }
1644 
1645     v = vq->signalled_used_valid;
1646     vq->signalled_used_valid = true;
1647     old = vq->signalled_used;
1648     new = vq->signalled_used = vq->used_idx;
1649     return !v || vring_need_event(vring_get_used_event(vq), new, old);
1650 }
1651 
1652 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
1653 {
1654     bool should_notify;
1655     rcu_read_lock();
1656     should_notify = virtio_should_notify(vdev, vq);
1657     rcu_read_unlock();
1658 
1659     if (!should_notify) {
1660         return;
1661     }
1662 
1663     trace_virtio_notify_irqfd(vdev, vq);
1664 
1665     /*
1666      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
1667      * Windows drivers included in virtio-win 1.8.0 (circa 2015) were
1668      * incorrectly polling this bit during crashdump and hibernation
1669      * in MSI mode, causing a hang if this bit is never updated.
1670      * Recent releases of Windows do not really shut down, but rather
1671      * log out and hibernate to make the next startup faster.  Hence,
1672      * this manifested as a more serious hang during shutdown with those drivers.
1673      *
1674      * The next driver release, from 2016, fixed this problem, so working
1675      * around it is not a must, but it's easy enough, so let's do it here.
1676      *
1677      * Note: it's safe to update ISR from any thread as it was switched
1678      * to an atomic operation.
1679      */
1680     virtio_set_isr(vq->vdev, 0x1);
1681     event_notifier_set(&vq->guest_notifier);
1682 }
1683 
1684 static void virtio_irq(VirtQueue *vq)
1685 {
1686     virtio_set_isr(vq->vdev, 0x1);
1687     virtio_notify_vector(vq->vdev, vq->vector);
1688 }
1689 
1690 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
1691 {
1692     bool should_notify;
1693     rcu_read_lock();
1694     should_notify = virtio_should_notify(vdev, vq);
1695     rcu_read_unlock();
1696 
1697     if (!should_notify) {
1698         return;
1699     }
1700 
1701     trace_virtio_notify(vdev, vq);
1702     virtio_irq(vq);
1703 }
1704 
1705 void virtio_notify_config(VirtIODevice *vdev)
1706 {
1707     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
1708         return;
1709 
1710     virtio_set_isr(vdev, 0x3);
1711     vdev->generation++;
1712     virtio_notify_vector(vdev, vdev->config_vector);
1713 }
1714 
1715 static bool virtio_device_endian_needed(void *opaque)
1716 {
1717     VirtIODevice *vdev = opaque;
1718 
1719     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
1720     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1721         return vdev->device_endian != virtio_default_endian();
1722     }
1723     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
1724     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
1725 }
1726 
1727 static bool virtio_64bit_features_needed(void *opaque)
1728 {
1729     VirtIODevice *vdev = opaque;
1730 
1731     return (vdev->host_features >> 32) != 0;
1732 }
1733 
1734 static bool virtio_virtqueue_needed(void *opaque)
1735 {
1736     VirtIODevice *vdev = opaque;
1737 
1738     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
1739 }
1740 
1741 static bool virtio_ringsize_needed(void *opaque)
1742 {
1743     VirtIODevice *vdev = opaque;
1744     int i;
1745 
1746     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1747         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
1748             return true;
1749         }
1750     }
1751     return false;
1752 }
1753 
1754 static bool virtio_extra_state_needed(void *opaque)
1755 {
1756     VirtIODevice *vdev = opaque;
1757     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1758     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1759 
1760     return k->has_extra_state &&
1761         k->has_extra_state(qbus->parent);
1762 }
1763 
1764 static bool virtio_broken_needed(void *opaque)
1765 {
1766     VirtIODevice *vdev = opaque;
1767 
1768     return vdev->broken;
1769 }
1770 
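/*
 * Each vmstate subsection below carries a .needed callback, so it is only
 * put on the wire when the corresponding state differs from its default;
 * this keeps migration streams compatible with older QEMU versions that
 * know nothing about these subsections.
 */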
1771 static const VMStateDescription vmstate_virtqueue = {
1772     .name = "virtqueue_state",
1773     .version_id = 1,
1774     .minimum_version_id = 1,
1775     .fields = (VMStateField[]) {
1776         VMSTATE_UINT64(vring.avail, struct VirtQueue),
1777         VMSTATE_UINT64(vring.used, struct VirtQueue),
1778         VMSTATE_END_OF_LIST()
1779     }
1780 };
1781 
1782 static const VMStateDescription vmstate_virtio_virtqueues = {
1783     .name = "virtio/virtqueues",
1784     .version_id = 1,
1785     .minimum_version_id = 1,
1786     .needed = &virtio_virtqueue_needed,
1787     .fields = (VMStateField[]) {
1788         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1789                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
1790         VMSTATE_END_OF_LIST()
1791     }
1792 };
1793 
1794 static const VMStateDescription vmstate_ringsize = {
1795     .name = "ringsize_state",
1796     .version_id = 1,
1797     .minimum_version_id = 1,
1798     .fields = (VMStateField[]) {
1799         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
1800         VMSTATE_END_OF_LIST()
1801     }
1802 };
1803 
1804 static const VMStateDescription vmstate_virtio_ringsize = {
1805     .name = "virtio/ringsize",
1806     .version_id = 1,
1807     .minimum_version_id = 1,
1808     .needed = &virtio_ringsize_needed,
1809     .fields = (VMStateField[]) {
1810         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1811                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
1812         VMSTATE_END_OF_LIST()
1813     }
1814 };
1815 
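/* Transport-specific "extra" state is opaque to the virtio core; saving and
 * loading it are simply delegated to the bus class callbacks.
 */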
1816 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
1817                            VMStateField *field)
1818 {
1819     VirtIODevice *vdev = pv;
1820     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1821     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1822 
1823     if (!k->load_extra_state) {
1824         return -1;
1825     } else {
1826         return k->load_extra_state(qbus->parent, f);
1827     }
1828 }
1829 
1830 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
1831                            VMStateField *field, QJSON *vmdesc)
1832 {
1833     VirtIODevice *vdev = pv;
1834     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1835     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1836 
1837     k->save_extra_state(qbus->parent, f);
1838     return 0;
1839 }
1840 
1841 static const VMStateInfo vmstate_info_extra_state = {
1842     .name = "virtqueue_extra_state",
1843     .get = get_extra_state,
1844     .put = put_extra_state,
1845 };
1846 
1847 static const VMStateDescription vmstate_virtio_extra_state = {
1848     .name = "virtio/extra_state",
1849     .version_id = 1,
1850     .minimum_version_id = 1,
1851     .needed = &virtio_extra_state_needed,
1852     .fields = (VMStateField[]) {
1853         {
1854             .name         = "extra_state",
1855             .version_id   = 0,
1856             .field_exists = NULL,
1857             .size         = 0,
1858             .info         = &vmstate_info_extra_state,
1859             .flags        = VMS_SINGLE,
1860             .offset       = 0,
1861         },
1862         VMSTATE_END_OF_LIST()
1863     }
1864 };
1865 
1866 static const VMStateDescription vmstate_virtio_device_endian = {
1867     .name = "virtio/device_endian",
1868     .version_id = 1,
1869     .minimum_version_id = 1,
1870     .needed = &virtio_device_endian_needed,
1871     .fields = (VMStateField[]) {
1872         VMSTATE_UINT8(device_endian, VirtIODevice),
1873         VMSTATE_END_OF_LIST()
1874     }
1875 };
1876 
1877 static const VMStateDescription vmstate_virtio_64bit_features = {
1878     .name = "virtio/64bit_features",
1879     .version_id = 1,
1880     .minimum_version_id = 1,
1881     .needed = &virtio_64bit_features_needed,
1882     .fields = (VMStateField[]) {
1883         VMSTATE_UINT64(guest_features, VirtIODevice),
1884         VMSTATE_END_OF_LIST()
1885     }
1886 };
1887 
1888 static const VMStateDescription vmstate_virtio_broken = {
1889     .name = "virtio/broken",
1890     .version_id = 1,
1891     .minimum_version_id = 1,
1892     .needed = &virtio_broken_needed,
1893     .fields = (VMStateField[]) {
1894         VMSTATE_BOOL(broken, VirtIODevice),
1895         VMSTATE_END_OF_LIST()
1896     }
1897 };
1898 
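/* The top-level "virtio" vmstate has no fields of its own: everything that
 * postdates the legacy byte-stream format travels in the subsections below,
 * each emitted only when its .needed callback returns true, so streams stay
 * loadable by older QEMU versions.
 */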
1899 static const VMStateDescription vmstate_virtio = {
1900     .name = "virtio",
1901     .version_id = 1,
1902     .minimum_version_id = 1,
1903     .minimum_version_id_old = 1,
1904     .fields = (VMStateField[]) {
1905         VMSTATE_END_OF_LIST()
1906     },
1907     .subsections = (const VMStateDescription*[]) {
1908         &vmstate_virtio_device_endian,
1909         &vmstate_virtio_64bit_features,
1910         &vmstate_virtio_virtqueues,
1911         &vmstate_virtio_ringsize,
1912         &vmstate_virtio_broken,
1913         &vmstate_virtio_extra_state,
1914         NULL
1915     }
1916 };
1917 
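/* Save order: transport config, core device fields, per-virtqueue state,
 * device-specific state, and finally the vmstate subsections declared above.
 */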
1918 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
1919 {
1920     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1921     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1922     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1923     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
1924     int i;
1925 
1926     if (k->save_config) {
1927         k->save_config(qbus->parent, f);
1928     }
1929 
1930     qemu_put_8s(f, &vdev->status);
1931     qemu_put_8s(f, &vdev->isr);
1932     qemu_put_be16s(f, &vdev->queue_sel);
1933     qemu_put_be32s(f, &guest_features_lo);
1934     qemu_put_be32(f, vdev->config_len);
1935     qemu_put_buffer(f, vdev->config, vdev->config_len);
1936 
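    /* The save format assumes virtqueues are configured contiguously from
     * index 0, so the first empty ring terminates the count of active queues.
     */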
1937     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1940     }
1941 
1942     qemu_put_be32(f, i);
1943 
1944     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1947 
1948         qemu_put_be32(f, vdev->vq[i].vring.num);
1949         if (k->has_variable_vring_alignment) {
1950             qemu_put_be32(f, vdev->vq[i].vring.align);
1951         }
1952         /*
         * Save desc now; the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
1955          */
1956         qemu_put_be64(f, vdev->vq[i].vring.desc);
1957         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
1958         if (k->save_queue) {
1959             k->save_queue(qbus->parent, i, f);
1960         }
1961     }
1962 
1963     if (vdc->save != NULL) {
1964         vdc->save(vdev, f);
1965     }
1966 
1967     if (vdc->vmsd) {
1968         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
1969         if (ret) {
1970             return ret;
1971         }
1972     }
1973 
1974     /* Subsections */
1975     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
1976 }
1977 
1978 /* A wrapper for use as a VMState .put function */
1979 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
1980                               VMStateField *field, QJSON *vmdesc)
1981 {
1982     return virtio_save(VIRTIO_DEVICE(opaque), f);
1983 }
1984 
1985 /* A wrapper for use as a VMState .get function */
1986 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
1987                              VMStateField *field)
1988 {
1989     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
1990     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
1991 
1992     return virtio_load(vdev, f, dc->vmsd->version_id);
1993 }
1994 
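/* Devices embed their virtio parent's state in their own vmsd via this
 * VMStateInfo (see the VMSTATE_VIRTIO_DEVICE macro), funnelling it through
 * virtio_save()/virtio_load().
 */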
const VMStateInfo virtio_vmstate_info = {
1996     .name = "virtio",
1997     .get = virtio_device_get,
1998     .put = virtio_device_put,
1999 };
2000 
2001 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2002 {
2003     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2004     bool bad = (val & ~(vdev->host_features)) != 0;
2005 
2006     val &= vdev->host_features;
2007     if (k->set_features) {
2008         k->set_features(vdev, val);
2009     }
2010     vdev->guest_features = val;
2011     return bad ? -1 : 0;
2012 }
2013 
2014 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2015 {
2016     int ret;
2017     /*
2018      * The driver must not attempt to set features after feature negotiation
2019      * has finished.
2020      */
2021     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2022         return -EINVAL;
2023     }
2024     ret = virtio_set_features_nocheck(vdev, val);
2025     if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2026         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2027         int i;
2028         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2029             if (vdev->vq[i].vring.num != 0) {
2030                 virtio_init_region_cache(vdev, i);
2031             }
2032         }
2033     }
2034     return ret;
2035 }
2036 
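/* Counterpart of virtio_save(): parse the legacy byte stream first, then
 * device-specific state, and finally the subsections, which may refine
 * fields loaded earlier (e.g. the full 64-bit guest features).
 */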
2037 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2038 {
2039     int i, ret;
2040     int32_t config_len;
2041     uint32_t num;
2042     uint32_t features;
2043     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2044     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2045     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2046 
2047     /*
2048      * We poison the endianness to ensure it does not get used before
2049      * subsections have been loaded.
2050      */
2051     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2052 
2053     if (k->load_config) {
2054         ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
2057     }
2058 
2059     qemu_get_8s(f, &vdev->status);
2060     qemu_get_8s(f, &vdev->isr);
2061     qemu_get_be16s(f, &vdev->queue_sel);
2062     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
2063         return -1;
2064     }
2065     qemu_get_be32s(f, &features);
2066 
2067     /*
     * Temporarily set guest_features low bits - needed by the virtio-net
     * load code, which tests for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in the future - don't
     * create new dependencies like this.
2074      */
2075     vdev->guest_features = features;
2076 
2077     config_len = qemu_get_be32(f);
2078 
2079     /*
2080      * There are cases where the incoming config can be bigger or smaller
2081      * than what we have; so load what we have space for, and skip
2082      * any excess that's in the stream.
2083      */
2084     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
2085 
2086     while (config_len > vdev->config_len) {
2087         qemu_get_byte(f);
2088         config_len--;
2089     }
2090 
2091     num = qemu_get_be32(f);
2092 
2093     if (num > VIRTIO_QUEUE_MAX) {
2094         error_report("Invalid number of virtqueues: 0x%x", num);
2095         return -1;
2096     }
2097 
2098     for (i = 0; i < num; i++) {
2099         vdev->vq[i].vring.num = qemu_get_be32(f);
2100         if (k->has_variable_vring_alignment) {
2101             vdev->vq[i].vring.align = qemu_get_be32(f);
2102         }
2103         vdev->vq[i].vring.desc = qemu_get_be64(f);
2104         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
2105         vdev->vq[i].signalled_used_valid = false;
2106         vdev->vq[i].notification = true;
2107 
2108         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
2109             error_report("VQ %d address 0x0 "
2110                          "inconsistent with Host index 0x%x",
2111                          i, vdev->vq[i].last_avail_idx);
2112             return -1;
2113         }
2114         if (k->load_queue) {
2115             ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
2118         }
2119     }
2120 
2121     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2122 
2123     if (vdc->load != NULL) {
2124         ret = vdc->load(vdev, f, version_id);
2125         if (ret) {
2126             return ret;
2127         }
2128     }
2129 
2130     if (vdc->vmsd) {
2131         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
2132         if (ret) {
2133             return ret;
2134         }
2135     }
2136 
2137     /* Subsections */
2138     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
2139     if (ret) {
2140         return ret;
2141     }
2142 
2143     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
2144         vdev->device_endian = virtio_default_endian();
2145     }
2146 
2147     if (virtio_64bit_features_needed(vdev)) {
2148         /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features_nocheck() to sanity-check them
         * against host_features.
2152          */
2153         uint64_t features64 = vdev->guest_features;
2154         if (virtio_set_features_nocheck(vdev, features64) < 0) {
2155             error_report("Features 0x%" PRIx64 " unsupported. "
2156                          "Allowed features: 0x%" PRIx64,
2157                          features64, vdev->host_features);
2158             return -1;
2159         }
2160     } else {
2161         if (virtio_set_features_nocheck(vdev, features) < 0) {
2162             error_report("Features 0x%x unsupported. "
2163                          "Allowed features: 0x%" PRIx64,
2164                          features, vdev->host_features);
2165             return -1;
2166         }
2167     }
2168 
2169     rcu_read_lock();
2170     for (i = 0; i < num; i++) {
2171         if (vdev->vq[i].vring.desc) {
2172             uint16_t nheads;
2173 
2174             /*
2175              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
2176              * only the region cache needs to be set up.  Legacy devices need
2177              * to calculate used and avail ring addresses based on the desc
2178              * address.
2179              */
2180             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2181                 virtio_init_region_cache(vdev, i);
2182             } else {
2183                 virtio_queue_update_rings(vdev, i);
2184             }
2185 
2186             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check the guest isn't doing strange things with
             * descriptor numbers. */
2188             if (nheads > vdev->vq[i].vring.num) {
2189                 error_report("VQ %d size 0x%x Guest index 0x%x "
2190                              "inconsistent with Host index 0x%x: delta 0x%x",
2191                              i, vdev->vq[i].vring.num,
2192                              vring_avail_idx(&vdev->vq[i]),
2193                              vdev->vq[i].last_avail_idx, nheads);
2194                 return -1;
2195             }
2196             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
2197             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
2198 
2199             /*
2200              * Some devices migrate VirtQueueElements that have been popped
2201              * from the avail ring but not yet returned to the used ring.
2202              * Since max ring size < UINT16_MAX it's safe to use modulo
2203              * UINT16_MAX + 1 subtraction.
2204              */
2205             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
2206                                 vdev->vq[i].used_idx);
2207             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
2208                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
2209                              "used_idx 0x%x",
2210                              i, vdev->vq[i].vring.num,
2211                              vdev->vq[i].last_avail_idx,
2212                              vdev->vq[i].used_idx);
2213                 return -1;
2214             }
2215         }
2216     }
2217     rcu_read_unlock();
2218 
2219     return 0;
2220 }
2221 
2222 void virtio_cleanup(VirtIODevice *vdev)
2223 {
2224     qemu_del_vm_change_state_handler(vdev->vmstate);
2225 }
2226 
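/* Follow VM run state changes.  Ordering matters: the device status is
 * re-applied before the bus vmstate_change callback when the VM starts
 * running, and after it when the VM stops.
 */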
2227 static void virtio_vmstate_change(void *opaque, int running, RunState state)
2228 {
2229     VirtIODevice *vdev = opaque;
2230     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2231     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2232     bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
2233     vdev->vm_running = running;
2234 
2235     if (backend_run) {
2236         virtio_set_status(vdev, vdev->status);
2237     }
2238 
2239     if (k->vmstate_change) {
2240         k->vmstate_change(qbus->parent, backend_run);
2241     }
2242 
2243     if (!backend_run) {
2244         virtio_set_status(vdev, vdev->status);
2245     }
2246 }
2247 
2248 void virtio_instance_init_common(Object *proxy_obj, void *data,
2249                                  size_t vdev_size, const char *vdev_name)
2250 {
2251     DeviceState *vdev = data;
2252 
2253     object_initialize(vdev, vdev_size, vdev_name);
2254     object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
2255     object_unref(OBJECT(vdev));
2256     qdev_alias_all_properties(vdev, proxy_obj);
2257 }
2258 
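/* Common initialization for all virtio devices: allocate the virtqueue
 * array (VIRTIO_QUEUE_MAX entries, unused ones have vring.num == 0), reset
 * core state and register a VM run state change handler.
 */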
2259 void virtio_init(VirtIODevice *vdev, const char *name,
2260                  uint16_t device_id, size_t config_size)
2261 {
2262     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2263     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2264     int i;
2265     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
2266 
2267     if (nvectors) {
2268         vdev->vector_queues =
2269             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
2270     }
2271 
2272     vdev->device_id = device_id;
2273     vdev->status = 0;
2274     atomic_set(&vdev->isr, 0);
2275     vdev->queue_sel = 0;
2276     vdev->config_vector = VIRTIO_NO_VECTOR;
2277     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
2278     vdev->vm_running = runstate_is_running();
2279     vdev->broken = false;
2280     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2281         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
2282         vdev->vq[i].vdev = vdev;
2283         vdev->vq[i].queue_index = i;
2284     }
2285 
2286     vdev->name = name;
2287     vdev->config_len = config_size;
2288     if (vdev->config_len) {
2289         vdev->config = g_malloc0(config_size);
2290     } else {
2291         vdev->config = NULL;
2292     }
2293     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
2294                                                      vdev);
2295     vdev->device_endian = virtio_default_endian();
2296     vdev->use_guest_notifier_mask = true;
2297 }
2298 
2299 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
2300 {
2301     return vdev->vq[n].vring.desc;
2302 }
2303 
2304 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
2305 {
2306     return vdev->vq[n].vring.avail;
2307 }
2308 
2309 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
2310 {
2311     return vdev->vq[n].vring.used;
2312 }
2313 
2314 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
2315 {
2316     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
2317 }
2318 
2319 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
2320 {
2321     return offsetof(VRingAvail, ring) +
2322         sizeof(uint16_t) * vdev->vq[n].vring.num;
2323 }
2324 
2325 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
2326 {
2327     return offsetof(VRingUsed, ring) +
2328         sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
2329 }
2330 
2331 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
2332 {
2333     return vdev->vq[n].last_avail_idx;
2334 }
2335 
2336 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
2337 {
2338     vdev->vq[n].last_avail_idx = idx;
2339     vdev->vq[n].shadow_avail_idx = idx;
2340 }
2341 
2342 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
2343 {
2344     rcu_read_lock();
2345     if (vdev->vq[n].vring.desc) {
2346         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
2347         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
2348     }
2349     rcu_read_unlock();
2350 }
2351 
2352 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
2353 {
2354     rcu_read_lock();
2355     if (vdev->vq[n].vring.desc) {
2356         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
2357     }
2358     rcu_read_unlock();
2359 }
2360 
2361 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
2362 {
2363     vdev->vq[n].signalled_used_valid = false;
2364 }
2365 
2366 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
2367 {
2368     return vdev->vq + n;
2369 }
2370 
2371 uint16_t virtio_get_queue_index(VirtQueue *vq)
2372 {
2373     return vq->queue_index;
2374 }
2375 
2376 static void virtio_queue_guest_notifier_read(EventNotifier *n)
2377 {
2378     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
2379     if (event_notifier_test_and_clear(n)) {
2380         virtio_irq(vq);
2381     }
2382 }
2383 
2384 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
2385                                                 bool with_irqfd)
2386 {
2387     if (assign && !with_irqfd) {
2388         event_notifier_set_handler(&vq->guest_notifier,
2389                                    virtio_queue_guest_notifier_read);
2390     } else {
2391         event_notifier_set_handler(&vq->guest_notifier, NULL);
2392     }
2393     if (!assign) {
        /* Test and clear the notifier before closing it,
         * in case the poll callback didn't have time to run. */
2396         virtio_queue_guest_notifier_read(&vq->guest_notifier);
2397     }
2398 }
2399 
2400 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
2401 {
2402     return &vq->guest_notifier;
2403 }
2404 
2405 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
2406 {
2407     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2408     if (event_notifier_test_and_clear(n)) {
2409         virtio_queue_notify_aio_vq(vq);
2410     }
2411 }
2412 
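/* Entering an AioContext polling window: tell the guest it need not kick
 * us, since we are about to busy-poll the ring anyway.
 */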
2413 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
2414 {
2415     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2416 
2417     virtio_queue_set_notification(vq, 0);
2418 }
2419 
2420 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
2421 {
2422     EventNotifier *n = opaque;
2423     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2424     bool progress;
2425 
2426     if (!vq->vring.desc || virtio_queue_empty(vq)) {
2427         return false;
2428     }
2429 
2430     progress = virtio_queue_notify_aio_vq(vq);
2431 
2432     /* In case the handler function re-enabled notifications */
2433     virtio_queue_set_notification(vq, 0);
2434     return progress;
2435 }
2436 
2437 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
2438 {
2439     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2440 
2441     /* Caller polls once more after this to catch requests that race with us */
2442     virtio_queue_set_notification(vq, 1);
2443 }
2444 
2445 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
2446                                                 VirtIOHandleAIOOutput handle_output)
2447 {
2448     if (handle_output) {
2449         vq->handle_aio_output = handle_output;
2450         aio_set_event_notifier(ctx, &vq->host_notifier, true,
2451                                virtio_queue_host_notifier_aio_read,
2452                                virtio_queue_host_notifier_aio_poll);
2453         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
2454                                     virtio_queue_host_notifier_aio_poll_begin,
2455                                     virtio_queue_host_notifier_aio_poll_end);
2456     } else {
2457         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear the notifier after disabling the event,
         * in case the poll callback didn't have time to run. */
2460         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
2461         vq->handle_aio_output = NULL;
2462     }
2463 }
2464 
2465 void virtio_queue_host_notifier_read(EventNotifier *n)
2466 {
2467     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2468     if (event_notifier_test_and_clear(n)) {
2469         virtio_queue_notify_vq(vq);
2470     }
2471 }
2472 
2473 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
2474 {
2475     return &vq->host_notifier;
2476 }
2477 
2478 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
2479                                       MemoryRegion *mr, bool assign)
2480 {
2481     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2482     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2483 
2484     if (k->set_host_notifier_mr) {
2485         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
2486     }
2487 
2488     return -1;
2489 }
2490 
2491 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
2492 {
2493     g_free(vdev->bus_name);
2494     vdev->bus_name = g_strdup(bus_name);
2495 }
2496 
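/* Report a fatal device error and mark the device broken.  For VIRTIO 1.0
 * devices, also set VIRTIO_CONFIG_S_NEEDS_RESET and raise a config
 * interrupt so the guest learns that a reset is required.
 */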
2497 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
2498 {
2499     va_list ap;
2500 
2501     va_start(ap, fmt);
2502     error_vreport(fmt, ap);
2503     va_end(ap);
2504 
2505     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2506         vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
2507         virtio_notify_config(vdev);
2508     }
2509 
2510     vdev->broken = true;
2511 }
2512 
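/* The guest memory map has changed: rebuild the region caches of every
 * active virtqueue, as the cached translations may now be stale.
 */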
2513 static void virtio_memory_listener_commit(MemoryListener *listener)
2514 {
2515     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
2516     int i;
2517 
2518     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2519         if (vdev->vq[i].vring.num == 0) {
2520             break;
2521         }
2522         virtio_init_region_cache(vdev, i);
2523     }
2524 }
2525 
2526 static void virtio_device_realize(DeviceState *dev, Error **errp)
2527 {
2528     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2529     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2530     Error *err = NULL;
2531 
2532     /* Devices should either use vmsd or the load/save methods */
2533     assert(!vdc->vmsd || !vdc->load);
2534 
2535     if (vdc->realize != NULL) {
2536         vdc->realize(dev, &err);
2537         if (err != NULL) {
2538             error_propagate(errp, err);
2539             return;
2540         }
2541     }
2542 
2543     virtio_bus_device_plugged(vdev, &err);
2544     if (err != NULL) {
2545         error_propagate(errp, err);
2546         vdc->unrealize(dev, NULL);
2547         return;
2548     }
2549 
2550     vdev->listener.commit = virtio_memory_listener_commit;
2551     memory_listener_register(&vdev->listener, vdev->dma_as);
2552 }
2553 
2554 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
2555 {
2556     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2557     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2558     Error *err = NULL;
2559 
2560     virtio_bus_device_unplugged(vdev);
2561 
2562     if (vdc->unrealize != NULL) {
2563         vdc->unrealize(dev, &err);
2564         if (err != NULL) {
2565             error_propagate(errp, err);
2566             return;
2567         }
2568     }
2569 
2570     g_free(vdev->bus_name);
2571     vdev->bus_name = NULL;
2572 }
2573 
2574 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
2575 {
2576     int i;
2577     if (!vdev->vq) {
2578         return;
2579     }
2580 
2581     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2582         if (vdev->vq[i].vring.num == 0) {
2583             break;
2584         }
2585         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2586     }
2587     g_free(vdev->vq);
2588 }
2589 
2590 static void virtio_device_instance_finalize(Object *obj)
2591 {
2592     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
2593 
2594     memory_listener_unregister(&vdev->listener);
2595     virtio_device_free_virtqueues(vdev);
2596 
2597     g_free(vdev->config);
2598     g_free(vdev->vector_queues);
2599 }
2600 
2601 static Property virtio_properties[] = {
2602     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
2603     DEFINE_PROP_END_OF_LIST(),
2604 };
2605 
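/* Assign host notifiers (ioeventfds) for all active queues inside a single
 * memory region transaction, then kick each queue once so requests that
 * were queued before ioeventfd was enabled get processed.  On failure,
 * already-assigned notifiers are rolled back before returning the error.
 */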
2606 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
2607 {
2608     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2609     int i, n, r, err;
2610 
2611     memory_region_transaction_begin();
2612     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2613         VirtQueue *vq = &vdev->vq[n];
2614         if (!virtio_queue_get_num(vdev, n)) {
2615             continue;
2616         }
2617         r = virtio_bus_set_host_notifier(qbus, n, true);
2618         if (r < 0) {
2619             err = r;
2620             goto assign_error;
2621         }
2622         event_notifier_set_handler(&vq->host_notifier,
2623                                    virtio_queue_host_notifier_read);
2624     }
2625 
2626     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2627         /* Kick right away to begin processing requests already in vring */
2628         VirtQueue *vq = &vdev->vq[n];
2629         if (!vq->vring.num) {
2630             continue;
2631         }
2632         event_notifier_set(&vq->host_notifier);
2633     }
2634     memory_region_transaction_commit();
2635     return 0;
2636 
2637 assign_error:
2638     i = n; /* save n for a second iteration after transaction is committed. */
2639     while (--n >= 0) {
2640         VirtQueue *vq = &vdev->vq[n];
2641         if (!virtio_queue_get_num(vdev, n)) {
2642             continue;
2643         }
2644 
2645         event_notifier_set_handler(&vq->host_notifier, NULL);
2646         r = virtio_bus_set_host_notifier(qbus, n, false);
2647         assert(r >= 0);
2648     }
2649     memory_region_transaction_commit();
2650 
2651     while (--i >= 0) {
2652         if (!virtio_queue_get_num(vdev, i)) {
2653             continue;
2654         }
2655         virtio_bus_cleanup_host_notifier(qbus, i);
2656     }
2657     return err;
2658 }
2659 
2660 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
2661 {
2662     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2663     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2664 
2665     return virtio_bus_start_ioeventfd(vbus);
2666 }
2667 
2668 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
2669 {
2670     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2671     int n, r;
2672 
2673     memory_region_transaction_begin();
2674     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2675         VirtQueue *vq = &vdev->vq[n];
2676 
2677         if (!virtio_queue_get_num(vdev, n)) {
2678             continue;
2679         }
2680         event_notifier_set_handler(&vq->host_notifier, NULL);
2681         r = virtio_bus_set_host_notifier(qbus, n, false);
2682         assert(r >= 0);
2683     }
2684     memory_region_transaction_commit();
2685 
2686     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2687         if (!virtio_queue_get_num(vdev, n)) {
2688             continue;
2689         }
2690         virtio_bus_cleanup_host_notifier(qbus, n);
2691     }
2692 }
2693 
2694 void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
2695 {
2696     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2697     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2698 
2699     virtio_bus_stop_ioeventfd(vbus);
2700 }
2701 
2702 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
2703 {
2704     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2705     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2706 
2707     return virtio_bus_grab_ioeventfd(vbus);
2708 }
2709 
2710 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
2711 {
2712     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2713     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2714 
2715     virtio_bus_release_ioeventfd(vbus);
2716 }
2717 
2718 static void virtio_device_class_init(ObjectClass *klass, void *data)
2719 {
    /* Set the default callbacks and properties for virtio devices here. */
2721     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2722     DeviceClass *dc = DEVICE_CLASS(klass);
2723 
2724     dc->realize = virtio_device_realize;
2725     dc->unrealize = virtio_device_unrealize;
2726     dc->bus_type = TYPE_VIRTIO_BUS;
2727     dc->props = virtio_properties;
2728     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
2729     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
2730 
2731     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
2732 }
2733 
2734 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
2735 {
2736     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2737     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2738 
2739     return virtio_bus_ioeventfd_enabled(vbus);
2740 }
2741 
2742 static const TypeInfo virtio_device_info = {
2743     .name = TYPE_VIRTIO_DEVICE,
2744     .parent = TYPE_DEVICE,
2745     .instance_size = sizeof(VirtIODevice),
2746     .class_init = virtio_device_class_init,
2747     .instance_finalize = virtio_device_instance_finalize,
2748     .abstract = true,
2749     .class_size = sizeof(VirtioDeviceClass),
2750 };
2751 
2752 static void virtio_register_types(void)
2753 {
2754     type_register_static(&virtio_device_info);
2755 }
2756 
2757 type_init(virtio_register_types)
2758