xref: /openbmc/qemu/hw/virtio/virtio.c (revision 56e2cd24)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu-common.h"
17 #include "cpu.h"
18 #include "trace.h"
19 #include "exec/address-spaces.h"
20 #include "qemu/error-report.h"
21 #include "hw/virtio/virtio.h"
22 #include "qemu/atomic.h"
23 #include "hw/virtio/virtio-bus.h"
24 #include "migration/migration.h"
25 #include "hw/virtio/virtio-access.h"
26 #include "sysemu/dma.h"
27 
28 /*
29  * The alignment to use between consumer and producer parts of vring.
30  * This is the x86 page size. It is the default, used by transports like PCI
31  * which don't provide a means for the guest to tell the host the alignment.
32  */
33 #define VIRTIO_PCI_VRING_ALIGN         4096
34 
35 typedef struct VRingDesc
36 {
37     uint64_t addr;
38     uint32_t len;
39     uint16_t flags;
40     uint16_t next;
41 } VRingDesc;
42 
43 typedef struct VRingAvail
44 {
45     uint16_t flags;
46     uint16_t idx;
47     uint16_t ring[0];
48 } VRingAvail;
49 
50 typedef struct VRingUsedElem
51 {
52     uint32_t id;
53     uint32_t len;
54 } VRingUsedElem;
55 
56 typedef struct VRingUsed
57 {
58     uint16_t flags;
59     uint16_t idx;
60     VRingUsedElem ring[0];
61 } VRingUsed;
62 
63 typedef struct VRingMemoryRegionCaches {
64     struct rcu_head rcu;
65     MemoryRegionCache desc;
66     MemoryRegionCache avail;
67     MemoryRegionCache used;
68 } VRingMemoryRegionCaches;
69 
70 typedef struct VRing
71 {
72     unsigned int num;
73     unsigned int num_default;
74     unsigned int align;
75     hwaddr desc;
76     hwaddr avail;
77     hwaddr used;
78     VRingMemoryRegionCaches *caches;
79 } VRing;
80 
81 struct VirtQueue
82 {
83     VRing vring;
84 
85     /* Next head to pop */
86     uint16_t last_avail_idx;
87 
88     /* Last avail_idx read from VQ. */
89     uint16_t shadow_avail_idx;
90 
91     uint16_t used_idx;
92 
93     /* Last used index value we have signalled on */
94     uint16_t signalled_used;
95 
96     /* True if signalled_used is valid */
97     bool signalled_used_valid;
98 
99     /* Notification enabled? */
100     bool notification;
101 
102     uint16_t queue_index;
103 
104     unsigned int inuse;
105 
106     uint16_t vector;
107     VirtIOHandleOutput handle_output;
108     VirtIOHandleAIOOutput handle_aio_output;
109     VirtIODevice *vdev;
110     EventNotifier guest_notifier;
111     EventNotifier host_notifier;
112     QLIST_ENTRY(VirtQueue) node;
113 };
114 
115 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
116 {
117     if (!caches) {
118         return;
119     }
120 
121     address_space_cache_destroy(&caches->desc);
122     address_space_cache_destroy(&caches->avail);
123     address_space_cache_destroy(&caches->used);
124     g_free(caches);
125 }
126 
127 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
128 {
129     VirtQueue *vq = &vdev->vq[n];
130     VRingMemoryRegionCaches *old = vq->vring.caches;
131     VRingMemoryRegionCaches *new;
132     hwaddr addr, size;
133     int event_size;
134     int64_t len;
135 
136     event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
137 
138     addr = vq->vring.desc;
139     if (!addr) {
140         return;
141     }
142     new = g_new0(VRingMemoryRegionCaches, 1);
143     size = virtio_queue_get_desc_size(vdev, n);
144     len = address_space_cache_init(&new->desc, vdev->dma_as,
145                                    addr, size, false);
146     if (len < size) {
147         virtio_error(vdev, "Cannot map desc");
148         goto err_desc;
149     }
150 
151     size = virtio_queue_get_used_size(vdev, n) + event_size;
152     len = address_space_cache_init(&new->used, vdev->dma_as,
153                                    vq->vring.used, size, true);
154     if (len < size) {
155         virtio_error(vdev, "Cannot map used");
156         goto err_used;
157     }
158 
159     size = virtio_queue_get_avail_size(vdev, n) + event_size;
160     len = address_space_cache_init(&new->avail, vdev->dma_as,
161                                    vq->vring.avail, size, false);
162     if (len < size) {
163         virtio_error(vdev, "Cannot map avail");
164         goto err_avail;
165     }
166 
167     atomic_rcu_set(&vq->vring.caches, new);
168     if (old) {
169         call_rcu(old, virtio_free_region_cache, rcu);
170     }
171     return;
172 
173 err_avail:
174     address_space_cache_destroy(&new->used);
175 err_used:
176     address_space_cache_destroy(&new->desc);
177 err_desc:
178     g_free(new);
179 }
180 
181 /* virt queue functions */
182 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
183 {
184     VRing *vring = &vdev->vq[n].vring;
185 
186     if (!vring->desc) {
187         /* not yet setup -> nothing to do */
188         return;
189     }
190     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
191     vring->used = vring_align(vring->avail +
192                               offsetof(VRingAvail, ring[vring->num]),
193                               vring->align);
194     virtio_init_region_cache(vdev, n);
195 }
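
/*
 * Worked example (illustrative, not used by the code): with num = 256
 * descriptors and the default 4096-byte alignment, the layout computed
 * above for a ring placed at guest physical address 0x10000 would be:
 *
 *   desc  at 0x10000   (256 * sizeof(VRingDesc)         = 4096 bytes)
 *   avail at 0x11000   (4 + 256 * sizeof(uint16_t)      =  516 bytes)
 *   used  at 0x12000   (4 + 256 * sizeof(VRingUsedElem) = 2052 bytes)
 *
 * used starts at vring_align(0x11000 + 516, 4096) = 0x12000.  With
 * VIRTIO_RING_F_EVENT_IDX the avail and used rings each carry an extra
 * trailing uint16_t, which the region caches account for separately.
 */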
196 
197 /* Called within rcu_read_lock().  */
198 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
199                             MemoryRegionCache *cache, int i)
200 {
201     address_space_read_cached(cache, i * sizeof(VRingDesc),
202                               desc, sizeof(VRingDesc));
203     virtio_tswap64s(vdev, &desc->addr);
204     virtio_tswap32s(vdev, &desc->len);
205     virtio_tswap16s(vdev, &desc->flags);
206     virtio_tswap16s(vdev, &desc->next);
207 }
208 
209 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
210 {
211     VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
212     assert(caches != NULL);
213     return caches;
214 }
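
/*
 * The caches pointer is RCU-protected: virtio_init_region_cache() above
 * publishes a new set with atomic_rcu_set() and frees the old one via
 * call_rcu().  A reader is therefore expected to bracket its accesses,
 * e.g. (sketch only):
 *
 *   rcu_read_lock();
 *   caches = vring_get_region_caches(vq);
 *   ... use caches->desc, caches->avail, caches->used ...
 *   rcu_read_unlock();
 */
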
215 /* Called within rcu_read_lock().  */
216 static inline uint16_t vring_avail_flags(VirtQueue *vq)
217 {
218     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
219     hwaddr pa = offsetof(VRingAvail, flags);
220     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
221 }
222 
223 /* Called within rcu_read_lock().  */
224 static inline uint16_t vring_avail_idx(VirtQueue *vq)
225 {
226     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
227     hwaddr pa = offsetof(VRingAvail, idx);
228     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
229     return vq->shadow_avail_idx;
230 }
231 
232 /* Called within rcu_read_lock().  */
233 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
234 {
235     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
236     hwaddr pa = offsetof(VRingAvail, ring[i]);
237     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
238 }
239 
240 /* Called within rcu_read_lock().  */
241 static inline uint16_t vring_get_used_event(VirtQueue *vq)
242 {
243     return vring_avail_ring(vq, vq->vring.num);
244 }
245 
246 /* Called within rcu_read_lock().  */
247 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
248                                     int i)
249 {
250     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
251     hwaddr pa = offsetof(VRingUsed, ring[i]);
252     virtio_tswap32s(vq->vdev, &uelem->id);
253     virtio_tswap32s(vq->vdev, &uelem->len);
254     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
255     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
256 }
257 
258 /* Called within rcu_read_lock().  */
259 static uint16_t vring_used_idx(VirtQueue *vq)
260 {
261     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
262     hwaddr pa = offsetof(VRingUsed, idx);
263     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
264 }
265 
266 /* Called within rcu_read_lock().  */
267 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
268 {
269     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
270     hwaddr pa = offsetof(VRingUsed, idx);
271     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
272     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
273     vq->used_idx = val;
274 }
275 
276 /* Called within rcu_read_lock().  */
277 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
278 {
279     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
280     VirtIODevice *vdev = vq->vdev;
281     hwaddr pa = offsetof(VRingUsed, flags);
282     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
283 
284     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
285     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
286 }
287 
288 /* Called within rcu_read_lock().  */
289 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
290 {
291     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
292     VirtIODevice *vdev = vq->vdev;
293     hwaddr pa = offsetof(VRingUsed, flags);
294     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
295 
296     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
297     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
298 }
299 
300 /* Called within rcu_read_lock().  */
301 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
302 {
303     VRingMemoryRegionCaches *caches;
304     hwaddr pa;
305     if (!vq->notification) {
306         return;
307     }
308 
309     caches = vring_get_region_caches(vq);
310     pa = offsetof(VRingUsed, ring[vq->vring.num]);
311     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
312     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
313 }
314 
315 void virtio_queue_set_notification(VirtQueue *vq, int enable)
316 {
317     vq->notification = enable;
318 
319     if (!vq->vring.desc) {
320         return;
321     }
322 
323     rcu_read_lock();
324     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
325         vring_set_avail_event(vq, vring_avail_idx(vq));
326     } else if (enable) {
327         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
328     } else {
329         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
330     }
331     if (enable) {
332         /* Expose avail event/used flags before caller checks the avail idx. */
333         smp_mb();
334     }
335     rcu_read_unlock();
336 }
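
/*
 * Illustrative use only: a device handler may suppress notifications
 * while it drains the queue, then re-enable them and re-check for
 * buffers that raced with the re-enable:
 *
 *   virtio_queue_set_notification(vq, 0);
 *   while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
 *       ... process elem, virtqueue_push(vq, elem, len), g_free(elem) ...
 *   }
 *   virtio_queue_set_notification(vq, 1);
 *   if (!virtio_queue_empty(vq)) {
 *       ... go around again ...
 *   }
 */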
337 
338 int virtio_queue_ready(VirtQueue *vq)
339 {
340     return vq->vring.avail != 0;
341 }
342 
343 /* Fetch avail_idx from VQ memory only when we really need to know if
344  * the guest has added some buffers.
345  * Called within rcu_read_lock().  */
346 static int virtio_queue_empty_rcu(VirtQueue *vq)
347 {
348     if (unlikely(!vq->vring.avail)) {
349         return 1;
350     }
351 
352     if (vq->shadow_avail_idx != vq->last_avail_idx) {
353         return 0;
354     }
355 
356     return vring_avail_idx(vq) == vq->last_avail_idx;
357 }
358 
359 int virtio_queue_empty(VirtQueue *vq)
360 {
361     bool empty;
362 
363     if (unlikely(!vq->vring.avail)) {
364         return 1;
365     }
366 
367     if (vq->shadow_avail_idx != vq->last_avail_idx) {
368         return 0;
369     }
370 
371     rcu_read_lock();
372     empty = vring_avail_idx(vq) == vq->last_avail_idx;
373     rcu_read_unlock();
374     return empty;
375 }
376 
377 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
378                                unsigned int len)
379 {
380     AddressSpace *dma_as = vq->vdev->dma_as;
381     unsigned int offset;
382     int i;
383 
384     offset = 0;
385     for (i = 0; i < elem->in_num; i++) {
386         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
387 
388         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
389                          elem->in_sg[i].iov_len,
390                          DMA_DIRECTION_FROM_DEVICE, size);
391 
392         offset += size;
393     }
394 
395     for (i = 0; i < elem->out_num; i++) {
396         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
397                          elem->out_sg[i].iov_len,
398                          DMA_DIRECTION_TO_DEVICE,
399                          elem->out_sg[i].iov_len);
    }
400 }
401 
402 /* virtqueue_detach_element:
403  * @vq: The #VirtQueue
404  * @elem: The #VirtQueueElement
405  * @len: number of bytes written
406  *
407  * Detach the element from the virtqueue.  This function is suitable for device
408  * reset or other situations where a #VirtQueueElement is simply freed and will
409  * not be pushed or discarded.
410  */
411 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
412                               unsigned int len)
413 {
414     vq->inuse--;
415     virtqueue_unmap_sg(vq, elem, len);
416 }
417 
418 /* virtqueue_unpop:
419  * @vq: The #VirtQueue
420  * @elem: The #VirtQueueElement
421  * @len: number of bytes written
422  *
423  * Pretend the most recent element wasn't popped from the virtqueue.  The next
424  * call to virtqueue_pop() will refetch the element.
425  */
426 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
427                      unsigned int len)
428 {
429     vq->last_avail_idx--;
430     virtqueue_detach_element(vq, elem, len);
431 }
432 
433 /* virtqueue_rewind:
434  * @vq: The #VirtQueue
435  * @num: Number of elements to push back
436  *
437  * Pretend that elements weren't popped from the virtqueue.  The next
438  * virtqueue_pop() will refetch the oldest element.
439  *
440  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
441  *
442  * Returns: true on success, false if @num is greater than the number of in use
443  * elements.
444  */
445 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
446 {
447     if (num > vq->inuse) {
448         return false;
449     }
450     vq->last_avail_idx -= num;
451     vq->inuse -= num;
452     return true;
453 }
454 
455 /* Called within rcu_read_lock().  */
456 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
457                     unsigned int len, unsigned int idx)
458 {
459     VRingUsedElem uelem;
460 
461     trace_virtqueue_fill(vq, elem, len, idx);
462 
463     virtqueue_unmap_sg(vq, elem, len);
464 
465     if (unlikely(vq->vdev->broken)) {
466         return;
467     }
468 
469     if (unlikely(!vq->vring.used)) {
470         return;
471     }
472 
473     idx = (idx + vq->used_idx) % vq->vring.num;
474 
475     uelem.id = elem->index;
476     uelem.len = len;
477     vring_used_write(vq, &uelem, idx);
478 }
479 
480 /* Called within rcu_read_lock().  */
481 void virtqueue_flush(VirtQueue *vq, unsigned int count)
482 {
483     uint16_t old, new;
484 
485     if (unlikely(vq->vdev->broken)) {
486         vq->inuse -= count;
487         return;
488     }
489 
490     if (unlikely(!vq->vring.used)) {
491         return;
492     }
493 
494     /* Make sure buffer is written before we update index. */
495     smp_wmb();
496     trace_virtqueue_flush(vq, count);
497     old = vq->used_idx;
498     new = old + count;
499     vring_used_idx_set(vq, new);
500     vq->inuse -= count;
501     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
502         vq->signalled_used_valid = false;
    }
503 }
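
/*
 * virtqueue_fill() and virtqueue_flush() are split so that a device can
 * complete several elements with a single used-index update and thus at
 * most one notification.  A sketch, assuming the caller already holds
 * rcu_read_lock() as required above:
 *
 *   for (i = 0; i < count; i++) {
 *       virtqueue_fill(vq, elems[i], lens[i], i);
 *   }
 *   virtqueue_flush(vq, count);
 */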
504 
505 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
506                     unsigned int len)
507 {
508     rcu_read_lock();
509     virtqueue_fill(vq, elem, len, 0);
510     virtqueue_flush(vq, 1);
511     rcu_read_unlock();
512 }
513 
514 /* Called within rcu_read_lock().  */
515 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
516 {
517     uint16_t num_heads = vring_avail_idx(vq) - idx;
518 
519     /* Check it isn't doing very strange things with descriptor numbers. */
520     if (num_heads > vq->vring.num) {
521         virtio_error(vq->vdev, "Guest moved avail index from %u to %u",
522                      idx, vq->shadow_avail_idx);
523         return -EINVAL;
524     }
525     /* On success, callers read a descriptor at vq->last_avail_idx.
526      * Make sure descriptor read does not bypass avail index read. */
527     if (num_heads) {
528         smp_rmb();
529     }
530 
531     return num_heads;
532 }
533 
534 /* Called within rcu_read_lock().  */
535 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
536                                unsigned int *head)
537 {
538     /* Grab the next descriptor number they're advertising, and increment
539      * the index we've seen. */
540     *head = vring_avail_ring(vq, idx % vq->vring.num);
541 
542     /* If their number is silly, that's a fatal mistake. */
543     if (*head >= vq->vring.num) {
544         virtio_error(vq->vdev, "Guest says index %u is available", *head);
545         return false;
546     }
547 
548     return true;
549 }
550 
551 enum {
552     VIRTQUEUE_READ_DESC_ERROR = -1,
553     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
554     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
555 };
556 
557 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
558                                     MemoryRegionCache *desc_cache, unsigned int max,
559                                     unsigned int *next)
560 {
561     /* If this descriptor says it doesn't chain, we're done. */
562     if (!(desc->flags & VRING_DESC_F_NEXT)) {
563         return VIRTQUEUE_READ_DESC_DONE;
564     }
565 
566     /* Check they're not leading us off end of descriptors. */
567     *next = desc->next;
568     /* Make sure compiler knows to grab that: we don't want it changing! */
569     smp_wmb();
570 
571     if (*next >= max) {
572         virtio_error(vdev, "Desc next is %u", *next);
573         return VIRTQUEUE_READ_DESC_ERROR;
574     }
575 
576     vring_desc_read(vdev, desc, desc_cache, *next);
577     return VIRTQUEUE_READ_DESC_MORE;
578 }
579 
580 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
581                                unsigned int *out_bytes,
582                                unsigned max_in_bytes, unsigned max_out_bytes)
583 {
584     VirtIODevice *vdev = vq->vdev;
585     unsigned int max, idx;
586     unsigned int total_bufs, in_total, out_total;
587     VRingMemoryRegionCaches *caches;
588     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
589     int64_t len = 0;
590     int rc;
591 
592     if (unlikely(!vq->vring.desc)) {
593         if (in_bytes) {
594             *in_bytes = 0;
595         }
596         if (out_bytes) {
597             *out_bytes = 0;
598         }
599         return;
600     }
601 
602     rcu_read_lock();
603     idx = vq->last_avail_idx;
604     total_bufs = in_total = out_total = 0;
605 
606     max = vq->vring.num;
607     caches = vring_get_region_caches(vq);
608     if (caches->desc.len < max * sizeof(VRingDesc)) {
609         virtio_error(vdev, "Cannot map descriptor ring");
610         goto err;
611     }
612 
613     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
614         MemoryRegionCache *desc_cache = &caches->desc;
615         unsigned int num_bufs;
616         VRingDesc desc;
617         unsigned int i;
618 
619         num_bufs = total_bufs;
620 
621         if (!virtqueue_get_head(vq, idx++, &i)) {
622             goto err;
623         }
624 
625         vring_desc_read(vdev, &desc, desc_cache, i);
626 
627         if (desc.flags & VRING_DESC_F_INDIRECT) {
628             if (desc.len % sizeof(VRingDesc)) {
629                 virtio_error(vdev, "Invalid size for indirect buffer table");
630                 goto err;
631             }
632 
633             /* If we've got too many, that implies a descriptor loop. */
634             if (num_bufs >= max) {
635                 virtio_error(vdev, "Looped descriptor");
636                 goto err;
637             }
638 
639             /* loop over the indirect descriptor table */
640             len = address_space_cache_init(&indirect_desc_cache,
641                                            vdev->dma_as,
642                                            desc.addr, desc.len, false);
643             desc_cache = &indirect_desc_cache;
644             if (len < desc.len) {
645                 virtio_error(vdev, "Cannot map indirect buffer");
646                 goto err;
647             }
648 
649             max = desc.len / sizeof(VRingDesc);
650             num_bufs = i = 0;
651             vring_desc_read(vdev, &desc, desc_cache, i);
652         }
653 
654         do {
655             /* If we've got too many, that implies a descriptor loop. */
656             if (++num_bufs > max) {
657                 virtio_error(vdev, "Looped descriptor");
658                 goto err;
659             }
660 
661             if (desc.flags & VRING_DESC_F_WRITE) {
662                 in_total += desc.len;
663             } else {
664                 out_total += desc.len;
665             }
666             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
667                 goto done;
668             }
669 
670             rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
671         } while (rc == VIRTQUEUE_READ_DESC_MORE);
672 
673         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
674             goto err;
675         }
676 
677         if (desc_cache == &indirect_desc_cache) {
678             address_space_cache_destroy(&indirect_desc_cache);
679             total_bufs++;
680         } else {
681             total_bufs = num_bufs;
682         }
683     }
684 
685     if (rc < 0) {
686         goto err;
687     }
688 
689 done:
690     address_space_cache_destroy(&indirect_desc_cache);
691     if (in_bytes) {
692         *in_bytes = in_total;
693     }
694     if (out_bytes) {
695         *out_bytes = out_total;
696     }
697     rcu_read_unlock();
698     return;
699 
700 err:
701     in_total = out_total = 0;
702     goto done;
703 }
704 
705 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
706                           unsigned int out_bytes)
707 {
708     unsigned int in_total, out_total;
709 
710     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
711     return in_bytes <= in_total && out_bytes <= out_total;
712 }
713 
714 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
715                                hwaddr *addr, struct iovec *iov,
716                                unsigned int max_num_sg, bool is_write,
717                                hwaddr pa, size_t sz)
718 {
719     bool ok = false;
720     unsigned num_sg = *p_num_sg;
721     assert(num_sg <= max_num_sg);
722 
723     if (!sz) {
724         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
725         goto out;
726     }
727 
728     while (sz) {
729         hwaddr len = sz;
730 
731         if (num_sg == max_num_sg) {
732             virtio_error(vdev, "virtio: too many write descriptors in "
733                                "indirect table");
734             goto out;
735         }
736 
737         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
738                                               is_write ?
739                                               DMA_DIRECTION_FROM_DEVICE :
740                                               DMA_DIRECTION_TO_DEVICE);
741         if (!iov[num_sg].iov_base) {
742             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
743             goto out;
744         }
745 
746         iov[num_sg].iov_len = len;
747         addr[num_sg] = pa;
748 
749         sz -= len;
750         pa += len;
751         num_sg++;
752     }
753     ok = true;
754 
755 out:
756     *p_num_sg = num_sg;
757     return ok;
758 }
759 
760 /* Only used by error code paths before we have a VirtQueueElement (therefore
761  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
762  * yet.
763  */
764 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
765                                     struct iovec *iov)
766 {
767     unsigned int i;
768 
769     for (i = 0; i < out_num + in_num; i++) {
770         int is_write = i >= out_num;
771 
772         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
773         iov++;
774     }
775 }
776 
777 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
778                                 hwaddr *addr, unsigned int *num_sg,
779                                 int is_write)
780 {
781     unsigned int i;
782     hwaddr len;
783 
784     for (i = 0; i < *num_sg; i++) {
785         len = sg[i].iov_len;
786         sg[i].iov_base = dma_memory_map(vdev->dma_as,
787                                         addr[i], &len, is_write ?
788                                         DMA_DIRECTION_FROM_DEVICE :
789                                         DMA_DIRECTION_TO_DEVICE);
790         if (!sg[i].iov_base) {
791             error_report("virtio: error trying to map MMIO memory");
792             exit(1);
793         }
794         if (len != sg[i].iov_len) {
795             error_report("virtio: unexpected memory split");
796             exit(1);
797         }
798     }
799 }
800 
801 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
802 {
803     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
804     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
805 }
806 
807 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
808 {
809     VirtQueueElement *elem;
810     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
811     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
812     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
813     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
814     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
815     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
816 
817     assert(sz >= sizeof(VirtQueueElement));
818     elem = g_malloc(out_sg_end);
819     elem->out_num = out_num;
820     elem->in_num = in_num;
821     elem->in_addr = (void *)elem + in_addr_ofs;
822     elem->out_addr = (void *)elem + out_addr_ofs;
823     elem->in_sg = (void *)elem + in_sg_ofs;
824     elem->out_sg = (void *)elem + out_sg_ofs;
825     return elem;
826 }
827 
828 void *virtqueue_pop(VirtQueue *vq, size_t sz)
829 {
830     unsigned int i, head, max;
831     VRingMemoryRegionCaches *caches;
832     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
833     MemoryRegionCache *desc_cache;
834     int64_t len;
835     VirtIODevice *vdev = vq->vdev;
836     VirtQueueElement *elem = NULL;
837     unsigned out_num, in_num;
838     hwaddr addr[VIRTQUEUE_MAX_SIZE];
839     struct iovec iov[VIRTQUEUE_MAX_SIZE];
840     VRingDesc desc;
841     int rc;
842 
843     if (unlikely(vdev->broken)) {
844         return NULL;
845     }
846     rcu_read_lock();
847     if (virtio_queue_empty_rcu(vq)) {
848         goto done;
849     }
850     /* Needed after virtio_queue_empty(), see comment in
851      * virtqueue_num_heads(). */
852     smp_rmb();
853 
854     /* When we start there are no input or output buffers. */
855     out_num = in_num = 0;
856 
857     max = vq->vring.num;
858 
859     if (vq->inuse >= vq->vring.num) {
860         virtio_error(vdev, "Virtqueue size exceeded");
861         goto done;
862     }
863 
864     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
865         goto done;
866     }
867 
868     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
869         vring_set_avail_event(vq, vq->last_avail_idx);
870     }
871 
872     i = head;
873 
874     caches = vring_get_region_caches(vq);
875     if (caches->desc.len < max * sizeof(VRingDesc)) {
876         virtio_error(vdev, "Cannot map descriptor ring");
877         goto done;
878     }
879 
880     desc_cache = &caches->desc;
881     vring_desc_read(vdev, &desc, desc_cache, i);
882     if (desc.flags & VRING_DESC_F_INDIRECT) {
883         if (desc.len % sizeof(VRingDesc)) {
884             virtio_error(vdev, "Invalid size for indirect buffer table");
885             goto done;
886         }
887 
888         /* loop over the indirect descriptor table */
889         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
890                                        desc.addr, desc.len, false);
891         desc_cache = &indirect_desc_cache;
892         if (len < desc.len) {
893             virtio_error(vdev, "Cannot map indirect buffer");
894             goto done;
895         }
896 
897         max = desc.len / sizeof(VRingDesc);
898         i = 0;
899         vring_desc_read(vdev, &desc, desc_cache, i);
900     }
901 
902     /* Collect all the descriptors */
903     do {
904         bool map_ok;
905 
906         if (desc.flags & VRING_DESC_F_WRITE) {
907             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
908                                         iov + out_num,
909                                         VIRTQUEUE_MAX_SIZE - out_num, true,
910                                         desc.addr, desc.len);
911         } else {
912             if (in_num) {
913                 virtio_error(vdev, "Incorrect order for descriptors");
914                 goto err_undo_map;
915             }
916             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
917                                         VIRTQUEUE_MAX_SIZE, false,
918                                         desc.addr, desc.len);
919         }
920         if (!map_ok) {
921             goto err_undo_map;
922         }
923 
924         /* If we've got too many, that implies a descriptor loop. */
925         if ((in_num + out_num) > max) {
926             virtio_error(vdev, "Looped descriptor");
927             goto err_undo_map;
928         }
929 
930         rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
931     } while (rc == VIRTQUEUE_READ_DESC_MORE);
932 
933     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
934         goto err_undo_map;
935     }
936 
937     /* Now copy what we have collected and mapped */
938     elem = virtqueue_alloc_element(sz, out_num, in_num);
939     elem->index = head;
940     for (i = 0; i < out_num; i++) {
941         elem->out_addr[i] = addr[i];
942         elem->out_sg[i] = iov[i];
943     }
944     for (i = 0; i < in_num; i++) {
945         elem->in_addr[i] = addr[out_num + i];
946         elem->in_sg[i] = iov[out_num + i];
947     }
948 
949     vq->inuse++;
950 
951     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
952 done:
953     address_space_cache_destroy(&indirect_desc_cache);
954     rcu_read_unlock();
955 
956     return elem;
957 
958 err_undo_map:
959     virtqueue_undo_map_desc(out_num, in_num, iov);
960     goto done;
961 }
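
/*
 * Sketch of the common single-element path (error handling omitted): a
 * device pops an element, reads the driver's buffers from out_sg, writes
 * any response into in_sg, and pushes the element back with the number
 * of bytes it wrote:
 *
 *   VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *   if (elem) {
 *       ... consume elem->out_sg[0..out_num), fill elem->in_sg[0..in_num) ...
 *       virtqueue_push(vq, elem, bytes_written);
 *       virtio_notify(vdev, vq);
 *       g_free(elem);
 *   }
 */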
962 
963 /* virtqueue_drop_all:
964  * @vq: The #VirtQueue
965  * Drops all queued buffers and reports them to the guest
966  * as completed. Useful when buffers cannot be
967  * processed but must be returned to the guest.
968  */
969 unsigned int virtqueue_drop_all(VirtQueue *vq)
970 {
971     unsigned int dropped = 0;
972     VirtQueueElement elem = {};
973     VirtIODevice *vdev = vq->vdev;
974     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
975 
976     if (unlikely(vdev->broken)) {
977         return 0;
978     }
979 
980     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
981         /* works similarly to virtqueue_pop but does not map buffers
982          * and does not allocate any memory */
983         smp_rmb();
984         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
985             break;
986         }
987         vq->inuse++;
988         vq->last_avail_idx++;
989         if (fEventIdx) {
990             vring_set_avail_event(vq, vq->last_avail_idx);
991         }
992         /* immediately push the element, nothing to unmap
993          * as both in_num and out_num are set to 0 */
994         virtqueue_push(vq, &elem, 0);
995         dropped++;
996     }
997 
998     return dropped;
999 }
1000 
1001 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1002  * it is what QEMU has always done by mistake.  We can change it sooner
1003  * or later by bumping the version number of the affected vm states.
1004  * In the meantime, since the in-memory layout of VirtQueueElement
1005  * has changed, we need to marshal to and from the layout that was
1006  * used before the change.
1007  */
1008 typedef struct VirtQueueElementOld {
1009     unsigned int index;
1010     unsigned int out_num;
1011     unsigned int in_num;
1012     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1013     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1014     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1015     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1016 } VirtQueueElementOld;
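
/*
 * Illustrative only: a device that keeps in-flight requests across
 * migration saves each element with qemu_put_virtqueue_element() and
 * recreates it on the destination with qemu_get_virtqueue_element(),
 * passing the size of the structure that embeds the VirtQueueElement:
 *
 *   qemu_put_virtqueue_element(f, elem);                         (save)
 *   elem = qemu_get_virtqueue_element(vdev, f, sizeof(*elem));   (load)
 *
 * The load path calls virtqueue_map() so that iov_base pointers are
 * regenerated in the destination's address space instead of migrated.
 */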
1017 
1018 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1019 {
1020     VirtQueueElement *elem;
1021     VirtQueueElementOld data;
1022     int i;
1023 
1024     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1025 
1026     /* TODO: teach all callers that this can fail, and return failure instead
1027      * of asserting here.
1028      * When we do, we might be able to re-enable NDEBUG below.
1029      */
1030 #ifdef NDEBUG
1031 #error building with NDEBUG is not supported
1032 #endif
1033     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1034     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1035 
1036     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1037     elem->index = data.index;
1038 
1039     for (i = 0; i < elem->in_num; i++) {
1040         elem->in_addr[i] = data.in_addr[i];
1041     }
1042 
1043     for (i = 0; i < elem->out_num; i++) {
1044         elem->out_addr[i] = data.out_addr[i];
1045     }
1046 
1047     for (i = 0; i < elem->in_num; i++) {
1048         /* Base is overwritten by virtqueue_map.  */
1049         elem->in_sg[i].iov_base = 0;
1050         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1051     }
1052 
1053     for (i = 0; i < elem->out_num; i++) {
1054         /* Base is overwritten by virtqueue_map.  */
1055         elem->out_sg[i].iov_base = 0;
1056         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1057     }
1058 
1059     virtqueue_map(vdev, elem);
1060     return elem;
1061 }
1062 
1063 void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
1064 {
1065     VirtQueueElementOld data;
1066     int i;
1067 
1068     memset(&data, 0, sizeof(data));
1069     data.index = elem->index;
1070     data.in_num = elem->in_num;
1071     data.out_num = elem->out_num;
1072 
1073     for (i = 0; i < elem->in_num; i++) {
1074         data.in_addr[i] = elem->in_addr[i];
1075     }
1076 
1077     for (i = 0; i < elem->out_num; i++) {
1078         data.out_addr[i] = elem->out_addr[i];
1079     }
1080 
1081     for (i = 0; i < elem->in_num; i++) {
1082         /* Base is overwritten by virtqueue_map when loading.  Do not
1083          * save it, as it would leak the QEMU address space layout.  */
1084         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1085     }
1086 
1087     for (i = 0; i < elem->out_num; i++) {
1088         /* Do not save iov_base as above.  */
1089         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1090     }
1091     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1092 }
1093 
1094 /* virtio device */
1095 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1096 {
1097     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1098     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1099 
1100     if (unlikely(vdev->broken)) {
1101         return;
1102     }
1103 
1104     if (k->notify) {
1105         k->notify(qbus->parent, vector);
1106     }
1107 }
1108 
1109 void virtio_update_irq(VirtIODevice *vdev)
1110 {
1111     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1112 }
1113 
1114 static int virtio_validate_features(VirtIODevice *vdev)
1115 {
1116     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1117 
1118     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1119         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1120         return -EFAULT;
1121     }
1122 
1123     if (k->validate_features) {
1124         return k->validate_features(vdev);
1125     } else {
1126         return 0;
1127     }
1128 }
1129 
1130 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1131 {
1132     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1133     trace_virtio_set_status(vdev, val);
1134 
1135     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1136         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1137             val & VIRTIO_CONFIG_S_FEATURES_OK) {
1138             int ret = virtio_validate_features(vdev);
1139 
1140             if (ret) {
1141                 return ret;
1142             }
1143         }
1144     }
1145     if (k->set_status) {
1146         k->set_status(vdev, val);
1147     }
1148     vdev->status = val;
1149     return 0;
1150 }
1151 
1152 bool target_words_bigendian(void);
1153 static enum virtio_device_endian virtio_default_endian(void)
1154 {
1155     if (target_words_bigendian()) {
1156         return VIRTIO_DEVICE_ENDIAN_BIG;
1157     } else {
1158         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1159     }
1160 }
1161 
1162 static enum virtio_device_endian virtio_current_cpu_endian(void)
1163 {
1164     CPUClass *cc = CPU_GET_CLASS(current_cpu);
1165 
1166     if (cc->virtio_is_big_endian(current_cpu)) {
1167         return VIRTIO_DEVICE_ENDIAN_BIG;
1168     } else {
1169         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1170     }
1171 }
1172 
1173 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
1174 {
1175     VRingMemoryRegionCaches *caches;
1176 
1177     caches = atomic_read(&vq->vring.caches);
1178     atomic_rcu_set(&vq->vring.caches, NULL);
1179     if (caches) {
1180         call_rcu(caches, virtio_free_region_cache, rcu);
1181     }
1182 }
1183 
1184 void virtio_reset(void *opaque)
1185 {
1186     VirtIODevice *vdev = opaque;
1187     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1188     int i;
1189 
1190     virtio_set_status(vdev, 0);
1191     if (current_cpu) {
1192         /* Guest initiated reset */
1193         vdev->device_endian = virtio_current_cpu_endian();
1194     } else {
1195         /* System reset */
1196         vdev->device_endian = virtio_default_endian();
1197     }
1198 
1199     if (k->reset) {
1200         k->reset(vdev);
1201     }
1202 
1203     vdev->broken = false;
1204     vdev->guest_features = 0;
1205     vdev->queue_sel = 0;
1206     vdev->status = 0;
1207     atomic_set(&vdev->isr, 0);
1208     vdev->config_vector = VIRTIO_NO_VECTOR;
1209     virtio_notify_vector(vdev, vdev->config_vector);
1210 
1211     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1212         vdev->vq[i].vring.desc = 0;
1213         vdev->vq[i].vring.avail = 0;
1214         vdev->vq[i].vring.used = 0;
1215         vdev->vq[i].last_avail_idx = 0;
1216         vdev->vq[i].shadow_avail_idx = 0;
1217         vdev->vq[i].used_idx = 0;
1218         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
1219         vdev->vq[i].signalled_used = 0;
1220         vdev->vq[i].signalled_used_valid = false;
1221         vdev->vq[i].notification = true;
1222         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
1223         vdev->vq[i].inuse = 0;
1224         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
1225     }
1226 }
1227 
1228 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
1229 {
1230     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1231     uint8_t val;
1232 
1233     if (addr + sizeof(val) > vdev->config_len) {
1234         return (uint32_t)-1;
1235     }
1236 
1237     k->get_config(vdev, vdev->config);
1238 
1239     val = ldub_p(vdev->config + addr);
1240     return val;
1241 }
1242 
1243 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
1244 {
1245     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1246     uint16_t val;
1247 
1248     if (addr + sizeof(val) > vdev->config_len) {
1249         return (uint32_t)-1;
1250     }
1251 
1252     k->get_config(vdev, vdev->config);
1253 
1254     val = lduw_p(vdev->config + addr);
1255     return val;
1256 }
1257 
1258 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
1259 {
1260     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1261     uint32_t val;
1262 
1263     if (addr + sizeof(val) > vdev->config_len) {
1264         return (uint32_t)-1;
1265     }
1266 
1267     k->get_config(vdev, vdev->config);
1268 
1269     val = ldl_p(vdev->config + addr);
1270     return val;
1271 }
1272 
1273 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1274 {
1275     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1276     uint8_t val = data;
1277 
1278     if (addr + sizeof(val) > vdev->config_len) {
1279         return;
1280     }
1281 
1282     stb_p(vdev->config + addr, val);
1283 
1284     if (k->set_config) {
1285         k->set_config(vdev, vdev->config);
1286     }
1287 }
1288 
1289 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1290 {
1291     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1292     uint16_t val = data;
1293 
1294     if (addr + sizeof(val) > vdev->config_len) {
1295         return;
1296     }
1297 
1298     stw_p(vdev->config + addr, val);
1299 
1300     if (k->set_config) {
1301         k->set_config(vdev, vdev->config);
1302     }
1303 }
1304 
1305 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1306 {
1307     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1308     uint32_t val = data;
1309 
1310     if (addr + sizeof(val) > vdev->config_len) {
1311         return;
1312     }
1313 
1314     stl_p(vdev->config + addr, val);
1315 
1316     if (k->set_config) {
1317         k->set_config(vdev, vdev->config);
1318     }
1319 }
1320 
1321 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
1322 {
1323     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1324     uint8_t val;
1325 
1326     if (addr + sizeof(val) > vdev->config_len) {
1327         return (uint32_t)-1;
1328     }
1329 
1330     k->get_config(vdev, vdev->config);
1331 
1332     val = ldub_p(vdev->config + addr);
1333     return val;
1334 }
1335 
1336 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
1337 {
1338     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1339     uint16_t val;
1340 
1341     if (addr + sizeof(val) > vdev->config_len) {
1342         return (uint32_t)-1;
1343     }
1344 
1345     k->get_config(vdev, vdev->config);
1346 
1347     val = lduw_le_p(vdev->config + addr);
1348     return val;
1349 }
1350 
1351 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
1352 {
1353     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1354     uint32_t val;
1355 
1356     if (addr + sizeof(val) > vdev->config_len) {
1357         return (uint32_t)-1;
1358     }
1359 
1360     k->get_config(vdev, vdev->config);
1361 
1362     val = ldl_le_p(vdev->config + addr);
1363     return val;
1364 }
1365 
1366 void virtio_config_modern_writeb(VirtIODevice *vdev,
1367                                  uint32_t addr, uint32_t data)
1368 {
1369     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1370     uint8_t val = data;
1371 
1372     if (addr + sizeof(val) > vdev->config_len) {
1373         return;
1374     }
1375 
1376     stb_p(vdev->config + addr, val);
1377 
1378     if (k->set_config) {
1379         k->set_config(vdev, vdev->config);
1380     }
1381 }
1382 
1383 void virtio_config_modern_writew(VirtIODevice *vdev,
1384                                  uint32_t addr, uint32_t data)
1385 {
1386     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1387     uint16_t val = data;
1388 
1389     if (addr + sizeof(val) > vdev->config_len) {
1390         return;
1391     }
1392 
1393     stw_le_p(vdev->config + addr, val);
1394 
1395     if (k->set_config) {
1396         k->set_config(vdev, vdev->config);
1397     }
1398 }
1399 
1400 void virtio_config_modern_writel(VirtIODevice *vdev,
1401                                  uint32_t addr, uint32_t data)
1402 {
1403     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1404     uint32_t val = data;
1405 
1406     if (addr + sizeof(val) > vdev->config_len) {
1407         return;
1408     }
1409 
1410     stl_le_p(vdev->config + addr, val);
1411 
1412     if (k->set_config) {
1413         k->set_config(vdev, vdev->config);
1414     }
1415 }
1416 
1417 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
1418 {
1419     vdev->vq[n].vring.desc = addr;
1420     virtio_queue_update_rings(vdev, n);
1421 }
1422 
1423 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
1424 {
1425     return vdev->vq[n].vring.desc;
1426 }
1427 
1428 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
1429                             hwaddr avail, hwaddr used)
1430 {
1431     vdev->vq[n].vring.desc = desc;
1432     vdev->vq[n].vring.avail = avail;
1433     vdev->vq[n].vring.used = used;
1434     virtio_init_region_cache(vdev, n);
1435 }
1436 
1437 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
1438 {
1439     /* Don't allow guest to flip queue between existent and
1440      * nonexistent states, or to set it to an invalid size.
1441      */
1442     if (!!num != !!vdev->vq[n].vring.num ||
1443         num > VIRTQUEUE_MAX_SIZE ||
1444         num < 0) {
1445         return;
1446     }
1447     vdev->vq[n].vring.num = num;
1448 }
1449 
1450 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
1451 {
1452     return QLIST_FIRST(&vdev->vector_queues[vector]);
1453 }
1454 
1455 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
1456 {
1457     return QLIST_NEXT(vq, node);
1458 }
1459 
1460 int virtio_queue_get_num(VirtIODevice *vdev, int n)
1461 {
1462     return vdev->vq[n].vring.num;
1463 }
1464 
1465 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
1466 {
1467     return vdev->vq[n].vring.num_default;
1468 }
1469 
1470 int virtio_get_num_queues(VirtIODevice *vdev)
1471 {
1472     int i;
1473 
1474     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1475         if (!virtio_queue_get_num(vdev, i)) {
1476             break;
1477         }
1478     }
1479 
1480     return i;
1481 }
1482 
1483 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
1484 {
1485     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1486     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1487 
1488     /* virtio-1 compliant devices cannot change the alignment */
1489     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1490         error_report("tried to modify queue alignment for virtio-1 device");
1491         return;
1492     }
1493     /* Check that the transport told us it was going to do this
1494      * (so a buggy transport will immediately assert rather than
1495      * silently failing to migrate this state)
1496      */
1497     assert(k->has_variable_vring_alignment);
1498 
1499     vdev->vq[n].vring.align = align;
1500     virtio_queue_update_rings(vdev, n);
1501 }
1502 
1503 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
1504 {
1505     if (vq->vring.desc && vq->handle_aio_output) {
1506         VirtIODevice *vdev = vq->vdev;
1507 
1508         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1509         return vq->handle_aio_output(vdev, vq);
1510     }
1511 
1512     return false;
1513 }
1514 
1515 static void virtio_queue_notify_vq(VirtQueue *vq)
1516 {
1517     if (vq->vring.desc && vq->handle_output) {
1518         VirtIODevice *vdev = vq->vdev;
1519 
1520         if (unlikely(vdev->broken)) {
1521             return;
1522         }
1523 
1524         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1525         vq->handle_output(vdev, vq);
1526     }
1527 }
1528 
1529 void virtio_queue_notify(VirtIODevice *vdev, int n)
1530 {
1531     virtio_queue_notify_vq(&vdev->vq[n]);
1532 }
1533 
1534 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
1535 {
1536     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
1537         VIRTIO_NO_VECTOR;
1538 }
1539 
1540 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
1541 {
1542     VirtQueue *vq = &vdev->vq[n];
1543 
1544     if (n < VIRTIO_QUEUE_MAX) {
1545         if (vdev->vector_queues &&
1546             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
1547             QLIST_REMOVE(vq, node);
1548         }
1549         vdev->vq[n].vector = vector;
1550         if (vdev->vector_queues &&
1551             vector != VIRTIO_NO_VECTOR) {
1552             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
1553         }
1554     }
1555 }
1556 
1557 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
1558                             VirtIOHandleOutput handle_output)
1559 {
1560     int i;
1561 
1562     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1563         if (vdev->vq[i].vring.num == 0) {
1564             break;
        }
1565     }
1566 
1567     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
1568         abort();
    }
1569 
1570     vdev->vq[i].vring.num = queue_size;
1571     vdev->vq[i].vring.num_default = queue_size;
1572     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
1573     vdev->vq[i].handle_output = handle_output;
1574     vdev->vq[i].handle_aio_output = NULL;
1575 
1576     return &vdev->vq[i];
1577 }
1578 
1579 void virtio_del_queue(VirtIODevice *vdev, int n)
1580 {
1581     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
1582         abort();
1583     }
1584 
1585     vdev->vq[n].vring.num = 0;
1586     vdev->vq[n].vring.num_default = 0;
1587 }
1588 
1589 static void virtio_set_isr(VirtIODevice *vdev, int value)
1590 {
1591     uint8_t old = atomic_read(&vdev->isr);
1592 
1593     /* Do not write ISR if it does not change, so that its cacheline remains
1594      * shared in the common case where the guest does not read it.
1595      */
1596     if ((old & value) != value) {
1597         atomic_or(&vdev->isr, value);
1598     }
1599 }
1600 
1601 /* Called within rcu_read_lock().  */
1602 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
1603 {
1604     uint16_t old, new;
1605     bool v;
1606     /* We need to expose used array entries before checking used event. */
1607     smp_mb();
1608     /* Always notify when the queue is empty (if the feature was negotiated) */
1609     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1610         !vq->inuse && virtio_queue_empty(vq)) {
1611         return true;
1612     }
1613 
1614     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1615         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1616     }
1617 
1618     v = vq->signalled_used_valid;
1619     vq->signalled_used_valid = true;
1620     old = vq->signalled_used;
1621     new = vq->signalled_used = vq->used_idx;
1622     return !v || vring_need_event(vring_get_used_event(vq), new, old);
1623 }
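
/*
 * With VIRTIO_RING_F_EVENT_IDX, vring_need_event() implements the
 * wrap-safe comparison from the virtio specification, equivalent to:
 *
 *   (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * i.e. signal only if the used_event index published by the driver lies
 * in the window (old, new] of entries added to the used ring since the
 * last notification.
 */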
1624 
1625 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
1626 {
1627     bool should_notify;
1628     rcu_read_lock();
1629     should_notify = virtio_should_notify(vdev, vq);
1630     rcu_read_unlock();
1631 
1632     if (!should_notify) {
1633         return;
1634     }
1635 
1636     trace_virtio_notify_irqfd(vdev, vq);
1637 
1638     /*
1639      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
1640      * Windows drivers included in virtio-win 1.8.0 (circa 2015) are
1641      * incorrectly polling this bit during crashdump and hibernation
1642      * in MSI mode, causing a hang if this bit is never updated.
1643      * Recent releases of Windows do not really shut down, but rather
1644      * log out and hibernate to make the next startup faster.  Hence,
1645      * this manifested as a more serious hang during shutdown with
1646      *
1647      * Next driver release from 2016 fixed this problem, so working around it
1648      * is not a must, but it's easy to do so let's do it here.
1649      *
1650      * Note: it's safe to update ISR from any thread as it was switched
1651      * to an atomic operation.
1652      */
1653     virtio_set_isr(vq->vdev, 0x1);
1654     event_notifier_set(&vq->guest_notifier);
1655 }
1656 
1657 static void virtio_irq(VirtQueue *vq)
1658 {
1659     virtio_set_isr(vq->vdev, 0x1);
1660     virtio_notify_vector(vq->vdev, vq->vector);
1661 }
1662 
1663 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
1664 {
1665     bool should_notify;
1666     rcu_read_lock();
1667     should_notify = virtio_should_notify(vdev, vq);
1668     rcu_read_unlock();
1669 
1670     if (!should_notify) {
1671         return;
1672     }
1673 
1674     trace_virtio_notify(vdev, vq);
1675     virtio_irq(vq);
1676 }
1677 
1678 void virtio_notify_config(VirtIODevice *vdev)
1679 {
1680     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1681         return;
    }
1682 
1683     virtio_set_isr(vdev, 0x3);
1684     vdev->generation++;
1685     virtio_notify_vector(vdev, vdev->config_vector);
1686 }
1687 
1688 static bool virtio_device_endian_needed(void *opaque)
1689 {
1690     VirtIODevice *vdev = opaque;
1691 
1692     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
1693     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1694         return vdev->device_endian != virtio_default_endian();
1695     }
1696     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
1697     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
1698 }
1699 
1700 static bool virtio_64bit_features_needed(void *opaque)
1701 {
1702     VirtIODevice *vdev = opaque;
1703 
1704     return (vdev->host_features >> 32) != 0;
1705 }
1706 
1707 static bool virtio_virtqueue_needed(void *opaque)
1708 {
1709     VirtIODevice *vdev = opaque;
1710 
1711     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
1712 }
1713 
1714 static bool virtio_ringsize_needed(void *opaque)
1715 {
1716     VirtIODevice *vdev = opaque;
1717     int i;
1718 
1719     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1720         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
1721             return true;
1722         }
1723     }
1724     return false;
1725 }
1726 
1727 static bool virtio_extra_state_needed(void *opaque)
1728 {
1729     VirtIODevice *vdev = opaque;
1730     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1731     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1732 
1733     return k->has_extra_state &&
1734         k->has_extra_state(qbus->parent);
1735 }
1736 
1737 static bool virtio_broken_needed(void *opaque)
1738 {
1739     VirtIODevice *vdev = opaque;
1740 
1741     return vdev->broken;
1742 }
1743 
1744 static const VMStateDescription vmstate_virtqueue = {
1745     .name = "virtqueue_state",
1746     .version_id = 1,
1747     .minimum_version_id = 1,
1748     .fields = (VMStateField[]) {
1749         VMSTATE_UINT64(vring.avail, struct VirtQueue),
1750         VMSTATE_UINT64(vring.used, struct VirtQueue),
1751         VMSTATE_END_OF_LIST()
1752     }
1753 };
1754 
1755 static const VMStateDescription vmstate_virtio_virtqueues = {
1756     .name = "virtio/virtqueues",
1757     .version_id = 1,
1758     .minimum_version_id = 1,
1759     .needed = &virtio_virtqueue_needed,
1760     .fields = (VMStateField[]) {
1761         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1762                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
1763         VMSTATE_END_OF_LIST()
1764     }
1765 };
1766 
1767 static const VMStateDescription vmstate_ringsize = {
1768     .name = "ringsize_state",
1769     .version_id = 1,
1770     .minimum_version_id = 1,
1771     .fields = (VMStateField[]) {
1772         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
1773         VMSTATE_END_OF_LIST()
1774     }
1775 };
1776 
1777 static const VMStateDescription vmstate_virtio_ringsize = {
1778     .name = "virtio/ringsize",
1779     .version_id = 1,
1780     .minimum_version_id = 1,
1781     .needed = &virtio_ringsize_needed,
1782     .fields = (VMStateField[]) {
1783         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1784                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
1785         VMSTATE_END_OF_LIST()
1786     }
1787 };
1788 
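/*
 * Transport-specific "extra" state is serialized through a VMStateInfo
 * pair that simply delegates to the virtio-bus class hooks; the subsection
 * is only emitted when the transport implements has_extra_state() and it
 * reports state to save.
 */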
1789 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
1790                            VMStateField *field)
1791 {
1792     VirtIODevice *vdev = pv;
1793     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1794     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1795 
1796     if (!k->load_extra_state) {
1797         return -1;
1798     } else {
1799         return k->load_extra_state(qbus->parent, f);
1800     }
1801 }
1802 
1803 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
1804                            VMStateField *field, QJSON *vmdesc)
1805 {
1806     VirtIODevice *vdev = pv;
1807     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1808     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1809 
1810     k->save_extra_state(qbus->parent, f);
1811     return 0;
1812 }
1813 
1814 static const VMStateInfo vmstate_info_extra_state = {
1815     .name = "virtqueue_extra_state",
1816     .get = get_extra_state,
1817     .put = put_extra_state,
1818 };
1819 
1820 static const VMStateDescription vmstate_virtio_extra_state = {
1821     .name = "virtio/extra_state",
1822     .version_id = 1,
1823     .minimum_version_id = 1,
1824     .needed = &virtio_extra_state_needed,
1825     .fields = (VMStateField[]) {
1826         {
1827             .name         = "extra_state",
1828             .version_id   = 0,
1829             .field_exists = NULL,
1830             .size         = 0,
1831             .info         = &vmstate_info_extra_state,
1832             .flags        = VMS_SINGLE,
1833             .offset       = 0,
1834         },
1835         VMSTATE_END_OF_LIST()
1836     }
1837 };
1838 
1839 static const VMStateDescription vmstate_virtio_device_endian = {
1840     .name = "virtio/device_endian",
1841     .version_id = 1,
1842     .minimum_version_id = 1,
1843     .needed = &virtio_device_endian_needed,
1844     .fields = (VMStateField[]) {
1845         VMSTATE_UINT8(device_endian, VirtIODevice),
1846         VMSTATE_END_OF_LIST()
1847     }
1848 };
1849 
1850 static const VMStateDescription vmstate_virtio_64bit_features = {
1851     .name = "virtio/64bit_features",
1852     .version_id = 1,
1853     .minimum_version_id = 1,
1854     .needed = &virtio_64bit_features_needed,
1855     .fields = (VMStateField[]) {
1856         VMSTATE_UINT64(guest_features, VirtIODevice),
1857         VMSTATE_END_OF_LIST()
1858     }
1859 };
1860 
1861 static const VMStateDescription vmstate_virtio_broken = {
1862     .name = "virtio/broken",
1863     .version_id = 1,
1864     .minimum_version_id = 1,
1865     .needed = &virtio_broken_needed,
1866     .fields = (VMStateField[]) {
1867         VMSTATE_BOOL(broken, VirtIODevice),
1868         VMSTATE_END_OF_LIST()
1869     }
1870 };
1871 
1872 static const VMStateDescription vmstate_virtio = {
1873     .name = "virtio",
1874     .version_id = 1,
1875     .minimum_version_id = 1,
1876     .minimum_version_id_old = 1,
1877     .fields = (VMStateField[]) {
1878         VMSTATE_END_OF_LIST()
1879     },
1880     .subsections = (const VMStateDescription*[]) {
1881         &vmstate_virtio_device_endian,
1882         &vmstate_virtio_64bit_features,
1883         &vmstate_virtio_virtqueues,
1884         &vmstate_virtio_ringsize,
1885         &vmstate_virtio_broken,
1886         &vmstate_virtio_extra_state,
1887         NULL
1888     }
1889 };
1890 
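/*
 * Save the common virtio state.  The legacy wire format is, in order:
 * transport config, status, ISR, queue_sel, the low 32 guest feature bits,
 * the config space, the number of in-use virtqueues, and per queue the
 * ring size, optional alignment, desc address, last_avail_idx and any
 * transport per-queue state.  Device-specific state follows, and
 * VIRTIO 1.0-only fields travel in the vmstate subsections above.
 */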
1891 void virtio_save(VirtIODevice *vdev, QEMUFile *f)
1892 {
1893     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1894     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1895     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1896     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
1897     int i;
1898 
1899     if (k->save_config) {
1900         k->save_config(qbus->parent, f);
1901     }
1902 
1903     qemu_put_8s(f, &vdev->status);
1904     qemu_put_8s(f, &vdev->isr);
1905     qemu_put_be16s(f, &vdev->queue_sel);
1906     qemu_put_be32s(f, &guest_features_lo);
1907     qemu_put_be32(f, vdev->config_len);
1908     qemu_put_buffer(f, vdev->config, vdev->config_len);
1909 
1910     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1911         if (vdev->vq[i].vring.num == 0)
1912             break;
1913     }
1914 
1915     qemu_put_be32(f, i);
1916 
1917     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1918         if (vdev->vq[i].vring.num == 0)
1919             break;
1920 
1921         qemu_put_be32(f, vdev->vq[i].vring.num);
1922         if (k->has_variable_vring_alignment) {
1923             qemu_put_be32(f, vdev->vq[i].vring.align);
1924         }
1925         /*
1926          * Save desc now, the rest of the ring addresses are saved in
1927          * subsections for VIRTIO-1 devices.
1928          */
1929         qemu_put_be64(f, vdev->vq[i].vring.desc);
1930         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
1931         if (k->save_queue) {
1932             k->save_queue(qbus->parent, i, f);
1933         }
1934     }
1935 
1936     if (vdc->save != NULL) {
1937         vdc->save(vdev, f);
1938     }
1939 
1940     if (vdc->vmsd) {
1941         vmstate_save_state(f, vdc->vmsd, vdev, NULL);
1942     }
1943 
1944     /* Subsections */
1945     vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
1946 }
1947 
1948 /* A wrapper for use as a VMState .put function */
1949 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
1950                               VMStateField *field, QJSON *vmdesc)
1951 {
1952     virtio_save(VIRTIO_DEVICE(opaque), f);
1953 
1954     return 0;
1955 }
1956 
1957 /* A wrapper for use as a VMState .get function */
1958 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
1959                              VMStateField *field)
1960 {
1961     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
1962     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
1963 
1964     return virtio_load(vdev, f, dc->vmsd->version_id);
1965 }
1966 
1967 const VMStateInfo virtio_vmstate_info = {
1968     .name = "virtio",
1969     .get = virtio_device_get,
1970     .put = virtio_device_put,
1971 };
1972 
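/*
 * Accept whatever subset of the requested feature bits the host actually
 * offers and remember it in guest_features; the return value only reports
 * whether the guest asked for something that had to be dropped.
 */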
1973 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
1974 {
1975     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1976     bool bad = (val & ~(vdev->host_features)) != 0;
1977 
1978     val &= vdev->host_features;
1979     if (k->set_features) {
1980         k->set_features(vdev, val);
1981     }
1982     vdev->guest_features = val;
1983     return bad ? -1 : 0;
1984 }
1985 
1986 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
1987 {
1988     /*
1989      * The driver must not attempt to set features after feature negotiation
1990      * has finished.
1991      */
1992     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
1993         return -EINVAL;
1994     }
1995     return virtio_set_features_nocheck(vdev, val);
1996 }
1997 
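/*
 * Counterpart to virtio_save().  The fixed-format fields are read first,
 * followed by device state and subsections; the ring addresses and indices
 * are only sanity-checked once the subsections (and therefore the final
 * endianness and feature bits) are known.
 */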
1998 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
1999 {
2000     int i, ret;
2001     int32_t config_len;
2002     uint32_t num;
2003     uint32_t features;
2004     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2005     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2006     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2007 
2008     /*
2009      * We poison the endianness to ensure it does not get used before
2010      * subsections have been loaded.
2011      */
2012     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2013 
2014     if (k->load_config) {
2015         ret = k->load_config(qbus->parent, f);
2016         if (ret)
2017             return ret;
2018     }
2019 
2020     qemu_get_8s(f, &vdev->status);
2021     qemu_get_8s(f, &vdev->isr);
2022     qemu_get_be16s(f, &vdev->queue_sel);
2023     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
2024         return -1;
2025     }
2026     qemu_get_be32s(f, &features);
2027 
2028     /*
2029      * Temporarily set guest_features low bits - needed by
2030      * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
2031      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
2032      *
2033      * Note: devices should always test host features in the future - don't
2034      * create new dependencies like this.
2035      */
2036     vdev->guest_features = features;
2037 
2038     config_len = qemu_get_be32(f);
2039 
2040     /*
2041      * There are cases where the incoming config can be bigger or smaller
2042      * than what we have; so load what we have space for, and skip
2043      * any excess that's in the stream.
2044      */
2045     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
2046 
2047     while (config_len > vdev->config_len) {
2048         qemu_get_byte(f);
2049         config_len--;
2050     }
2051 
2052     num = qemu_get_be32(f);
2053 
2054     if (num > VIRTIO_QUEUE_MAX) {
2055         error_report("Invalid number of virtqueues: 0x%x", num);
2056         return -1;
2057     }
2058 
2059     for (i = 0; i < num; i++) {
2060         vdev->vq[i].vring.num = qemu_get_be32(f);
2061         if (k->has_variable_vring_alignment) {
2062             vdev->vq[i].vring.align = qemu_get_be32(f);
2063         }
2064         vdev->vq[i].vring.desc = qemu_get_be64(f);
2065         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
2066         vdev->vq[i].signalled_used_valid = false;
2067         vdev->vq[i].notification = true;
2068 
2069         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
2070             error_report("VQ %d address 0x0 "
2071                          "inconsistent with Host index 0x%x",
2072                          i, vdev->vq[i].last_avail_idx);
2073             return -1;
2074         }
2075         if (k->load_queue) {
2076             ret = k->load_queue(qbus->parent, i, f);
2077             if (ret)
2078                 return ret;
2079         }
2080     }
2081 
2082     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2083 
2084     if (vdc->load != NULL) {
2085         ret = vdc->load(vdev, f, version_id);
2086         if (ret) {
2087             return ret;
2088         }
2089     }
2090 
2091     if (vdc->vmsd) {
2092         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
2093         if (ret) {
2094             return ret;
2095         }
2096     }
2097 
2098     /* Subsections */
2099     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
2100     if (ret) {
2101         return ret;
2102     }
2103 
2104     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
2105         vdev->device_endian = virtio_default_endian();
2106     }
2107 
2108     if (virtio_64bit_features_needed(vdev)) {
2109         /*
2110          * Subsection load filled vdev->guest_features.  Run them
2111          * through virtio_set_features to sanity-check them against
2112          * host_features.
2113          */
2114         uint64_t features64 = vdev->guest_features;
2115         if (virtio_set_features_nocheck(vdev, features64) < 0) {
2116             error_report("Features 0x%" PRIx64 " unsupported. "
2117                          "Allowed features: 0x%" PRIx64,
2118                          features64, vdev->host_features);
2119             return -1;
2120         }
2121     } else {
2122         if (virtio_set_features_nocheck(vdev, features) < 0) {
2123             error_report("Features 0x%x unsupported. "
2124                          "Allowed features: 0x%" PRIx64,
2125                          features, vdev->host_features);
2126             return -1;
2127         }
2128     }
2129 
2130     rcu_read_lock();
2131     for (i = 0; i < num; i++) {
2132         if (vdev->vq[i].vring.desc) {
2133             uint16_t nheads;
2134 
2135             /*
2136              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
2137              * only the region cache needs to be set up.  Legacy devices need
2138              * to calculate used and avail ring addresses based on the desc
2139              * address.
2140              */
2141             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2142                 virtio_init_region_cache(vdev, i);
2143             } else {
2144                 virtio_queue_update_rings(vdev, i);
2145             }
2146 
2147             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
2148             /* Check it isn't doing strange things with descriptor numbers. */
2149             if (nheads > vdev->vq[i].vring.num) {
2150                 error_report("VQ %d size 0x%x Guest index 0x%x "
2151                              "inconsistent with Host index 0x%x: delta 0x%x",
2152                              i, vdev->vq[i].vring.num,
2153                              vring_avail_idx(&vdev->vq[i]),
2154                              vdev->vq[i].last_avail_idx, nheads);
2155                 return -1;
2156             }
2157             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
2158             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
2159 
2160             /*
2161              * Some devices migrate VirtQueueElements that have been popped
2162              * from the avail ring but not yet returned to the used ring.
2163              * Since max ring size < UINT16_MAX it's safe to use modulo
2164              * UINT16_MAX + 1 subtraction.
2165              */
2166             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
2167                                 vdev->vq[i].used_idx);
2168             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
2169                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
2170                              "used_idx 0x%x",
2171                              i, vdev->vq[i].vring.num,
2172                              vdev->vq[i].last_avail_idx,
2173                              vdev->vq[i].used_idx);
2174                 return -1;
2175             }
2176         }
2177     }
2178     rcu_read_unlock();
2179 
2180     return 0;
2181 }
2182 
2183 void virtio_cleanup(VirtIODevice *vdev)
2184 {
2185     qemu_del_vm_change_state_handler(vdev->vmstate);
2186 }
2187 
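/*
 * Propagate VM run-state changes to the device and transport: on start the
 * device status is re-applied before the transport's vmstate_change hook
 * runs, on stop the order is reversed.
 */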
2188 static void virtio_vmstate_change(void *opaque, int running, RunState state)
2189 {
2190     VirtIODevice *vdev = opaque;
2191     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2192     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2193     bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
2194     vdev->vm_running = running;
2195 
2196     if (backend_run) {
2197         virtio_set_status(vdev, vdev->status);
2198     }
2199 
2200     if (k->vmstate_change) {
2201         k->vmstate_change(qbus->parent, backend_run);
2202     }
2203 
2204     if (!backend_run) {
2205         virtio_set_status(vdev, vdev->status);
2206     }
2207 }
2208 
2209 void virtio_instance_init_common(Object *proxy_obj, void *data,
2210                                  size_t vdev_size, const char *vdev_name)
2211 {
2212     DeviceState *vdev = data;
2213 
2214     object_initialize(vdev, vdev_size, vdev_name);
2215     object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
2216     object_unref(OBJECT(vdev));
2217     qdev_alias_all_properties(vdev, proxy_obj);
2218 }
2219 
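/*
 * Common initialization for every virtio device: allocate the fixed-size
 * virtqueue array and, when the transport reports interrupt vectors, the
 * per-vector queue bookkeeping; clear status and ISR, allocate the config
 * space and register the VM state change handler.
 */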
2220 void virtio_init(VirtIODevice *vdev, const char *name,
2221                  uint16_t device_id, size_t config_size)
2222 {
2223     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2224     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2225     int i;
2226     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
2227 
2228     if (nvectors) {
2229         vdev->vector_queues =
2230             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
2231     }
2232 
2233     vdev->device_id = device_id;
2234     vdev->status = 0;
2235     atomic_set(&vdev->isr, 0);
2236     vdev->queue_sel = 0;
2237     vdev->config_vector = VIRTIO_NO_VECTOR;
2238     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
2239     vdev->vm_running = runstate_is_running();
2240     vdev->broken = false;
2241     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2242         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
2243         vdev->vq[i].vdev = vdev;
2244         vdev->vq[i].queue_index = i;
2245     }
2246 
2247     vdev->name = name;
2248     vdev->config_len = config_size;
2249     if (vdev->config_len) {
2250         vdev->config = g_malloc0(config_size);
2251     } else {
2252         vdev->config = NULL;
2253     }
2254     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
2255                                                      vdev);
2256     vdev->device_endian = virtio_default_endian();
2257     vdev->use_guest_notifier_mask = true;
2258 }
2259 
2260 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
2261 {
2262     return vdev->vq[n].vring.desc;
2263 }
2264 
2265 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
2266 {
2267     return vdev->vq[n].vring.avail;
2268 }
2269 
2270 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
2271 {
2272     return vdev->vq[n].vring.used;
2273 }
2274 
2275 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
2276 {
2277     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
2278 }
2279 
2280 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
2281 {
2282     return offsetof(VRingAvail, ring) +
2283         sizeof(uint16_t) * vdev->vq[n].vring.num;
2284 }
2285 
2286 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
2287 {
2288     return offsetof(VRingUsed, ring) +
2289         sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
2290 }
2291 
2292 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
2293 {
2294     return vdev->vq[n].last_avail_idx;
2295 }
2296 
2297 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
2298 {
2299     vdev->vq[n].last_avail_idx = idx;
2300     vdev->vq[n].shadow_avail_idx = idx;
2301 }
2302 
2303 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
2304 {
2305     rcu_read_lock();
2306     if (vdev->vq[n].vring.desc) {
2307         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
2308     }
2309     rcu_read_unlock();
2310 }
2311 
2312 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
2313 {
2314     vdev->vq[n].signalled_used_valid = false;
2315 }
2316 
2317 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
2318 {
2319     return vdev->vq + n;
2320 }
2321 
2322 uint16_t virtio_get_queue_index(VirtQueue *vq)
2323 {
2324     return vq->queue_index;
2325 }
2326 
2327 static void virtio_queue_guest_notifier_read(EventNotifier *n)
2328 {
2329     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
2330     if (event_notifier_test_and_clear(n)) {
2331         virtio_irq(vq);
2332     }
2333 }
2334 
2335 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
2336                                                 bool with_irqfd)
2337 {
2338     if (assign && !with_irqfd) {
2339         event_notifier_set_handler(&vq->guest_notifier,
2340                                    virtio_queue_guest_notifier_read);
2341     } else {
2342         event_notifier_set_handler(&vq->guest_notifier, NULL);
2343     }
2344     if (!assign) {
2345         /* Test and clear notifier before closing it,
2346          * in case poll callback didn't have time to run. */
2347         virtio_queue_guest_notifier_read(&vq->guest_notifier);
2348     }
2349 }
2350 
2351 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
2352 {
2353     return &vq->guest_notifier;
2354 }
2355 
2356 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
2357 {
2358     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2359     if (event_notifier_test_and_clear(n)) {
2360         virtio_queue_notify_aio_vq(vq);
2361     }
2362 }
2363 
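/*
 * While the AioContext is in its polling phase the guest is told not to
 * kick the host notifier: notifications are disabled in poll_begin and
 * again after every poll run, then re-enabled in poll_end, after which the
 * caller polls once more to catch requests that raced with re-enabling.
 */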
2364 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
2365 {
2366     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2367 
2368     virtio_queue_set_notification(vq, 0);
2369 }
2370 
2371 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
2372 {
2373     EventNotifier *n = opaque;
2374     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2375     bool progress;
2376 
2377     if (!vq->vring.desc || virtio_queue_empty(vq)) {
2378         return false;
2379     }
2380 
2381     progress = virtio_queue_notify_aio_vq(vq);
2382 
2383     /* In case the handler function re-enabled notifications */
2384     virtio_queue_set_notification(vq, 0);
2385     return progress;
2386 }
2387 
2388 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
2389 {
2390     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2391 
2392     /* Caller polls once more after this to catch requests that race with us */
2393     virtio_queue_set_notification(vq, 1);
2394 }
2395 
2396 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
2397                                                 VirtIOHandleAIOOutput handle_output)
2398 {
2399     if (handle_output) {
2400         vq->handle_aio_output = handle_output;
2401         aio_set_event_notifier(ctx, &vq->host_notifier, true,
2402                                virtio_queue_host_notifier_aio_read,
2403                                virtio_queue_host_notifier_aio_poll);
2404         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
2405                                     virtio_queue_host_notifier_aio_poll_begin,
2406                                     virtio_queue_host_notifier_aio_poll_end);
2407     } else {
2408         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
2409         /* Test and clear notifier after disabling the event handler,
2410          * in case the poll callback didn't have time to run. */
2411         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
2412         vq->handle_aio_output = NULL;
2413     }
2414 }
2415 
2416 void virtio_queue_host_notifier_read(EventNotifier *n)
2417 {
2418     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2419     if (event_notifier_test_and_clear(n)) {
2420         virtio_queue_notify_vq(vq);
2421     }
2422 }
2423 
2424 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
2425 {
2426     return &vq->host_notifier;
2427 }
2428 
2429 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
2430 {
2431     g_free(vdev->bus_name);
2432     vdev->bus_name = g_strdup(bus_name);
2433 }
2434 
2435 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
2436 {
2437     va_list ap;
2438 
2439     va_start(ap, fmt);
2440     error_vreport(fmt, ap);
2441     va_end(ap);
2442 
2443     vdev->broken = true;
2444 
2445     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2446         virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
2447         virtio_notify_config(vdev);
2448     }
2449 }
2450 
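/*
 * Rebuild the vring region caches of every in-use queue whenever the
 * device's DMA address space layout changes.
 */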
2451 static void virtio_memory_listener_commit(MemoryListener *listener)
2452 {
2453     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
2454     int i;
2455 
2456     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2457         if (vdev->vq[i].vring.num == 0) {
2458             break;
2459         }
2460         virtio_init_region_cache(vdev, i);
2461     }
2462 }
2463 
2464 static void virtio_device_realize(DeviceState *dev, Error **errp)
2465 {
2466     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2467     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2468     Error *err = NULL;
2469 
2470     /* Devices should either use vmsd or the load/save methods */
2471     assert(!vdc->vmsd || !vdc->load);
2472 
2473     if (vdc->realize != NULL) {
2474         vdc->realize(dev, &err);
2475         if (err != NULL) {
2476             error_propagate(errp, err);
2477             return;
2478         }
2479     }
2480 
2481     virtio_bus_device_plugged(vdev, &err);
2482     if (err != NULL) {
2483         error_propagate(errp, err);
2484         return;
2485     }
2486 
2487     vdev->listener.commit = virtio_memory_listener_commit;
2488     memory_listener_register(&vdev->listener, vdev->dma_as);
2489 }
2490 
2491 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
2492 {
2493     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2494     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2495     Error *err = NULL;
2496 
2497     virtio_bus_device_unplugged(vdev);
2498 
2499     if (vdc->unrealize != NULL) {
2500         vdc->unrealize(dev, &err);
2501         if (err != NULL) {
2502             error_propagate(errp, err);
2503             return;
2504         }
2505     }
2506 
2507     g_free(vdev->bus_name);
2508     vdev->bus_name = NULL;
2509 }
2510 
2511 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
2512 {
2513     int i;
2514     if (!vdev->vq) {
2515         return;
2516     }
2517 
2518     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2519         if (vdev->vq[i].vring.num == 0) {
2520             break;
2521         }
2522         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2523     }
2524     g_free(vdev->vq);
2525 }
2526 
2527 static void virtio_device_instance_finalize(Object *obj)
2528 {
2529     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
2530 
2531     memory_listener_unregister(&vdev->listener);
2532     virtio_device_free_virtqueues(vdev);
2533 
2534     g_free(vdev->config);
2535     g_free(vdev->vector_queues);
2536 }
2537 
2538 static Property virtio_properties[] = {
2539     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
2540     DEFINE_PROP_END_OF_LIST(),
2541 };
2542 
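/*
 * Default ioeventfd start implementation: bind a host notifier to every
 * in-use virtqueue, then kick each queue once so requests already sitting
 * in the vring are processed; on failure, unwind the notifiers assigned
 * so far.
 */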
2543 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
2544 {
2545     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2546     int n, r, err;
2547 
2548     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2549         VirtQueue *vq = &vdev->vq[n];
2550         if (!virtio_queue_get_num(vdev, n)) {
2551             continue;
2552         }
2553         r = virtio_bus_set_host_notifier(qbus, n, true);
2554         if (r < 0) {
2555             err = r;
2556             goto assign_error;
2557         }
2558         event_notifier_set_handler(&vq->host_notifier,
2559                                    virtio_queue_host_notifier_read);
2560     }
2561 
2562     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2563         /* Kick right away to begin processing requests already in vring */
2564         VirtQueue *vq = &vdev->vq[n];
2565         if (!vq->vring.num) {
2566             continue;
2567         }
2568         event_notifier_set(&vq->host_notifier);
2569     }
2570     return 0;
2571 
2572 assign_error:
2573     while (--n >= 0) {
2574         VirtQueue *vq = &vdev->vq[n];
2575         if (!virtio_queue_get_num(vdev, n)) {
2576             continue;
2577         }
2578 
2579         event_notifier_set_handler(&vq->host_notifier, NULL);
2580         r = virtio_bus_set_host_notifier(qbus, n, false);
2581         assert(r >= 0);
2582     }
2583     return err;
2584 }
2585 
2586 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
2587 {
2588     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2589     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2590 
2591     return virtio_bus_start_ioeventfd(vbus);
2592 }
2593 
2594 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
2595 {
2596     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2597     int n, r;
2598 
2599     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2600         VirtQueue *vq = &vdev->vq[n];
2601 
2602         if (!virtio_queue_get_num(vdev, n)) {
2603             continue;
2604         }
2605         event_notifier_set_handler(&vq->host_notifier, NULL);
2606         r = virtio_bus_set_host_notifier(qbus, n, false);
2607         assert(r >= 0);
2608     }
2609 }
2610 
2611 void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
2612 {
2613     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2614     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2615 
2616     virtio_bus_stop_ioeventfd(vbus);
2617 }
2618 
2619 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
2620 {
2621     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2622     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2623 
2624     return virtio_bus_grab_ioeventfd(vbus);
2625 }
2626 
2627 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
2628 {
2629     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2630     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2631 
2632     virtio_bus_release_ioeventfd(vbus);
2633 }
2634 
2635 static void virtio_device_class_init(ObjectClass *klass, void *data)
2636 {
2637     /* Set the default value here. */
2638     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2639     DeviceClass *dc = DEVICE_CLASS(klass);
2640 
2641     dc->realize = virtio_device_realize;
2642     dc->unrealize = virtio_device_unrealize;
2643     dc->bus_type = TYPE_VIRTIO_BUS;
2644     dc->props = virtio_properties;
2645     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
2646     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
2647 
2648     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
2649 }
2650 
2651 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
2652 {
2653     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2654     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2655 
2656     return virtio_bus_ioeventfd_enabled(vbus);
2657 }
2658 
2659 static const TypeInfo virtio_device_info = {
2660     .name = TYPE_VIRTIO_DEVICE,
2661     .parent = TYPE_DEVICE,
2662     .instance_size = sizeof(VirtIODevice),
2663     .class_init = virtio_device_class_init,
2664     .instance_finalize = virtio_device_instance_finalize,
2665     .abstract = true,
2666     .class_size = sizeof(VirtioDeviceClass),
2667 };
2668 
2669 static void virtio_register_types(void)
2670 {
2671     type_register_static(&virtio_device_info);
2672 }
2673 
2674 type_init(virtio_register_types)
2675