1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu-common.h"
17 #include "cpu.h"
18 #include "trace.h"
19 #include "exec/address-spaces.h"
20 #include "qemu/error-report.h"
21 #include "hw/virtio/virtio.h"
22 #include "qemu/atomic.h"
23 #include "hw/virtio/virtio-bus.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "sysemu/dma.h"
26 
/*
 * The alignment to use between consumer and producer parts of the vring:
 * the x86 page size. This is the default, used by transports like PCI,
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
33 
34 typedef struct VRingDesc
35 {
36     uint64_t addr;
37     uint32_t len;
38     uint16_t flags;
39     uint16_t next;
40 } VRingDesc;
41 
42 typedef struct VRingAvail
43 {
44     uint16_t flags;
45     uint16_t idx;
46     uint16_t ring[0];
47 } VRingAvail;
48 
49 typedef struct VRingUsedElem
50 {
51     uint32_t id;
52     uint32_t len;
53 } VRingUsedElem;
54 
55 typedef struct VRingUsed
56 {
57     uint16_t flags;
58     uint16_t idx;
59     VRingUsedElem ring[0];
60 } VRingUsed;
61 
62 typedef struct VRingMemoryRegionCaches {
63     struct rcu_head rcu;
64     MemoryRegionCache desc;
65     MemoryRegionCache avail;
66     MemoryRegionCache used;
67 } VRingMemoryRegionCaches;
68 
69 typedef struct VRing
70 {
71     unsigned int num;
72     unsigned int num_default;
73     unsigned int align;
74     hwaddr desc;
75     hwaddr avail;
76     hwaddr used;
77     VRingMemoryRegionCaches *caches;
78 } VRing;
79 
80 struct VirtQueue
81 {
82     VRing vring;
83 
    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    /* Next position to fill in the used ring; shadows the ring's idx field. */
    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid. */
    bool signalled_used_valid;
97 
98     /* Notification enabled? */
99     bool notification;
100 
101     uint16_t queue_index;
102 
103     unsigned int inuse;
104 
105     uint16_t vector;
106     VirtIOHandleOutput handle_output;
107     VirtIOHandleAIOOutput handle_aio_output;
108     VirtIODevice *vdev;
109     EventNotifier guest_notifier;
110     EventNotifier host_notifier;
111     QLIST_ENTRY(VirtQueue) node;
112 };
113 
114 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
115 {
116     if (!caches) {
117         return;
118     }
119 
120     address_space_cache_destroy(&caches->desc);
121     address_space_cache_destroy(&caches->avail);
122     address_space_cache_destroy(&caches->used);
123     g_free(caches);
124 }
125 
126 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
127 {
128     VirtQueue *vq = &vdev->vq[n];
129     VRingMemoryRegionCaches *old = vq->vring.caches;
130     VRingMemoryRegionCaches *new;
131     hwaddr addr, size;
132     int event_size;
133     int64_t len;
134 
135     event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
136 
137     addr = vq->vring.desc;
138     if (!addr) {
139         return;
140     }
141     new = g_new0(VRingMemoryRegionCaches, 1);
142     size = virtio_queue_get_desc_size(vdev, n);
143     len = address_space_cache_init(&new->desc, vdev->dma_as,
144                                    addr, size, false);
145     if (len < size) {
146         virtio_error(vdev, "Cannot map desc");
147         goto err_desc;
148     }
149 
150     size = virtio_queue_get_used_size(vdev, n) + event_size;
151     len = address_space_cache_init(&new->used, vdev->dma_as,
152                                    vq->vring.used, size, true);
153     if (len < size) {
154         virtio_error(vdev, "Cannot map used");
155         goto err_used;
156     }
157 
158     size = virtio_queue_get_avail_size(vdev, n) + event_size;
159     len = address_space_cache_init(&new->avail, vdev->dma_as,
160                                    vq->vring.avail, size, false);
161     if (len < size) {
162         virtio_error(vdev, "Cannot map avail");
163         goto err_avail;
164     }
165 
166     atomic_rcu_set(&vq->vring.caches, new);
167     if (old) {
168         call_rcu(old, virtio_free_region_cache, rcu);
169     }
170     return;
171 
172 err_avail:
173     address_space_cache_destroy(&new->used);
174 err_used:
175     address_space_cache_destroy(&new->desc);
176 err_desc:
177     g_free(new);
178 }
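/*
 * Note: the tail of virtio_init_region_cache() is the classic RCU publish
 * pattern: initialize the new caches fully, publish the pointer with
 * atomic_rcu_set(), then defer freeing the old caches with call_rcu() until
 * every reader that might still hold them (via vring_get_region_caches()
 * under rcu_read_lock()) has finished.
 */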
179 
180 /* virt queue functions */
181 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
182 {
183     VRing *vring = &vdev->vq[n].vring;
184 
185     if (!vring->desc) {
186         /* not yet setup -> nothing to do */
187         return;
188     }
189     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
190     vring->used = vring_align(vring->avail +
191                               offsetof(VRingAvail, ring[vring->num]),
192                               vring->align);
193     virtio_init_region_cache(vdev, n);
194 }
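/*
 * A worked example of the layout above, for a hypothetical 256-entry ring
 * with the default 4096-byte alignment and sizeof(VRingDesc) == 16:
 *
 *     avail = desc + 256 * 16                          = desc + 4096
 *     used  = vring_align(avail + 4 + 256 * 2, 4096)   = desc + 8192
 *
 * (4 + 256 * 2 is offsetof(VRingAvail, ring[256]): flags, idx, then the
 * ring entries.)
 */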
195 
196 /* Called within rcu_read_lock().  */
197 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
198                             MemoryRegionCache *cache, int i)
199 {
200     address_space_read_cached(cache, i * sizeof(VRingDesc),
201                               desc, sizeof(VRingDesc));
202     virtio_tswap64s(vdev, &desc->addr);
203     virtio_tswap32s(vdev, &desc->len);
204     virtio_tswap16s(vdev, &desc->flags);
205     virtio_tswap16s(vdev, &desc->next);
206 }
207 
208 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
209 {
210     VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
211     assert(caches != NULL);
212     return caches;
}

/* Called within rcu_read_lock().  */
215 static inline uint16_t vring_avail_flags(VirtQueue *vq)
216 {
217     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
218     hwaddr pa = offsetof(VRingAvail, flags);
219     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
220 }
221 
222 /* Called within rcu_read_lock().  */
223 static inline uint16_t vring_avail_idx(VirtQueue *vq)
224 {
225     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
226     hwaddr pa = offsetof(VRingAvail, idx);
227     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
228     return vq->shadow_avail_idx;
229 }
230 
231 /* Called within rcu_read_lock().  */
232 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
233 {
234     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
235     hwaddr pa = offsetof(VRingAvail, ring[i]);
236     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
237 }
238 
239 /* Called within rcu_read_lock().  */
240 static inline uint16_t vring_get_used_event(VirtQueue *vq)
241 {
242     return vring_avail_ring(vq, vq->vring.num);
243 }
244 
245 /* Called within rcu_read_lock().  */
246 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
247                                     int i)
248 {
249     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
250     hwaddr pa = offsetof(VRingUsed, ring[i]);
251     virtio_tswap32s(vq->vdev, &uelem->id);
252     virtio_tswap32s(vq->vdev, &uelem->len);
253     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
254     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
255 }
256 
257 /* Called within rcu_read_lock().  */
258 static uint16_t vring_used_idx(VirtQueue *vq)
259 {
260     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
261     hwaddr pa = offsetof(VRingUsed, idx);
262     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
263 }
264 
265 /* Called within rcu_read_lock().  */
266 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
267 {
268     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
269     hwaddr pa = offsetof(VRingUsed, idx);
270     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
271     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
272     vq->used_idx = val;
273 }
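/*
 * Note: each store through a MemoryRegionCache in the helpers above is
 * paired with address_space_cache_invalidate() so the written bytes are
 * pushed back to guest memory and dirty tracking (needed for migration)
 * observes the update.
 */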
274 
275 /* Called within rcu_read_lock().  */
276 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
277 {
278     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
279     VirtIODevice *vdev = vq->vdev;
280     hwaddr pa = offsetof(VRingUsed, flags);
281     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
282 
283     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
284     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
285 }
286 
287 /* Called within rcu_read_lock().  */
288 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
289 {
290     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
291     VirtIODevice *vdev = vq->vdev;
292     hwaddr pa = offsetof(VRingUsed, flags);
293     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
294 
295     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
296     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
297 }
298 
299 /* Called within rcu_read_lock().  */
300 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
301 {
302     VRingMemoryRegionCaches *caches;
303     hwaddr pa;
304     if (!vq->notification) {
305         return;
306     }
307 
308     caches = vring_get_region_caches(vq);
309     pa = offsetof(VRingUsed, ring[vq->vring.num]);
310     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
311     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
312 }
313 
314 void virtio_queue_set_notification(VirtQueue *vq, int enable)
315 {
316     vq->notification = enable;
317 
318     if (!vq->vring.desc) {
319         return;
320     }
321 
322     rcu_read_lock();
323     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
324         vring_set_avail_event(vq, vring_avail_idx(vq));
325     } else if (enable) {
326         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
327     } else {
328         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
329     }
330     if (enable) {
331         /* Expose avail event/used flags before caller checks the avail idx. */
332         smp_mb();
333     }
334     rcu_read_unlock();
335 }
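/*
 * A minimal sketch of the usual consumer pattern built on the function
 * above (hypothetical code, not used in this file): keep notifications
 * disabled while draining the queue, and re-enable them only before the
 * final emptiness check, so that a buffer the guest adds at that moment
 * is not missed.
 */
static inline void example_poll_queue(VirtQueue *vq)
{
    VirtQueueElement *elem;

    do {
        virtio_queue_set_notification(vq, 0);
        while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
            /* ... process and complete elem ... */
            virtqueue_push(vq, elem, 0);
            g_free(elem);
        }
        virtio_queue_set_notification(vq, 1);
    } while (!virtio_queue_empty(vq));
}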
336 
337 int virtio_queue_ready(VirtQueue *vq)
338 {
339     return vq->vring.avail != 0;
340 }
341 
/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers.
 * Called within rcu_read_lock().  */
345 static int virtio_queue_empty_rcu(VirtQueue *vq)
346 {
347     if (unlikely(!vq->vring.avail)) {
348         return 1;
349     }
350 
351     if (vq->shadow_avail_idx != vq->last_avail_idx) {
352         return 0;
353     }
354 
355     return vring_avail_idx(vq) == vq->last_avail_idx;
356 }
357 
358 int virtio_queue_empty(VirtQueue *vq)
359 {
360     bool empty;
361 
362     if (unlikely(!vq->vring.avail)) {
363         return 1;
364     }
365 
366     if (vq->shadow_avail_idx != vq->last_avail_idx) {
367         return 0;
368     }
369 
370     rcu_read_lock();
371     empty = vring_avail_idx(vq) == vq->last_avail_idx;
372     rcu_read_unlock();
373     return empty;
374 }
375 
376 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
377                                unsigned int len)
378 {
379     AddressSpace *dma_as = vq->vdev->dma_as;
380     unsigned int offset;
381     int i;
382 
383     offset = 0;
384     for (i = 0; i < elem->in_num; i++) {
385         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
386 
387         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
388                          elem->in_sg[i].iov_len,
389                          DMA_DIRECTION_FROM_DEVICE, size);
390 
391         offset += size;
392     }
393 
    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
399 }
400 
401 /* virtqueue_detach_element:
402  * @vq: The #VirtQueue
403  * @elem: The #VirtQueueElement
404  * @len: number of bytes written
405  *
406  * Detach the element from the virtqueue.  This function is suitable for device
407  * reset or other situations where a #VirtQueueElement is simply freed and will
408  * not be pushed or discarded.
409  */
410 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
411                               unsigned int len)
412 {
413     vq->inuse--;
414     virtqueue_unmap_sg(vq, elem, len);
415 }
416 
417 /* virtqueue_unpop:
418  * @vq: The #VirtQueue
419  * @elem: The #VirtQueueElement
420  * @len: number of bytes written
421  *
422  * Pretend the most recent element wasn't popped from the virtqueue.  The next
423  * call to virtqueue_pop() will refetch the element.
424  */
425 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
426                      unsigned int len)
427 {
428     vq->last_avail_idx--;
429     virtqueue_detach_element(vq, elem, len);
430 }
431 
432 /* virtqueue_rewind:
433  * @vq: The #VirtQueue
434  * @num: Number of elements to push back
435  *
436  * Pretend that elements weren't popped from the virtqueue.  The next
437  * virtqueue_pop() will refetch the oldest element.
438  *
439  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
440  *
441  * Returns: true on success, false if @num is greater than the number of in use
442  * elements.
443  */
444 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
445 {
446     if (num > vq->inuse) {
447         return false;
448     }
449     vq->last_avail_idx -= num;
450     vq->inuse -= num;
451     return true;
452 }
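/*
 * Note: virtqueue_unpop() and virtqueue_rewind() let a device that popped
 * elements it could not process (for example, because its backend stalled)
 * hand them back to the queue and retry later instead of completing them.
 */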
453 
454 /* Called within rcu_read_lock().  */
455 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
456                     unsigned int len, unsigned int idx)
457 {
458     VRingUsedElem uelem;
459 
460     trace_virtqueue_fill(vq, elem, len, idx);
461 
462     virtqueue_unmap_sg(vq, elem, len);
463 
464     if (unlikely(vq->vdev->broken)) {
465         return;
466     }
467 
468     if (unlikely(!vq->vring.used)) {
469         return;
470     }
471 
472     idx = (idx + vq->used_idx) % vq->vring.num;
473 
474     uelem.id = elem->index;
475     uelem.len = len;
476     vring_used_write(vq, &uelem, idx);
477 }
478 
479 /* Called within rcu_read_lock().  */
480 void virtqueue_flush(VirtQueue *vq, unsigned int count)
481 {
482     uint16_t old, new;
483 
484     if (unlikely(vq->vdev->broken)) {
485         vq->inuse -= count;
486         return;
487     }
488 
489     if (unlikely(!vq->vring.used)) {
490         return;
491     }
492 
493     /* Make sure buffer is written before we update index. */
494     smp_wmb();
495     trace_virtqueue_flush(vq, count);
496     old = vq->used_idx;
497     new = old + count;
498     vring_used_idx_set(vq, new);
499     vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
502 }
503 
504 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
505                     unsigned int len)
506 {
507     rcu_read_lock();
508     virtqueue_fill(vq, elem, len, 0);
509     virtqueue_flush(vq, 1);
510     rcu_read_unlock();
511 }
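/*
 * A minimal sketch (hypothetical helper, not used in this file): a device
 * completing several elements at once can amortize the used-ring index
 * update by calling virtqueue_fill() per element and virtqueue_flush()
 * once, inside the same RCU critical section that virtqueue_push() takes
 * internally for the single-element case.
 */
static inline void example_complete_batch(VirtQueue *vq,
                                          VirtQueueElement **elems,
                                          unsigned int *lens,
                                          unsigned int count)
{
    unsigned int i;

    rcu_read_lock();
    for (i = 0; i < count; i++) {
        virtqueue_fill(vq, elems[i], lens[i], i);
    }
    virtqueue_flush(vq, count);
    rcu_read_unlock();
}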
512 
513 /* Called within rcu_read_lock().  */
514 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
515 {
516     uint16_t num_heads = vring_avail_idx(vq) - idx;
517 
518     /* Check it isn't doing very strange things with descriptor numbers. */
519     if (num_heads > vq->vring.num) {
520         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
521                      idx, vq->shadow_avail_idx);
522         return -EINVAL;
523     }
524     /* On success, callers read a descriptor at vq->last_avail_idx.
525      * Make sure descriptor read does not bypass avail index read. */
526     if (num_heads) {
527         smp_rmb();
528     }
529 
530     return num_heads;
531 }
532 
533 /* Called within rcu_read_lock().  */
534 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
535                                unsigned int *head)
536 {
537     /* Grab the next descriptor number they're advertising, and increment
538      * the index we've seen. */
539     *head = vring_avail_ring(vq, idx % vq->vring.num);
540 
541     /* If their number is silly, that's a fatal mistake. */
542     if (*head >= vq->vring.num) {
543         virtio_error(vq->vdev, "Guest says index %u is available", *head);
544         return false;
545     }
546 
547     return true;
548 }
549 
550 enum {
551     VIRTQUEUE_READ_DESC_ERROR = -1,
552     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
553     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
554 };
555 
556 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
557                                     MemoryRegionCache *desc_cache, unsigned int max,
558                                     unsigned int *next)
559 {
560     /* If this descriptor says it doesn't chain, we're done. */
561     if (!(desc->flags & VRING_DESC_F_NEXT)) {
562         return VIRTQUEUE_READ_DESC_DONE;
563     }
564 
    /* Check they're not leading us off the end of the descriptor table. */
566     *next = desc->next;
567     /* Make sure compiler knows to grab that: we don't want it changing! */
568     smp_wmb();
569 
570     if (*next >= max) {
571         virtio_error(vdev, "Desc next is %u", *next);
572         return VIRTQUEUE_READ_DESC_ERROR;
573     }
574 
575     vring_desc_read(vdev, desc, desc_cache, *next);
576     return VIRTQUEUE_READ_DESC_MORE;
577 }
578 
579 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
580                                unsigned int *out_bytes,
581                                unsigned max_in_bytes, unsigned max_out_bytes)
582 {
583     VirtIODevice *vdev = vq->vdev;
584     unsigned int max, idx;
585     unsigned int total_bufs, in_total, out_total;
586     VRingMemoryRegionCaches *caches;
587     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
588     int64_t len = 0;
589     int rc;
590 
591     if (unlikely(!vq->vring.desc)) {
592         if (in_bytes) {
593             *in_bytes = 0;
594         }
595         if (out_bytes) {
596             *out_bytes = 0;
597         }
598         return;
599     }
600 
601     rcu_read_lock();
602     idx = vq->last_avail_idx;
603     total_bufs = in_total = out_total = 0;
604 
605     max = vq->vring.num;
606     caches = vring_get_region_caches(vq);
607     if (caches->desc.len < max * sizeof(VRingDesc)) {
608         virtio_error(vdev, "Cannot map descriptor ring");
609         goto err;
610     }
611 
612     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
613         MemoryRegionCache *desc_cache = &caches->desc;
614         unsigned int num_bufs;
615         VRingDesc desc;
616         unsigned int i;
617 
618         num_bufs = total_bufs;
619 
620         if (!virtqueue_get_head(vq, idx++, &i)) {
621             goto err;
622         }
623 
624         vring_desc_read(vdev, &desc, desc_cache, i);
625 
626         if (desc.flags & VRING_DESC_F_INDIRECT) {
627             if (desc.len % sizeof(VRingDesc)) {
628                 virtio_error(vdev, "Invalid size for indirect buffer table");
629                 goto err;
630             }
631 
632             /* If we've got too many, that implies a descriptor loop. */
633             if (num_bufs >= max) {
634                 virtio_error(vdev, "Looped descriptor");
635                 goto err;
636             }
637 
638             /* loop over the indirect descriptor table */
639             len = address_space_cache_init(&indirect_desc_cache,
640                                            vdev->dma_as,
641                                            desc.addr, desc.len, false);
642             desc_cache = &indirect_desc_cache;
643             if (len < desc.len) {
644                 virtio_error(vdev, "Cannot map indirect buffer");
645                 goto err;
646             }
647 
648             max = desc.len / sizeof(VRingDesc);
649             num_bufs = i = 0;
650             vring_desc_read(vdev, &desc, desc_cache, i);
651         }
652 
653         do {
654             /* If we've got too many, that implies a descriptor loop. */
655             if (++num_bufs > max) {
656                 virtio_error(vdev, "Looped descriptor");
657                 goto err;
658             }
659 
660             if (desc.flags & VRING_DESC_F_WRITE) {
661                 in_total += desc.len;
662             } else {
663                 out_total += desc.len;
664             }
665             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
666                 goto done;
667             }
668 
669             rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
670         } while (rc == VIRTQUEUE_READ_DESC_MORE);
671 
672         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
673             goto err;
674         }
675 
676         if (desc_cache == &indirect_desc_cache) {
677             address_space_cache_destroy(&indirect_desc_cache);
678             total_bufs++;
679         } else {
680             total_bufs = num_bufs;
681         }
682     }
683 
684     if (rc < 0) {
685         goto err;
686     }
687 
688 done:
689     address_space_cache_destroy(&indirect_desc_cache);
690     if (in_bytes) {
691         *in_bytes = in_total;
692     }
693     if (out_bytes) {
694         *out_bytes = out_total;
695     }
696     rcu_read_unlock();
697     return;
698 
699 err:
700     in_total = out_total = 0;
701     goto done;
702 }
703 
704 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
705                           unsigned int out_bytes)
706 {
707     unsigned int in_total, out_total;
708 
709     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
710     return in_bytes <= in_total && out_bytes <= out_total;
711 }
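/*
 * Note: a device typically calls virtqueue_avail_bytes() before popping
 * anything, to check that the guest has queued enough device-writable (in)
 * or device-readable (out) space for a pending transfer.  A hypothetical
 * receive-side check:
 *
 *     if (!virtqueue_avail_bytes(vq, pkt_len, 0)) {
 *         return;
 *     }
 */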
712 
713 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
714                                hwaddr *addr, struct iovec *iov,
715                                unsigned int max_num_sg, bool is_write,
716                                hwaddr pa, size_t sz)
717 {
718     bool ok = false;
719     unsigned num_sg = *p_num_sg;
720     assert(num_sg <= max_num_sg);
721 
722     if (!sz) {
723         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
724         goto out;
725     }
726 
727     while (sz) {
728         hwaddr len = sz;
729 
730         if (num_sg == max_num_sg) {
731             virtio_error(vdev, "virtio: too many write descriptors in "
732                                "indirect table");
733             goto out;
734         }
735 
736         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
737                                               is_write ?
738                                               DMA_DIRECTION_FROM_DEVICE :
739                                               DMA_DIRECTION_TO_DEVICE);
740         if (!iov[num_sg].iov_base) {
741             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
742             goto out;
743         }
744 
745         iov[num_sg].iov_len = len;
746         addr[num_sg] = pa;
747 
748         sz -= len;
749         pa += len;
750         num_sg++;
751     }
752     ok = true;
753 
754 out:
755     *p_num_sg = num_sg;
756     return ok;
757 }
758 
759 /* Only used by error code paths before we have a VirtQueueElement (therefore
760  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
761  * yet.
762  */
763 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
764                                     struct iovec *iov)
765 {
766     unsigned int i;
767 
768     for (i = 0; i < out_num + in_num; i++) {
769         int is_write = i >= out_num;
770 
771         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
772         iov++;
773     }
774 }
775 
776 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
777                                 hwaddr *addr, unsigned int *num_sg,
778                                 int is_write)
779 {
780     unsigned int i;
781     hwaddr len;
782 
783     for (i = 0; i < *num_sg; i++) {
784         len = sg[i].iov_len;
785         sg[i].iov_base = dma_memory_map(vdev->dma_as,
786                                         addr[i], &len, is_write ?
787                                         DMA_DIRECTION_FROM_DEVICE :
788                                         DMA_DIRECTION_TO_DEVICE);
789         if (!sg[i].iov_base) {
790             error_report("virtio: error trying to map MMIO memory");
791             exit(1);
792         }
793         if (len != sg[i].iov_len) {
794             error_report("virtio: unexpected memory split");
795             exit(1);
796         }
797     }
798 }
799 
800 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
801 {
802     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
803     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
804 }
805 
806 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
807 {
808     VirtQueueElement *elem;
809     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
810     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
811     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
812     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
813     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
814     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
815 
816     assert(sz >= sizeof(VirtQueueElement));
817     elem = g_malloc(out_sg_end);
818     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
819     elem->out_num = out_num;
820     elem->in_num = in_num;
821     elem->in_addr = (void *)elem + in_addr_ofs;
822     elem->out_addr = (void *)elem + out_addr_ofs;
823     elem->in_sg = (void *)elem + in_sg_ofs;
824     elem->out_sg = (void *)elem + out_sg_ofs;
825     return elem;
826 }
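/*
 * Note: the element header, both address arrays and both iovec arrays are
 * carved out of a single allocation:
 *
 *     [element (sz bytes)][in_addr][out_addr][in_sg][out_sg]
 *
 * with each array placed at its natural alignment.  This is why callers
 * must pass sz >= sizeof(VirtQueueElement) and can free the whole element
 * with one g_free().
 */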
827 
828 void *virtqueue_pop(VirtQueue *vq, size_t sz)
829 {
830     unsigned int i, head, max;
831     VRingMemoryRegionCaches *caches;
832     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
833     MemoryRegionCache *desc_cache;
834     int64_t len;
835     VirtIODevice *vdev = vq->vdev;
836     VirtQueueElement *elem = NULL;
837     unsigned out_num, in_num, elem_entries;
838     hwaddr addr[VIRTQUEUE_MAX_SIZE];
839     struct iovec iov[VIRTQUEUE_MAX_SIZE];
840     VRingDesc desc;
841     int rc;
842 
843     if (unlikely(vdev->broken)) {
844         return NULL;
845     }
846     rcu_read_lock();
847     if (virtio_queue_empty_rcu(vq)) {
848         goto done;
849     }
850     /* Needed after virtio_queue_empty(), see comment in
851      * virtqueue_num_heads(). */
852     smp_rmb();
853 
    /* When we start there are no input or output buffers. */
855     out_num = in_num = elem_entries = 0;
856 
857     max = vq->vring.num;
858 
859     if (vq->inuse >= vq->vring.num) {
860         virtio_error(vdev, "Virtqueue size exceeded");
861         goto done;
862     }
863 
864     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
865         goto done;
866     }
867 
868     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
869         vring_set_avail_event(vq, vq->last_avail_idx);
870     }
871 
872     i = head;
873 
874     caches = vring_get_region_caches(vq);
875     if (caches->desc.len < max * sizeof(VRingDesc)) {
876         virtio_error(vdev, "Cannot map descriptor ring");
877         goto done;
878     }
879 
880     desc_cache = &caches->desc;
881     vring_desc_read(vdev, &desc, desc_cache, i);
882     if (desc.flags & VRING_DESC_F_INDIRECT) {
883         if (desc.len % sizeof(VRingDesc)) {
884             virtio_error(vdev, "Invalid size for indirect buffer table");
885             goto done;
886         }
887 
888         /* loop over the indirect descriptor table */
889         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
890                                        desc.addr, desc.len, false);
891         desc_cache = &indirect_desc_cache;
892         if (len < desc.len) {
893             virtio_error(vdev, "Cannot map indirect buffer");
894             goto done;
895         }
896 
897         max = desc.len / sizeof(VRingDesc);
898         i = 0;
899         vring_desc_read(vdev, &desc, desc_cache, i);
900     }
901 
902     /* Collect all the descriptors */
903     do {
904         bool map_ok;
905 
906         if (desc.flags & VRING_DESC_F_WRITE) {
907             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
908                                         iov + out_num,
909                                         VIRTQUEUE_MAX_SIZE - out_num, true,
910                                         desc.addr, desc.len);
911         } else {
912             if (in_num) {
913                 virtio_error(vdev, "Incorrect order for descriptors");
914                 goto err_undo_map;
915             }
916             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
917                                         VIRTQUEUE_MAX_SIZE, false,
918                                         desc.addr, desc.len);
919         }
920         if (!map_ok) {
921             goto err_undo_map;
922         }
923 
924         /* If we've got too many, that implies a descriptor loop. */
925         if (++elem_entries > max) {
926             virtio_error(vdev, "Looped descriptor");
927             goto err_undo_map;
928         }
929 
930         rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
931     } while (rc == VIRTQUEUE_READ_DESC_MORE);
932 
933     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
934         goto err_undo_map;
935     }
936 
937     /* Now copy what we have collected and mapped */
938     elem = virtqueue_alloc_element(sz, out_num, in_num);
939     elem->index = head;
940     for (i = 0; i < out_num; i++) {
941         elem->out_addr[i] = addr[i];
942         elem->out_sg[i] = iov[i];
943     }
944     for (i = 0; i < in_num; i++) {
945         elem->in_addr[i] = addr[out_num + i];
946         elem->in_sg[i] = iov[out_num + i];
947     }
948 
949     vq->inuse++;
950 
951     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
952 done:
953     address_space_cache_destroy(&indirect_desc_cache);
954     rcu_read_unlock();
955 
956     return elem;
957 
958 err_undo_map:
959     virtqueue_undo_map_desc(out_num, in_num, iov);
960     goto done;
961 }
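/*
 * A minimal sketch (hypothetical names, not used in this file): a device
 * that wants per-request state embeds the element at the start of its own
 * struct and passes the outer size to virtqueue_pop(), so the element, the
 * device state and the arrays laid out by virtqueue_alloc_element() share
 * a single allocation freed with one g_free().
 */
typedef struct ExampleReq {
    VirtQueueElement elem;      /* must be the first field */
    uint32_t status;            /* device-specific state */
} ExampleReq;

static inline void example_drain_queue(VirtIODevice *vdev, VirtQueue *vq)
{
    ExampleReq *req;

    while ((req = virtqueue_pop(vq, sizeof(ExampleReq)))) {
        req->status = 0;
        /* ... process req->elem.out_sg, fill req->elem.in_sg ... */
        virtqueue_push(vq, &req->elem, 0);
        g_free(req);
    }
    virtio_notify(vdev, vq);
}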
962 
963 /* virtqueue_drop_all:
964  * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they were completed. Useful when buffers cannot be
 * processed but must be returned to the guest.
968  */
969 unsigned int virtqueue_drop_all(VirtQueue *vq)
970 {
971     unsigned int dropped = 0;
972     VirtQueueElement elem = {};
973     VirtIODevice *vdev = vq->vdev;
974     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
975 
976     if (unlikely(vdev->broken)) {
977         return 0;
978     }
979 
980     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* Works like virtqueue_pop() but does not map buffers
         * and does not allocate any memory. */
983         smp_rmb();
984         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
985             break;
986         }
987         vq->inuse++;
988         vq->last_avail_idx++;
989         if (fEventIdx) {
990             vring_set_avail_event(vq, vq->last_avail_idx);
991         }
992         /* immediately push the element, nothing to unmap
993          * as both in_num and out_num are set to 0 */
994         virtqueue_push(vq, &elem, 0);
995         dropped++;
996     }
997 
998     return dropped;
999 }
1000 
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meantime, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
1008 typedef struct VirtQueueElementOld {
1009     unsigned int index;
1010     unsigned int out_num;
1011     unsigned int in_num;
1012     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1013     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1014     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1015     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1016 } VirtQueueElementOld;
1017 
1018 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1019 {
1020     VirtQueueElement *elem;
1021     VirtQueueElementOld data;
1022     int i;
1023 
1024     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1025 
1026     /* TODO: teach all callers that this can fail, and return failure instead
1027      * of asserting here.
1028      * This is just one thing (there are probably more) that must be
1029      * fixed before we can allow NDEBUG compilation.
1030      */
1031     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1032     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1033 
1034     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1035     elem->index = data.index;
1036 
1037     for (i = 0; i < elem->in_num; i++) {
1038         elem->in_addr[i] = data.in_addr[i];
1039     }
1040 
1041     for (i = 0; i < elem->out_num; i++) {
1042         elem->out_addr[i] = data.out_addr[i];
1043     }
1044 
1045     for (i = 0; i < elem->in_num; i++) {
1046         /* Base is overwritten by virtqueue_map.  */
1047         elem->in_sg[i].iov_base = 0;
1048         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1049     }
1050 
1051     for (i = 0; i < elem->out_num; i++) {
1052         /* Base is overwritten by virtqueue_map.  */
1053         elem->out_sg[i].iov_base = 0;
1054         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1055     }
1056 
1057     virtqueue_map(vdev, elem);
1058     return elem;
1059 }
1060 
1061 void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
1062 {
1063     VirtQueueElementOld data;
1064     int i;
1065 
1066     memset(&data, 0, sizeof(data));
1067     data.index = elem->index;
1068     data.in_num = elem->in_num;
1069     data.out_num = elem->out_num;
1070 
1071     for (i = 0; i < elem->in_num; i++) {
1072         data.in_addr[i] = elem->in_addr[i];
1073     }
1074 
1075     for (i = 0; i < elem->out_num; i++) {
1076         data.out_addr[i] = elem->out_addr[i];
1077     }
1078 
1079     for (i = 0; i < elem->in_num; i++) {
1080         /* Base is overwritten by virtqueue_map when loading.  Do not
1081          * save it, as it would leak the QEMU address space layout.  */
1082         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1083     }
1084 
1085     for (i = 0; i < elem->out_num; i++) {
1086         /* Do not save iov_base as above.  */
1087         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1088     }
1089     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1090 }
1091 
1092 /* virtio device */
1093 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1094 {
1095     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1096     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1097 
1098     if (unlikely(vdev->broken)) {
1099         return;
1100     }
1101 
1102     if (k->notify) {
1103         k->notify(qbus->parent, vector);
1104     }
1105 }
1106 
1107 void virtio_update_irq(VirtIODevice *vdev)
1108 {
1109     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1110 }
1111 
1112 static int virtio_validate_features(VirtIODevice *vdev)
1113 {
1114     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1115 
1116     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1117         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1118         return -EFAULT;
1119     }
1120 
1121     if (k->validate_features) {
1122         return k->validate_features(vdev);
1123     } else {
1124         return 0;
1125     }
1126 }
1127 
1128 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1129 {
1130     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1131     trace_virtio_set_status(vdev, val);
1132 
1133     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1134         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1135             val & VIRTIO_CONFIG_S_FEATURES_OK) {
1136             int ret = virtio_validate_features(vdev);
1137 
1138             if (ret) {
1139                 return ret;
1140             }
1141         }
1142     }
1143     if (k->set_status) {
1144         k->set_status(vdev, val);
1145     }
1146     vdev->status = val;
1147     return 0;
1148 }
1149 
1150 bool target_words_bigendian(void);
1151 static enum virtio_device_endian virtio_default_endian(void)
1152 {
1153     if (target_words_bigendian()) {
1154         return VIRTIO_DEVICE_ENDIAN_BIG;
1155     } else {
1156         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1157     }
1158 }
1159 
1160 static enum virtio_device_endian virtio_current_cpu_endian(void)
1161 {
1162     CPUClass *cc = CPU_GET_CLASS(current_cpu);
1163 
1164     if (cc->virtio_is_big_endian(current_cpu)) {
1165         return VIRTIO_DEVICE_ENDIAN_BIG;
1166     } else {
1167         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1168     }
1169 }
1170 
1171 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
1172 {
1173     VRingMemoryRegionCaches *caches;
1174 
1175     caches = atomic_read(&vq->vring.caches);
1176     atomic_rcu_set(&vq->vring.caches, NULL);
1177     if (caches) {
1178         call_rcu(caches, virtio_free_region_cache, rcu);
1179     }
1180 }
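/*
 * Note: this leaves vq->vring.caches NULL until the next
 * virtio_init_region_cache(), so a vring accessor reaching
 * vring_get_region_caches() in between would trip its assertion.  That is
 * why the access paths check vq->vring.desc first (see, for example,
 * virtio_queue_notify_vq()).
 */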
1181 
1182 void virtio_reset(void *opaque)
1183 {
1184     VirtIODevice *vdev = opaque;
1185     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1186     int i;
1187 
1188     virtio_set_status(vdev, 0);
1189     if (current_cpu) {
1190         /* Guest initiated reset */
1191         vdev->device_endian = virtio_current_cpu_endian();
1192     } else {
1193         /* System reset */
1194         vdev->device_endian = virtio_default_endian();
1195     }
1196 
1197     if (k->reset) {
1198         k->reset(vdev);
1199     }
1200 
1201     vdev->broken = false;
1202     vdev->guest_features = 0;
1203     vdev->queue_sel = 0;
1204     vdev->status = 0;
1205     atomic_set(&vdev->isr, 0);
1206     vdev->config_vector = VIRTIO_NO_VECTOR;
1207     virtio_notify_vector(vdev, vdev->config_vector);
1208 
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1210         vdev->vq[i].vring.desc = 0;
1211         vdev->vq[i].vring.avail = 0;
1212         vdev->vq[i].vring.used = 0;
1213         vdev->vq[i].last_avail_idx = 0;
1214         vdev->vq[i].shadow_avail_idx = 0;
1215         vdev->vq[i].used_idx = 0;
1216         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
1217         vdev->vq[i].signalled_used = 0;
1218         vdev->vq[i].signalled_used_valid = false;
1219         vdev->vq[i].notification = true;
1220         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
1221         vdev->vq[i].inuse = 0;
1222         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
1223     }
1224 }
1225 
1226 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
1227 {
1228     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1229     uint8_t val;
1230 
1231     if (addr + sizeof(val) > vdev->config_len) {
1232         return (uint32_t)-1;
1233     }
1234 
1235     k->get_config(vdev, vdev->config);
1236 
1237     val = ldub_p(vdev->config + addr);
1238     return val;
1239 }
1240 
1241 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
1242 {
1243     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1244     uint16_t val;
1245 
1246     if (addr + sizeof(val) > vdev->config_len) {
1247         return (uint32_t)-1;
1248     }
1249 
1250     k->get_config(vdev, vdev->config);
1251 
1252     val = lduw_p(vdev->config + addr);
1253     return val;
1254 }
1255 
1256 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
1257 {
1258     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1259     uint32_t val;
1260 
1261     if (addr + sizeof(val) > vdev->config_len) {
1262         return (uint32_t)-1;
1263     }
1264 
1265     k->get_config(vdev, vdev->config);
1266 
1267     val = ldl_p(vdev->config + addr);
1268     return val;
1269 }
1270 
1271 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1272 {
1273     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1274     uint8_t val = data;
1275 
1276     if (addr + sizeof(val) > vdev->config_len) {
1277         return;
1278     }
1279 
1280     stb_p(vdev->config + addr, val);
1281 
1282     if (k->set_config) {
1283         k->set_config(vdev, vdev->config);
1284     }
1285 }
1286 
1287 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1288 {
1289     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1290     uint16_t val = data;
1291 
1292     if (addr + sizeof(val) > vdev->config_len) {
1293         return;
1294     }
1295 
1296     stw_p(vdev->config + addr, val);
1297 
1298     if (k->set_config) {
1299         k->set_config(vdev, vdev->config);
1300     }
1301 }
1302 
1303 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
1304 {
1305     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1306     uint32_t val = data;
1307 
1308     if (addr + sizeof(val) > vdev->config_len) {
1309         return;
1310     }
1311 
1312     stl_p(vdev->config + addr, val);
1313 
1314     if (k->set_config) {
1315         k->set_config(vdev, vdev->config);
1316     }
1317 }
1318 
1319 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
1320 {
1321     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1322     uint8_t val;
1323 
1324     if (addr + sizeof(val) > vdev->config_len) {
1325         return (uint32_t)-1;
1326     }
1327 
1328     k->get_config(vdev, vdev->config);
1329 
1330     val = ldub_p(vdev->config + addr);
1331     return val;
1332 }
1333 
1334 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
1335 {
1336     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1337     uint16_t val;
1338 
1339     if (addr + sizeof(val) > vdev->config_len) {
1340         return (uint32_t)-1;
1341     }
1342 
1343     k->get_config(vdev, vdev->config);
1344 
1345     val = lduw_le_p(vdev->config + addr);
1346     return val;
1347 }
1348 
1349 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
1350 {
1351     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1352     uint32_t val;
1353 
1354     if (addr + sizeof(val) > vdev->config_len) {
1355         return (uint32_t)-1;
1356     }
1357 
1358     k->get_config(vdev, vdev->config);
1359 
1360     val = ldl_le_p(vdev->config + addr);
1361     return val;
1362 }
1363 
1364 void virtio_config_modern_writeb(VirtIODevice *vdev,
1365                                  uint32_t addr, uint32_t data)
1366 {
1367     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1368     uint8_t val = data;
1369 
1370     if (addr + sizeof(val) > vdev->config_len) {
1371         return;
1372     }
1373 
1374     stb_p(vdev->config + addr, val);
1375 
1376     if (k->set_config) {
1377         k->set_config(vdev, vdev->config);
1378     }
1379 }
1380 
1381 void virtio_config_modern_writew(VirtIODevice *vdev,
1382                                  uint32_t addr, uint32_t data)
1383 {
1384     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1385     uint16_t val = data;
1386 
1387     if (addr + sizeof(val) > vdev->config_len) {
1388         return;
1389     }
1390 
1391     stw_le_p(vdev->config + addr, val);
1392 
1393     if (k->set_config) {
1394         k->set_config(vdev, vdev->config);
1395     }
1396 }
1397 
1398 void virtio_config_modern_writel(VirtIODevice *vdev,
1399                                  uint32_t addr, uint32_t data)
1400 {
1401     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1402     uint32_t val = data;
1403 
1404     if (addr + sizeof(val) > vdev->config_len) {
1405         return;
1406     }
1407 
1408     stl_le_p(vdev->config + addr, val);
1409 
1410     if (k->set_config) {
1411         k->set_config(vdev, vdev->config);
1412     }
1413 }
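/*
 * Note: the config-space accessors above come in two families that differ
 * only in endianness: the legacy virtio_config_read/write helpers use the
 * target's natural byte order (lduw_p/stw_p and friends), while the
 * "modern" ones use the little-endian accessors (lduw_le_p/stw_le_p),
 * since VIRTIO 1.0 defines config space as little-endian.
 */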
1414 
1415 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
1416 {
1417     vdev->vq[n].vring.desc = addr;
1418     virtio_queue_update_rings(vdev, n);
1419 }
1420 
1421 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
1422 {
1423     return vdev->vq[n].vring.desc;
1424 }
1425 
1426 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
1427                             hwaddr avail, hwaddr used)
1428 {
1429     vdev->vq[n].vring.desc = desc;
1430     vdev->vq[n].vring.avail = avail;
1431     vdev->vq[n].vring.used = used;
1432     virtio_init_region_cache(vdev, n);
1433 }
1434 
1435 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
1436 {
1437     /* Don't allow guest to flip queue between existent and
1438      * nonexistent states, or to set it to an invalid size.
1439      */
1440     if (!!num != !!vdev->vq[n].vring.num ||
1441         num > VIRTQUEUE_MAX_SIZE ||
1442         num < 0) {
1443         return;
1444     }
1445     vdev->vq[n].vring.num = num;
1446 }
1447 
1448 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
1449 {
1450     return QLIST_FIRST(&vdev->vector_queues[vector]);
1451 }
1452 
1453 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
1454 {
1455     return QLIST_NEXT(vq, node);
1456 }
1457 
1458 int virtio_queue_get_num(VirtIODevice *vdev, int n)
1459 {
1460     return vdev->vq[n].vring.num;
1461 }
1462 
1463 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
1464 {
1465     return vdev->vq[n].vring.num_default;
1466 }
1467 
1468 int virtio_get_num_queues(VirtIODevice *vdev)
1469 {
1470     int i;
1471 
1472     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1473         if (!virtio_queue_get_num(vdev, i)) {
1474             break;
1475         }
1476     }
1477 
1478     return i;
1479 }
1480 
1481 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
1482 {
1483     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1484     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1485 
1486     /* virtio-1 compliant devices cannot change the alignment */
1487     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1488         error_report("tried to modify queue alignment for virtio-1 device");
1489         return;
1490     }
1491     /* Check that the transport told us it was going to do this
1492      * (so a buggy transport will immediately assert rather than
1493      * silently failing to migrate this state)
1494      */
1495     assert(k->has_variable_vring_alignment);
1496 
1497     vdev->vq[n].vring.align = align;
1498     virtio_queue_update_rings(vdev, n);
1499 }
1500 
1501 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
1502 {
1503     if (vq->vring.desc && vq->handle_aio_output) {
1504         VirtIODevice *vdev = vq->vdev;
1505 
1506         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1507         return vq->handle_aio_output(vdev, vq);
1508     }
1509 
1510     return false;
1511 }
1512 
1513 static void virtio_queue_notify_vq(VirtQueue *vq)
1514 {
1515     if (vq->vring.desc && vq->handle_output) {
1516         VirtIODevice *vdev = vq->vdev;
1517 
1518         if (unlikely(vdev->broken)) {
1519             return;
1520         }
1521 
1522         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1523         vq->handle_output(vdev, vq);
1524     }
1525 }
1526 
1527 void virtio_queue_notify(VirtIODevice *vdev, int n)
1528 {
1529     VirtQueue *vq = &vdev->vq[n];
1530 
1531     if (unlikely(!vq->vring.desc || vdev->broken)) {
1532         return;
1533     }
1534 
1535     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
1536     if (vq->handle_aio_output) {
1537         event_notifier_set(&vq->host_notifier);
1538     } else if (vq->handle_output) {
1539         vq->handle_output(vdev, vq);
1540     }
1541 }
1542 
1543 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
1544 {
1545     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
1546         VIRTIO_NO_VECTOR;
1547 }
1548 
1549 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
1550 {
1551     VirtQueue *vq = &vdev->vq[n];
1552 
1553     if (n < VIRTIO_QUEUE_MAX) {
1554         if (vdev->vector_queues &&
1555             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
1556             QLIST_REMOVE(vq, node);
1557         }
1558         vdev->vq[n].vector = vector;
1559         if (vdev->vector_queues &&
1560             vector != VIRTIO_NO_VECTOR) {
1561             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
1562         }
1563     }
1564 }
1565 
1566 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
1567                             VirtIOHandleOutput handle_output)
1568 {
1569     int i;
1570 
1571     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }
1578 
1579     vdev->vq[i].vring.num = queue_size;
1580     vdev->vq[i].vring.num_default = queue_size;
1581     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
1582     vdev->vq[i].handle_output = handle_output;
1583     vdev->vq[i].handle_aio_output = NULL;
1584 
1585     return &vdev->vq[i];
1586 }
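/*
 * A sketch of hypothetical realize-time code: devices allocate their queues
 * in order with virtio_add_queue() and drop them with virtio_del_queue():
 *
 *     s->req_vq  = virtio_add_queue(vdev, 128, example_handle_req);
 *     s->ctrl_vq = virtio_add_queue(vdev, 64, example_handle_ctrl);
 */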
1587 
1588 void virtio_del_queue(VirtIODevice *vdev, int n)
1589 {
1590     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
1591         abort();
1592     }
1593 
1594     vdev->vq[n].vring.num = 0;
1595     vdev->vq[n].vring.num_default = 0;
1596 }
1597 
1598 static void virtio_set_isr(VirtIODevice *vdev, int value)
1599 {
1600     uint8_t old = atomic_read(&vdev->isr);
1601 
1602     /* Do not write ISR if it does not change, so that its cacheline remains
1603      * shared in the common case where the guest does not read it.
1604      */
1605     if ((old & value) != value) {
1606         atomic_or(&vdev->isr, value);
1607     }
1608 }
1609 
1610 /* Called within rcu_read_lock().  */
1611 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
1612 {
1613     uint16_t old, new;
1614     bool v;
1615     /* We need to expose used array entries before checking used event. */
1616     smp_mb();
    /* Always notify when the queue is empty, if the feature is acknowledged. */
1618     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1619         !vq->inuse && virtio_queue_empty(vq)) {
1620         return true;
1621     }
1622 
1623     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1624         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1625     }
1626 
1627     v = vq->signalled_used_valid;
1628     vq->signalled_used_valid = true;
1629     old = vq->signalled_used;
1630     new = vq->signalled_used = vq->used_idx;
1631     return !v || vring_need_event(vring_get_used_event(vq), new, old);
1632 }
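/*
 * A worked example for the EVENT_IDX path above: vring_need_event(event,
 * new, old) is defined by the virtio spec as
 *
 *     (uint16_t)(new - event - 1) < (uint16_t)(new - old)
 *
 * i.e. "did used_event fall within the entries just published?".  With
 * old = 10, new = 13 and a guest used_event of 11: (13 - 11 - 1) = 1 is
 * less than (13 - 10) = 3, so the guest is notified.
 */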
1633 
1634 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
1635 {
1636     bool should_notify;
1637     rcu_read_lock();
1638     should_notify = virtio_should_notify(vdev, vq);
1639     rcu_read_unlock();
1640 
1641     if (!should_notify) {
1642         return;
1643     }
1644 
1645     trace_virtio_notify_irqfd(vdev, vq);
1646 
1647     /*
1648      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
1649      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
1650      * incorrectly polling this bit during crashdump and hibernation
1651      * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * MSI enabled.
     *
     * The next driver release, from 2016, fixed this problem, so working
     * around it is not a must, but it's easy to do, so let's do it here.
1658      *
1659      * Note: it's safe to update ISR from any thread as it was switched
1660      * to an atomic operation.
1661      */
1662     virtio_set_isr(vq->vdev, 0x1);
1663     event_notifier_set(&vq->guest_notifier);
1664 }
1665 
1666 static void virtio_irq(VirtQueue *vq)
1667 {
1668     virtio_set_isr(vq->vdev, 0x1);
1669     virtio_notify_vector(vq->vdev, vq->vector);
1670 }
1671 
1672 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
1673 {
1674     bool should_notify;
1675     rcu_read_lock();
1676     should_notify = virtio_should_notify(vdev, vq);
1677     rcu_read_unlock();
1678 
1679     if (!should_notify) {
1680         return;
1681     }
1682 
1683     trace_virtio_notify(vdev, vq);
1684     virtio_irq(vq);
1685 }
1686 
1687 void virtio_notify_config(VirtIODevice *vdev)
1688 {
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }
1691 
1692     virtio_set_isr(vdev, 0x3);
1693     vdev->generation++;
1694     virtio_notify_vector(vdev, vdev->config_vector);
1695 }
1696 
1697 static bool virtio_device_endian_needed(void *opaque)
1698 {
1699     VirtIODevice *vdev = opaque;
1700 
1701     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
1702     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1703         return vdev->device_endian != virtio_default_endian();
1704     }
1705     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
1706     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
1707 }
1708 
1709 static bool virtio_64bit_features_needed(void *opaque)
1710 {
1711     VirtIODevice *vdev = opaque;
1712 
1713     return (vdev->host_features >> 32) != 0;
1714 }
1715 
1716 static bool virtio_virtqueue_needed(void *opaque)
1717 {
1718     VirtIODevice *vdev = opaque;
1719 
1720     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
1721 }
1722 
1723 static bool virtio_ringsize_needed(void *opaque)
1724 {
1725     VirtIODevice *vdev = opaque;
1726     int i;
1727 
1728     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1729         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
1730             return true;
1731         }
1732     }
1733     return false;
1734 }
1735 
1736 static bool virtio_extra_state_needed(void *opaque)
1737 {
1738     VirtIODevice *vdev = opaque;
1739     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1740     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1741 
1742     return k->has_extra_state &&
1743         k->has_extra_state(qbus->parent);
1744 }
1745 
1746 static bool virtio_broken_needed(void *opaque)
1747 {
1748     VirtIODevice *vdev = opaque;
1749 
1750     return vdev->broken;
1751 }
1752 
1753 static const VMStateDescription vmstate_virtqueue = {
1754     .name = "virtqueue_state",
1755     .version_id = 1,
1756     .minimum_version_id = 1,
1757     .fields = (VMStateField[]) {
1758         VMSTATE_UINT64(vring.avail, struct VirtQueue),
1759         VMSTATE_UINT64(vring.used, struct VirtQueue),
1760         VMSTATE_END_OF_LIST()
1761     }
1762 };
1763 
1764 static const VMStateDescription vmstate_virtio_virtqueues = {
1765     .name = "virtio/virtqueues",
1766     .version_id = 1,
1767     .minimum_version_id = 1,
1768     .needed = &virtio_virtqueue_needed,
1769     .fields = (VMStateField[]) {
1770         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1771                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
1772         VMSTATE_END_OF_LIST()
1773     }
1774 };
1775 
1776 static const VMStateDescription vmstate_ringsize = {
1777     .name = "ringsize_state",
1778     .version_id = 1,
1779     .minimum_version_id = 1,
1780     .fields = (VMStateField[]) {
1781         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
1782         VMSTATE_END_OF_LIST()
1783     }
1784 };
1785 
1786 static const VMStateDescription vmstate_virtio_ringsize = {
1787     .name = "virtio/ringsize",
1788     .version_id = 1,
1789     .minimum_version_id = 1,
1790     .needed = &virtio_ringsize_needed,
1791     .fields = (VMStateField[]) {
1792         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
1793                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
1794         VMSTATE_END_OF_LIST()
1795     }
1796 };
1797 
1798 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
1799                            VMStateField *field)
1800 {
1801     VirtIODevice *vdev = pv;
1802     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1803     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1804 
1805     if (!k->load_extra_state) {
1806         return -1;
1807     } else {
1808         return k->load_extra_state(qbus->parent, f);
1809     }
1810 }
1811 
1812 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
1813                            VMStateField *field, QJSON *vmdesc)
1814 {
1815     VirtIODevice *vdev = pv;
1816     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1817     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1818 
1819     k->save_extra_state(qbus->parent, f);
1820     return 0;
1821 }
1822 
1823 static const VMStateInfo vmstate_info_extra_state = {
1824     .name = "virtqueue_extra_state",
1825     .get = get_extra_state,
1826     .put = put_extra_state,
1827 };
1828 
1829 static const VMStateDescription vmstate_virtio_extra_state = {
1830     .name = "virtio/extra_state",
1831     .version_id = 1,
1832     .minimum_version_id = 1,
1833     .needed = &virtio_extra_state_needed,
1834     .fields = (VMStateField[]) {
1835         {
1836             .name         = "extra_state",
1837             .version_id   = 0,
1838             .field_exists = NULL,
1839             .size         = 0,
1840             .info         = &vmstate_info_extra_state,
1841             .flags        = VMS_SINGLE,
1842             .offset       = 0,
1843         },
1844         VMSTATE_END_OF_LIST()
1845     }
1846 };
1847 
1848 static const VMStateDescription vmstate_virtio_device_endian = {
1849     .name = "virtio/device_endian",
1850     .version_id = 1,
1851     .minimum_version_id = 1,
1852     .needed = &virtio_device_endian_needed,
1853     .fields = (VMStateField[]) {
1854         VMSTATE_UINT8(device_endian, VirtIODevice),
1855         VMSTATE_END_OF_LIST()
1856     }
1857 };
1858 
1859 static const VMStateDescription vmstate_virtio_64bit_features = {
1860     .name = "virtio/64bit_features",
1861     .version_id = 1,
1862     .minimum_version_id = 1,
1863     .needed = &virtio_64bit_features_needed,
1864     .fields = (VMStateField[]) {
1865         VMSTATE_UINT64(guest_features, VirtIODevice),
1866         VMSTATE_END_OF_LIST()
1867     }
1868 };
1869 
1870 static const VMStateDescription vmstate_virtio_broken = {
1871     .name = "virtio/broken",
1872     .version_id = 1,
1873     .minimum_version_id = 1,
1874     .needed = &virtio_broken_needed,
1875     .fields = (VMStateField[]) {
1876         VMSTATE_BOOL(broken, VirtIODevice),
1877         VMSTATE_END_OF_LIST()
1878     }
1879 };
1880 
1881 static const VMStateDescription vmstate_virtio = {
1882     .name = "virtio",
1883     .version_id = 1,
1884     .minimum_version_id = 1,
1886     .fields = (VMStateField[]) {
1887         VMSTATE_END_OF_LIST()
1888     },
1889     .subsections = (const VMStateDescription*[]) {
1890         &vmstate_virtio_device_endian,
1891         &vmstate_virtio_64bit_features,
1892         &vmstate_virtio_virtqueues,
1893         &vmstate_virtio_ringsize,
1894         &vmstate_virtio_broken,
1895         &vmstate_virtio_extra_state,
1896         NULL
1897     }
1898 };
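/*
 * Each subsection above is sent only when its .needed callback returns true
 * on the source, so streams stay compatible with older QEMU versions that
 * do not know the subsection. A minimal sketch of the pattern, using a
 * hypothetical example_flag field (not part of VirtIODevice):
 *
 *     static bool virtio_example_needed(void *opaque)
 *     {
 *         VirtIODevice *vdev = opaque;
 *
 *         return vdev->example_flag;                     // hypothetical
 *     }
 *
 *     static const VMStateDescription vmstate_virtio_example = {
 *         .name = "virtio/example",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .needed = &virtio_example_needed,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_BOOL(example_flag, VirtIODevice),  // hypothetical
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */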
1899 
1900 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
1901 {
1902     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1903     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1904     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1905     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
1906     int i;
1907 
1908     if (k->save_config) {
1909         k->save_config(qbus->parent, f);
1910     }
1911 
1912     qemu_put_8s(f, &vdev->status);
1913     qemu_put_8s(f, &vdev->isr);
1914     qemu_put_be16s(f, &vdev->queue_sel);
1915     qemu_put_be32s(f, &guest_features_lo);
1916     qemu_put_be32(f, vdev->config_len);
1917     qemu_put_buffer(f, vdev->config, vdev->config_len);
1918 
1919     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1922     }
1923 
1924     qemu_put_be32(f, i);
1925 
1926     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
1929 
1930         qemu_put_be32(f, vdev->vq[i].vring.num);
1931         if (k->has_variable_vring_alignment) {
1932             qemu_put_be32(f, vdev->vq[i].vring.align);
1933         }
1934         /*
1935          * Save desc now, the rest of the ring addresses are saved in
1936          * subsections for VIRTIO-1 devices.
1937          */
1938         qemu_put_be64(f, vdev->vq[i].vring.desc);
1939         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
1940         if (k->save_queue) {
1941             k->save_queue(qbus->parent, i, f);
1942         }
1943     }
1944 
1945     if (vdc->save != NULL) {
1946         vdc->save(vdev, f);
1947     }
1948 
1949     if (vdc->vmsd) {
1950         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
1951         if (ret) {
1952             return ret;
1953         }
1954     }
1955 
1956     /* Subsections */
1957     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
1958 }
1959 
1960 /* A wrapper for use as a VMState .put function */
1961 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
1962                               VMStateField *field, QJSON *vmdesc)
1963 {
1964     return virtio_save(VIRTIO_DEVICE(opaque), f);
1965 }
1966 
1967 /* A wrapper for use as a VMState .get function */
1968 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
1969                              VMStateField *field)
1970 {
1971     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
1972     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
1973 
1974     return virtio_load(vdev, f, dc->vmsd->version_id);
1975 }
1976 
1977 const VMStateInfo  virtio_vmstate_info = {
1978     .name = "virtio",
1979     .get = virtio_device_get,
1980     .put = virtio_device_put,
1981 };
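/*
 * Devices hook this VMStateInfo up through the VMSTATE_VIRTIO_DEVICE field
 * macro from hw/virtio/virtio.h, so that virtio_save()/virtio_load() run as
 * part of an ordinary VMStateDescription. A sketch of the usual shape
 * ("virtio-foo" is a placeholder device):
 *
 *     static const VMStateDescription vmstate_virtio_foo = {
 *         .name = "virtio-foo",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_VIRTIO_DEVICE,
 *             VMSTATE_END_OF_LIST()
 *         },
 *     };
 */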
1982 
1983 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
1984 {
1985     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1986     bool bad = (val & ~(vdev->host_features)) != 0;
1987 
1988     val &= vdev->host_features;
1989     if (k->set_features) {
1990         k->set_features(vdev, val);
1991     }
1992     vdev->guest_features = val;
1993     return bad ? -1 : 0;
1994 }
1995 
1996 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
1997 {
    /*
1999      * The driver must not attempt to set features after feature negotiation
2000      * has finished.
2001      */
2002     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2003         return -EINVAL;
2004     }
2005     return virtio_set_features_nocheck(vdev, val);
2006 }
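/*
 * Sketch of the negotiation ordering a transport is expected to follow
 * (the error handling here is illustrative only): the guest's feature
 * write must be applied before it sets FEATURES_OK, and
 * virtio_set_features() enforces this by failing once the bit is set:
 *
 *     if (virtio_set_features(vdev, guest_val) < 0) {
 *         // guest wrote features after FEATURES_OK, or asked for
 *         // bits outside host_features: refuse to accept them
 *     }
 *     virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_FEATURES_OK);
 */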
2007 
2008 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2009 {
2010     int i, ret;
2011     int32_t config_len;
2012     uint32_t num;
2013     uint32_t features;
2014     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2015     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2016     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2017 
2018     /*
2019      * We poison the endianness to ensure it does not get used before
2020      * subsections have been loaded.
2021      */
2022     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2023 
2024     if (k->load_config) {
2025         ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
2028     }
2029 
2030     qemu_get_8s(f, &vdev->status);
2031     qemu_get_8s(f, &vdev->isr);
2032     qemu_get_be16s(f, &vdev->queue_sel);
2033     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
2034         return -1;
2035     }
2036     qemu_get_be32s(f, &features);
2037 
    /*
     * Temporarily set the low bits of guest_features; the virtio-net load
     * code needs them to test for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: in the future, devices should always test host features instead;
     * don't create new dependencies like this one.
     */
2046     vdev->guest_features = features;
2047 
2048     config_len = qemu_get_be32(f);
2049 
2050     /*
2051      * There are cases where the incoming config can be bigger or smaller
2052      * than what we have; so load what we have space for, and skip
2053      * any excess that's in the stream.
2054      */
2055     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
2056 
2057     while (config_len > vdev->config_len) {
2058         qemu_get_byte(f);
2059         config_len--;
2060     }
2061 
2062     num = qemu_get_be32(f);
2063 
2064     if (num > VIRTIO_QUEUE_MAX) {
2065         error_report("Invalid number of virtqueues: 0x%x", num);
2066         return -1;
2067     }
2068 
2069     for (i = 0; i < num; i++) {
2070         vdev->vq[i].vring.num = qemu_get_be32(f);
2071         if (k->has_variable_vring_alignment) {
2072             vdev->vq[i].vring.align = qemu_get_be32(f);
2073         }
2074         vdev->vq[i].vring.desc = qemu_get_be64(f);
2075         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
2076         vdev->vq[i].signalled_used_valid = false;
2077         vdev->vq[i].notification = true;
2078 
2079         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
2080             error_report("VQ %d address 0x0 "
2081                          "inconsistent with Host index 0x%x",
2082                          i, vdev->vq[i].last_avail_idx);
2083             return -1;
2084         }
2085         if (k->load_queue) {
2086             ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
2089         }
2090     }
2091 
2092     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2093 
2094     if (vdc->load != NULL) {
2095         ret = vdc->load(vdev, f, version_id);
2096         if (ret) {
2097             return ret;
2098         }
2099     }
2100 
2101     if (vdc->vmsd) {
2102         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
2103         if (ret) {
2104             return ret;
2105         }
2106     }
2107 
2108     /* Subsections */
2109     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
2110     if (ret) {
2111         return ret;
2112     }
2113 
2114     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
2115         vdev->device_endian = virtio_default_endian();
2116     }
2117 
2118     if (virtio_64bit_features_needed(vdev)) {
2119         /*
2120          * Subsection load filled vdev->guest_features.  Run them
2121          * through virtio_set_features to sanity-check them against
2122          * host_features.
2123          */
2124         uint64_t features64 = vdev->guest_features;
2125         if (virtio_set_features_nocheck(vdev, features64) < 0) {
2126             error_report("Features 0x%" PRIx64 " unsupported. "
2127                          "Allowed features: 0x%" PRIx64,
2128                          features64, vdev->host_features);
2129             return -1;
2130         }
2131     } else {
2132         if (virtio_set_features_nocheck(vdev, features) < 0) {
2133             error_report("Features 0x%x unsupported. "
2134                          "Allowed features: 0x%" PRIx64,
2135                          features, vdev->host_features);
2136             return -1;
2137         }
2138     }
2139 
2140     rcu_read_lock();
2141     for (i = 0; i < num; i++) {
2142         if (vdev->vq[i].vring.desc) {
2143             uint16_t nheads;
2144 
2145             /*
2146              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
2147              * only the region cache needs to be set up.  Legacy devices need
2148              * to calculate used and avail ring addresses based on the desc
2149              * address.
2150              */
2151             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2152                 virtio_init_region_cache(vdev, i);
2153             } else {
2154                 virtio_queue_update_rings(vdev, i);
2155             }
2156 
2157             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check the guest isn't doing strange things with
             * descriptor numbers. */
2159             if (nheads > vdev->vq[i].vring.num) {
2160                 error_report("VQ %d size 0x%x Guest index 0x%x "
2161                              "inconsistent with Host index 0x%x: delta 0x%x",
2162                              i, vdev->vq[i].vring.num,
2163                              vring_avail_idx(&vdev->vq[i]),
2164                              vdev->vq[i].last_avail_idx, nheads);
                rcu_read_unlock();
                return -1;
2166             }
2167             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
2168             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
2169 
            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since the maximum ring size is less than UINT16_MAX, the
             * subtraction below, modulo UINT16_MAX + 1, is safe.
             */
2176             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
2177                                 vdev->vq[i].used_idx);
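            /*
             * Worked example of the wrap-around case: last_avail_idx =
             * 0x0002 and used_idx = 0xfffe give (uint16_t)(0x0002 - 0xfffe)
             * = 0x0004, i.e. 4 elements still in flight across the 16-bit
             * wrap.
             */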
2178             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
2179                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
2180                              "used_idx 0x%x",
2181                              i, vdev->vq[i].vring.num,
2182                              vdev->vq[i].last_avail_idx,
2183                              vdev->vq[i].used_idx);
                rcu_read_unlock();
                return -1;
2185             }
2186         }
2187     }
2188     rcu_read_unlock();
2189 
2190     return 0;
2191 }
2192 
2193 void virtio_cleanup(VirtIODevice *vdev)
2194 {
2195     qemu_del_vm_change_state_handler(vdev->vmstate);
2196 }
2197 
2198 static void virtio_vmstate_change(void *opaque, int running, RunState state)
2199 {
2200     VirtIODevice *vdev = opaque;
2201     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2202     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2203     bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
2204     vdev->vm_running = running;
2205 
2206     if (backend_run) {
2207         virtio_set_status(vdev, vdev->status);
2208     }
2209 
2210     if (k->vmstate_change) {
2211         k->vmstate_change(qbus->parent, backend_run);
2212     }
2213 
2214     if (!backend_run) {
2215         virtio_set_status(vdev, vdev->status);
2216     }
2217 }
2218 
2219 void virtio_instance_init_common(Object *proxy_obj, void *data,
2220                                  size_t vdev_size, const char *vdev_name)
2221 {
2222     DeviceState *vdev = data;
2223 
2224     object_initialize(vdev, vdev_size, vdev_name);
2225     object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
2226     object_unref(OBJECT(vdev));
2227     qdev_alias_all_properties(vdev, proxy_obj);
2228 }
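/*
 * A transport proxy normally calls this from its instance_init to embed the
 * backend device as a child object, along the lines of (the virtio-foo
 * names are placeholders):
 *
 *     static void virtio_foo_pci_instance_init(Object *obj)
 *     {
 *         VirtIOFooPCI *dev = VIRTIO_FOO_PCI(obj);
 *
 *         virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
 *                                     TYPE_VIRTIO_FOO);
 *     }
 */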
2229 
2230 void virtio_init(VirtIODevice *vdev, const char *name,
2231                  uint16_t device_id, size_t config_size)
2232 {
2233     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2234     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2235     int i;
2236     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
2237 
2238     if (nvectors) {
2239         vdev->vector_queues =
2240             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
2241     }
2242 
2243     vdev->device_id = device_id;
2244     vdev->status = 0;
2245     atomic_set(&vdev->isr, 0);
2246     vdev->queue_sel = 0;
2247     vdev->config_vector = VIRTIO_NO_VECTOR;
2248     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
2249     vdev->vm_running = runstate_is_running();
2250     vdev->broken = false;
2251     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2252         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
2253         vdev->vq[i].vdev = vdev;
2254         vdev->vq[i].queue_index = i;
2255     }
2256 
2257     vdev->name = name;
2258     vdev->config_len = config_size;
2259     if (vdev->config_len) {
2260         vdev->config = g_malloc0(config_size);
2261     } else {
2262         vdev->config = NULL;
2263     }
2264     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
2265                                                      vdev);
2266     vdev->device_endian = virtio_default_endian();
2267     vdev->use_guest_notifier_mask = true;
2268 }
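/*
 * A device's realize method is expected to call virtio_init() before adding
 * its queues; a minimal sketch (the device ID, config struct, queue size
 * and handler are placeholders):
 *
 *     virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
 *                 sizeof(struct virtio_foo_config));
 *     vq = virtio_add_queue(vdev, 128, handle_foo_output);
 */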
2269 
2270 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
2271 {
2272     return vdev->vq[n].vring.desc;
2273 }
2274 
2275 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
2276 {
2277     return vdev->vq[n].vring.avail;
2278 }
2279 
2280 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
2281 {
2282     return vdev->vq[n].vring.used;
2283 }
2284 
2285 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
2286 {
2287     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
2288 }
2289 
2290 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
2291 {
2292     return offsetof(VRingAvail, ring) +
2293         sizeof(uint16_t) * vdev->vq[n].vring.num;
2294 }
2295 
2296 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
2297 {
2298     return offsetof(VRingUsed, ring) +
2299         sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
2300 }
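/*
 * For a 128-entry ring the three helpers above work out to:
 * desc: 16 * 128 = 2048 bytes, avail: 4 + 2 * 128 = 260 bytes,
 * used: 4 + 8 * 128 = 1028 bytes. None of these sizes include the extra
 * 2-byte used_event/avail_event slot negotiated with
 * VIRTIO_RING_F_EVENT_IDX.
 */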
2301 
2302 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
2303 {
2304     return vdev->vq[n].last_avail_idx;
2305 }
2306 
2307 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
2308 {
2309     vdev->vq[n].last_avail_idx = idx;
2310     vdev->vq[n].shadow_avail_idx = idx;
2311 }
2312 
2313 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
2314 {
2315     rcu_read_lock();
2316     if (vdev->vq[n].vring.desc) {
2317         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
2318     }
2319     rcu_read_unlock();
2320 }
2321 
2322 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
2323 {
2324     vdev->vq[n].signalled_used_valid = false;
2325 }
2326 
2327 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
2328 {
2329     return vdev->vq + n;
2330 }
2331 
2332 uint16_t virtio_get_queue_index(VirtQueue *vq)
2333 {
2334     return vq->queue_index;
2335 }
2336 
2337 static void virtio_queue_guest_notifier_read(EventNotifier *n)
2338 {
2339     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
2340     if (event_notifier_test_and_clear(n)) {
2341         virtio_irq(vq);
2342     }
2343 }
2344 
2345 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
2346                                                 bool with_irqfd)
2347 {
2348     if (assign && !with_irqfd) {
2349         event_notifier_set_handler(&vq->guest_notifier,
2350                                    virtio_queue_guest_notifier_read);
2351     } else {
2352         event_notifier_set_handler(&vq->guest_notifier, NULL);
2353     }
2354     if (!assign) {
2355         /* Test and clear notifier before closing it,
2356          * in case poll callback didn't have time to run. */
2357         virtio_queue_guest_notifier_read(&vq->guest_notifier);
2358     }
2359 }
2360 
2361 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
2362 {
2363     return &vq->guest_notifier;
2364 }
2365 
2366 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
2367 {
2368     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2369     if (event_notifier_test_and_clear(n)) {
2370         virtio_queue_notify_aio_vq(vq);
2371     }
2372 }
2373 
2374 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
2375 {
2376     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2377 
2378     virtio_queue_set_notification(vq, 0);
2379 }
2380 
2381 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
2382 {
2383     EventNotifier *n = opaque;
2384     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2385     bool progress;
2386 
2387     if (!vq->vring.desc || virtio_queue_empty(vq)) {
2388         return false;
2389     }
2390 
2391     progress = virtio_queue_notify_aio_vq(vq);
2392 
2393     /* In case the handler function re-enabled notifications */
2394     virtio_queue_set_notification(vq, 0);
2395     return progress;
2396 }
2397 
2398 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
2399 {
2400     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2401 
2402     /* Caller polls once more after this to catch requests that race with us */
2403     virtio_queue_set_notification(vq, 1);
2404 }
2405 
2406 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
2407                                                 VirtIOHandleAIOOutput handle_output)
2408 {
2409     if (handle_output) {
2410         vq->handle_aio_output = handle_output;
2411         aio_set_event_notifier(ctx, &vq->host_notifier, true,
2412                                virtio_queue_host_notifier_aio_read,
2413                                virtio_queue_host_notifier_aio_poll);
2414         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
2415                                     virtio_queue_host_notifier_aio_poll_begin,
2416                                     virtio_queue_host_notifier_aio_poll_end);
2417     } else {
2418         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling the event,
         * in case the poll callback didn't have time to run. */
2421         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
2422         vq->handle_aio_output = NULL;
2423     }
2424 }
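/*
 * Dataplane code uses this helper to move queue processing into an
 * IOThread's AioContext; a sketch of the usual attach/detach sequence
 * (handle_output_aio is a placeholder handler):
 *
 *     aio_context_acquire(ctx);
 *     virtio_queue_aio_set_host_notifier_handler(vq, ctx, handle_output_aio);
 *     aio_context_release(ctx);
 *     ...
 *     aio_context_acquire(ctx);
 *     virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);
 *     aio_context_release(ctx);
 */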
2425 
2426 void virtio_queue_host_notifier_read(EventNotifier *n)
2427 {
2428     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
2429     if (event_notifier_test_and_clear(n)) {
2430         virtio_queue_notify_vq(vq);
2431     }
2432 }
2433 
2434 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
2435 {
2436     return &vq->host_notifier;
2437 }
2438 
2439 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
2440 {
2441     g_free(vdev->bus_name);
2442     vdev->bus_name = g_strdup(bus_name);
2443 }
2444 
2445 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
2446 {
2447     va_list ap;
2448 
2449     va_start(ap, fmt);
2450     error_vreport(fmt, ap);
2451     va_end(ap);
2452 
2453     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2454         virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
2455         virtio_notify_config(vdev);
2456     }
2457 
2458     vdev->broken = true;
2459 }
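/*
 * Illustrative use from a queue handler (the sanity check is hypothetical):
 * report the fault, mark the device NEEDS_RESET/broken, and stop touching
 * the ring:
 *
 *     if (elem->out_num == 0) {
 *         virtio_error(vdev, "%s: request without out descriptors",
 *                      vdev->name);
 *         virtqueue_detach_element(vq, elem, 0);
 *         g_free(elem);
 *         return;
 *     }
 */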
2460 
2461 static void virtio_memory_listener_commit(MemoryListener *listener)
2462 {
2463     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
2464     int i;
2465 
2466     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2467         if (vdev->vq[i].vring.num == 0) {
2468             break;
2469         }
2470         virtio_init_region_cache(vdev, i);
2471     }
2472 }
2473 
2474 static void virtio_device_realize(DeviceState *dev, Error **errp)
2475 {
2476     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2477     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2478     Error *err = NULL;
2479 
2480     /* Devices should either use vmsd or the load/save methods */
2481     assert(!vdc->vmsd || !vdc->load);
2482 
2483     if (vdc->realize != NULL) {
2484         vdc->realize(dev, &err);
2485         if (err != NULL) {
2486             error_propagate(errp, err);
2487             return;
2488         }
2489     }
2490 
2491     virtio_bus_device_plugged(vdev, &err);
2492     if (err != NULL) {
2493         error_propagate(errp, err);
2494         return;
2495     }
2496 
2497     vdev->listener.commit = virtio_memory_listener_commit;
2498     memory_listener_register(&vdev->listener, vdev->dma_as);
2499 }
2500 
2501 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
2502 {
2503     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2504     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
2505     Error *err = NULL;
2506 
2507     virtio_bus_device_unplugged(vdev);
2508 
2509     if (vdc->unrealize != NULL) {
2510         vdc->unrealize(dev, &err);
2511         if (err != NULL) {
2512             error_propagate(errp, err);
2513             return;
2514         }
2515     }
2516 
2517     g_free(vdev->bus_name);
2518     vdev->bus_name = NULL;
2519 }
2520 
2521 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
2522 {
2523     int i;
2524     if (!vdev->vq) {
2525         return;
2526     }
2527 
2528     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2529         if (vdev->vq[i].vring.num == 0) {
2530             break;
2531         }
2532         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2533     }
2534     g_free(vdev->vq);
2535 }
2536 
2537 static void virtio_device_instance_finalize(Object *obj)
2538 {
2539     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
2540 
2541     memory_listener_unregister(&vdev->listener);
2542     virtio_device_free_virtqueues(vdev);
2543 
2544     g_free(vdev->config);
2545     g_free(vdev->vector_queues);
2546 }
2547 
2548 static Property virtio_properties[] = {
2549     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
2550     DEFINE_PROP_END_OF_LIST(),
2551 };
2552 
2553 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
2554 {
2555     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2556     int n, r, err;
2557 
2558     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2559         VirtQueue *vq = &vdev->vq[n];
2560         if (!virtio_queue_get_num(vdev, n)) {
2561             continue;
2562         }
2563         r = virtio_bus_set_host_notifier(qbus, n, true);
2564         if (r < 0) {
2565             err = r;
2566             goto assign_error;
2567         }
2568         event_notifier_set_handler(&vq->host_notifier,
2569                                    virtio_queue_host_notifier_read);
2570     }
2571 
2572     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2573         /* Kick right away to begin processing requests already in vring */
2574         VirtQueue *vq = &vdev->vq[n];
2575         if (!vq->vring.num) {
2576             continue;
2577         }
2578         event_notifier_set(&vq->host_notifier);
2579     }
2580     return 0;
2581 
2582 assign_error:
2583     while (--n >= 0) {
2584         VirtQueue *vq = &vdev->vq[n];
2585         if (!virtio_queue_get_num(vdev, n)) {
2586             continue;
2587         }
2588 
2589         event_notifier_set_handler(&vq->host_notifier, NULL);
2590         r = virtio_bus_set_host_notifier(qbus, n, false);
2591         assert(r >= 0);
2592     }
2593     return err;
2594 }
2595 
2596 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
2597 {
2598     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2599     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2600 
2601     return virtio_bus_start_ioeventfd(vbus);
2602 }
2603 
2604 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
2605 {
2606     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2607     int n, r;
2608 
2609     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
2610         VirtQueue *vq = &vdev->vq[n];
2611 
2612         if (!virtio_queue_get_num(vdev, n)) {
2613             continue;
2614         }
2615         event_notifier_set_handler(&vq->host_notifier, NULL);
2616         r = virtio_bus_set_host_notifier(qbus, n, false);
2617         assert(r >= 0);
2618     }
2619 }
2620 
2621 void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
2622 {
2623     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2624     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2625 
2626     virtio_bus_stop_ioeventfd(vbus);
2627 }
2628 
2629 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
2630 {
2631     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2632     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2633 
2634     return virtio_bus_grab_ioeventfd(vbus);
2635 }
2636 
2637 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
2638 {
2639     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2640     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2641 
2642     virtio_bus_release_ioeventfd(vbus);
2643 }
2644 
2645 static void virtio_device_class_init(ObjectClass *klass, void *data)
2646 {
2647     /* Set the default value here. */
2648     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2649     DeviceClass *dc = DEVICE_CLASS(klass);
2650 
2651     dc->realize = virtio_device_realize;
2652     dc->unrealize = virtio_device_unrealize;
2653     dc->bus_type = TYPE_VIRTIO_BUS;
2654     dc->props = virtio_properties;
2655     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
2656     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
2657 
2658     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
2659 }
2660 
2661 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
2662 {
2663     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2664     VirtioBusState *vbus = VIRTIO_BUS(qbus);
2665 
2666     return virtio_bus_ioeventfd_enabled(vbus);
2667 }
2668 
2669 static const TypeInfo virtio_device_info = {
2670     .name = TYPE_VIRTIO_DEVICE,
2671     .parent = TYPE_DEVICE,
2672     .instance_size = sizeof(VirtIODevice),
2673     .class_init = virtio_device_class_init,
2674     .instance_finalize = virtio_device_instance_finalize,
2675     .abstract = true,
2676     .class_size = sizeof(VirtioDeviceClass),
2677 };
2678 
2679 static void virtio_register_types(void)
2680 {
2681     type_register_static(&virtio_device_info);
2682 }
2683 
2684 type_init(virtio_register_types)
2685