xref: /openbmc/qemu/hw/virtio/virtio.c (revision 6016b7b4)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "cpu.h"
17 #include "trace.h"
18 #include "qemu/error-report.h"
19 #include "qemu/log.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/module.h"
22 #include "hw/virtio/virtio.h"
23 #include "migration/qemu-file-types.h"
24 #include "qemu/atomic.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/qdev-properties.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "sysemu/dma.h"
29 #include "sysemu/runstate.h"
30 #include "standard-headers/linux/virtio_ids.h"
31 
32 /*
33  * The alignment to use between consumer and producer parts of vring.
34  * x86 pagesize again. This is the default, used by transports like PCI
35  * which don't provide a means for the guest to tell the host the alignment.
36  */
37 #define VIRTIO_PCI_VRING_ALIGN         4096
38 
39 typedef struct VRingDesc
40 {
41     uint64_t addr;
42     uint32_t len;
43     uint16_t flags;
44     uint16_t next;
45 } VRingDesc;
46 
47 typedef struct VRingPackedDesc {
48     uint64_t addr;
49     uint32_t len;
50     uint16_t id;
51     uint16_t flags;
52 } VRingPackedDesc;
53 
54 typedef struct VRingAvail
55 {
56     uint16_t flags;
57     uint16_t idx;
58     uint16_t ring[];
59 } VRingAvail;
60 
61 typedef struct VRingUsedElem
62 {
63     uint32_t id;
64     uint32_t len;
65 } VRingUsedElem;
66 
67 typedef struct VRingUsed
68 {
69     uint16_t flags;
70     uint16_t idx;
71     VRingUsedElem ring[];
72 } VRingUsed;
73 
74 typedef struct VRingMemoryRegionCaches {
75     struct rcu_head rcu;
76     MemoryRegionCache desc;
77     MemoryRegionCache avail;
78     MemoryRegionCache used;
79 } VRingMemoryRegionCaches;
80 
81 typedef struct VRing
82 {
83     unsigned int num;
84     unsigned int num_default;
85     unsigned int align;
86     hwaddr desc;
87     hwaddr avail;
88     hwaddr used;
89     VRingMemoryRegionCaches *caches;
90 } VRing;
91 
92 typedef struct VRingPackedDescEvent {
93     uint16_t off_wrap;
94     uint16_t flags;
95 } VRingPackedDescEvent;
96 
97 struct VirtQueue
98 {
99     VRing vring;
100     VirtQueueElement *used_elems;
101 
102     /* Next head to pop */
103     uint16_t last_avail_idx;
104     bool last_avail_wrap_counter;
105 
106     /* Last avail_idx read from VQ. */
107     uint16_t shadow_avail_idx;
108     bool shadow_avail_wrap_counter;
109 
110     uint16_t used_idx;
111     bool used_wrap_counter;
112 
113     /* Last used index value we have signalled on */
114     uint16_t signalled_used;
115 
116     /* Whether signalled_used is valid or not */
117     bool signalled_used_valid;
118 
119     /* Notification enabled? */
120     bool notification;
121 
122     uint16_t queue_index;
123 
124     unsigned int inuse;
125 
126     uint16_t vector;
127     VirtIOHandleOutput handle_output;
128     VirtIOHandleAIOOutput handle_aio_output;
129     VirtIODevice *vdev;
130     EventNotifier guest_notifier;
131     EventNotifier host_notifier;
132     bool host_notifier_enabled;
133     QLIST_ENTRY(VirtQueue) node;
134 };
135 
136 /* Called within call_rcu().  */
137 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
138 {
139     assert(caches != NULL);
140     address_space_cache_destroy(&caches->desc);
141     address_space_cache_destroy(&caches->avail);
142     address_space_cache_destroy(&caches->used);
143     g_free(caches);
144 }
145 
146 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
147 {
148     VRingMemoryRegionCaches *caches;
149 
150     caches = qatomic_read(&vq->vring.caches);
151     qatomic_rcu_set(&vq->vring.caches, NULL);
152     if (caches) {
153         call_rcu(caches, virtio_free_region_cache, rcu);
154     }
155 }
156 
157 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
158 {
159     VirtQueue *vq = &vdev->vq[n];
160     VRingMemoryRegionCaches *old = vq->vring.caches;
161     VRingMemoryRegionCaches *new = NULL;
162     hwaddr addr, size;
163     int64_t len;
164     bool packed;
165 
166 
167     addr = vq->vring.desc;
168     if (!addr) {
169         goto out_no_cache;
170     }
171     new = g_new0(VRingMemoryRegionCaches, 1);
172     size = virtio_queue_get_desc_size(vdev, n);
173     packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
174                                    true : false;
175     len = address_space_cache_init(&new->desc, vdev->dma_as,
176                                    addr, size, packed);
177     if (len < size) {
178         virtio_error(vdev, "Cannot map desc");
179         goto err_desc;
180     }
181 
182     size = virtio_queue_get_used_size(vdev, n);
183     len = address_space_cache_init(&new->used, vdev->dma_as,
184                                    vq->vring.used, size, true);
185     if (len < size) {
186         virtio_error(vdev, "Cannot map used");
187         goto err_used;
188     }
189 
190     size = virtio_queue_get_avail_size(vdev, n);
191     len = address_space_cache_init(&new->avail, vdev->dma_as,
192                                    vq->vring.avail, size, false);
193     if (len < size) {
194         virtio_error(vdev, "Cannot map avail");
195         goto err_avail;
196     }
197 
198     qatomic_rcu_set(&vq->vring.caches, new);
199     if (old) {
200         call_rcu(old, virtio_free_region_cache, rcu);
201     }
202     return;
203 
204 err_avail:
205     address_space_cache_destroy(&new->avail);
206 err_used:
207     address_space_cache_destroy(&new->used);
208 err_desc:
209     address_space_cache_destroy(&new->desc);
210 out_no_cache:
211     g_free(new);
212     virtio_virtqueue_reset_region_cache(vq);
213 }
214 
215 /* virt queue functions */
216 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
217 {
218     VRing *vring = &vdev->vq[n].vring;
219 
220     if (!vring->num || !vring->desc || !vring->align) {
221         /* not yet setup -> nothing to do */
222         return;
223     }
224     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
225     vring->used = vring_align(vring->avail +
226                               offsetof(VRingAvail, ring[vring->num]),
227                               vring->align);
228     virtio_init_region_cache(vdev, n);
229 }
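
/*
 * Worked example (illustrative only, not used by the code): for a split
 * ring with num = 256 and the PCI default align of 4096, the layout
 * computed above is
 *
 *     desc  = base
 *     avail = base + 256 * 16                      = base + 0x1000
 *     used  = ALIGN(avail + 4 + 256 * 2, 4096)     = base + 0x2000
 *
 * i.e. 16 bytes per VRingDesc, then the VRingAvail header (flags + idx)
 * plus one uint16_t ring entry per descriptor, rounded up to the vring
 * alignment before the used ring starts.
 */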
230 
231 /* Called within rcu_read_lock().  */
232 static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
233                                   MemoryRegionCache *cache, int i)
234 {
235     address_space_read_cached(cache, i * sizeof(VRingDesc),
236                               desc, sizeof(VRingDesc));
237     virtio_tswap64s(vdev, &desc->addr);
238     virtio_tswap32s(vdev, &desc->len);
239     virtio_tswap16s(vdev, &desc->flags);
240     virtio_tswap16s(vdev, &desc->next);
241 }
242 
243 static void vring_packed_event_read(VirtIODevice *vdev,
244                                     MemoryRegionCache *cache,
245                                     VRingPackedDescEvent *e)
246 {
247     hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
248     hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
249 
250     e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
251     /* Make sure flags is seen before off_wrap */
252     smp_rmb();
253     e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
255 }
256 
257 static void vring_packed_off_wrap_write(VirtIODevice *vdev,
258                                         MemoryRegionCache *cache,
259                                         uint16_t off_wrap)
260 {
261     hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
262 
263     virtio_stw_phys_cached(vdev, cache, off, off_wrap);
264     address_space_cache_invalidate(cache, off, sizeof(off_wrap));
265 }
266 
267 static void vring_packed_flags_write(VirtIODevice *vdev,
268                                      MemoryRegionCache *cache, uint16_t flags)
269 {
270     hwaddr off = offsetof(VRingPackedDescEvent, flags);
271 
272     virtio_stw_phys_cached(vdev, cache, off, flags);
273     address_space_cache_invalidate(cache, off, sizeof(flags));
274 }
275 
276 /* Called within rcu_read_lock().  */
277 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
278 {
279     return qatomic_rcu_read(&vq->vring.caches);
280 }
281 
282 /* Called within rcu_read_lock().  */
283 static inline uint16_t vring_avail_flags(VirtQueue *vq)
284 {
285     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
286     hwaddr pa = offsetof(VRingAvail, flags);
287 
288     if (!caches) {
289         return 0;
290     }
291 
292     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
293 }
294 
295 /* Called within rcu_read_lock().  */
296 static inline uint16_t vring_avail_idx(VirtQueue *vq)
297 {
298     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
299     hwaddr pa = offsetof(VRingAvail, idx);
300 
301     if (!caches) {
302         return 0;
303     }
304 
305     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
306     return vq->shadow_avail_idx;
307 }
308 
309 /* Called within rcu_read_lock().  */
310 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
311 {
312     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
313     hwaddr pa = offsetof(VRingAvail, ring[i]);
314 
315     if (!caches) {
316         return 0;
317     }
318 
319     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
320 }
321 
322 /* Called within rcu_read_lock().  */
323 static inline uint16_t vring_get_used_event(VirtQueue *vq)
324 {
325     return vring_avail_ring(vq, vq->vring.num);
326 }
327 
328 /* Called within rcu_read_lock().  */
329 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
330                                     int i)
331 {
332     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
333     hwaddr pa = offsetof(VRingUsed, ring[i]);
334 
335     if (!caches) {
336         return;
337     }
338 
339     virtio_tswap32s(vq->vdev, &uelem->id);
340     virtio_tswap32s(vq->vdev, &uelem->len);
341     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
342     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
343 }
344 
345 /* Called within rcu_read_lock().  */
346 static uint16_t vring_used_idx(VirtQueue *vq)
347 {
348     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
349     hwaddr pa = offsetof(VRingUsed, idx);
350 
351     if (!caches) {
352         return 0;
353     }
354 
355     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
356 }
357 
358 /* Called within rcu_read_lock().  */
359 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
360 {
361     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
362     hwaddr pa = offsetof(VRingUsed, idx);
363 
364     if (caches) {
365         virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
366         address_space_cache_invalidate(&caches->used, pa, sizeof(val));
367     }
368 
369     vq->used_idx = val;
370 }
371 
372 /* Called within rcu_read_lock().  */
373 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
374 {
375     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
376     VirtIODevice *vdev = vq->vdev;
377     hwaddr pa = offsetof(VRingUsed, flags);
378     uint16_t flags;
379 
380     if (!caches) {
381         return;
382     }
383 
384     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
385     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
386     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
387 }
388 
389 /* Called within rcu_read_lock().  */
390 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
391 {
392     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
393     VirtIODevice *vdev = vq->vdev;
394     hwaddr pa = offsetof(VRingUsed, flags);
395     uint16_t flags;
396 
397     if (!caches) {
398         return;
399     }
400 
401     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
402     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
403     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
404 }
405 
406 /* Called within rcu_read_lock().  */
407 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
408 {
409     VRingMemoryRegionCaches *caches;
410     hwaddr pa;
411     if (!vq->notification) {
412         return;
413     }
414 
415     caches = vring_get_region_caches(vq);
416     if (!caches) {
417         return;
418     }
419 
420     pa = offsetof(VRingUsed, ring[vq->vring.num]);
421     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
422     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
423 }
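
/*
 * Illustrative note (informal, based on the VIRTIO_RING_F_EVENT_IDX
 * semantics rather than code in this file): the avail_event word written
 * above lives just past the used ring and tells the driver how far the
 * device has consumed.  E.g. writing avail_event = 10 after processing up
 * to avail index 10 asks the driver to kick again only once it publishes
 * an entry that moves its avail idx past that value.
 */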
424 
425 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
426 {
427     RCU_READ_LOCK_GUARD();
428 
429     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
430         vring_set_avail_event(vq, vring_avail_idx(vq));
431     } else if (enable) {
432         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
433     } else {
434         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
435     }
436     if (enable) {
437         /* Expose avail event/used flags before caller checks the avail idx. */
438         smp_mb();
439     }
440 }
441 
442 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
443 {
444     uint16_t off_wrap;
445     VRingPackedDescEvent e;
446     VRingMemoryRegionCaches *caches;
447 
448     RCU_READ_LOCK_GUARD();
449     caches = vring_get_region_caches(vq);
450     if (!caches) {
451         return;
452     }
453 
454     vring_packed_event_read(vq->vdev, &caches->used, &e);
455 
456     if (!enable) {
457         e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
458     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
459         off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
460         vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
461         /* Make sure off_wrap is written before flags */
462         smp_wmb();
463         e.flags = VRING_PACKED_EVENT_FLAG_DESC;
464     } else {
465         e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
466     }
467 
468     vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
469     if (enable) {
470         /* Expose avail event/used flags before caller checks the avail idx. */
471         smp_mb();
472     }
473 }
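
/*
 * Worked example of the off_wrap encoding above (plain arithmetic,
 * illustrative only): the low 15 bits carry the descriptor offset and
 * bit 15 carries the wrap counter, so
 *
 *     shadow_avail_idx = 5, wrap counter = 1  ->  off_wrap = 5 | (1 << 15)
 *                                                          = 0x8005
 *
 * which is what an event-idx-aware driver decodes to decide when the next
 * notification is wanted.
 */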
474 
475 bool virtio_queue_get_notification(VirtQueue *vq)
476 {
477     return vq->notification;
478 }
479 
480 void virtio_queue_set_notification(VirtQueue *vq, int enable)
481 {
482     vq->notification = enable;
483 
484     if (!vq->vring.desc) {
485         return;
486     }
487 
488     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
489         virtio_queue_packed_set_notification(vq, enable);
490     } else {
491         virtio_queue_split_set_notification(vq, enable);
492     }
493 }
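
/*
 * Usage sketch (hypothetical device code, not part of this file): handlers
 * commonly disable notifications while draining the ring, then re-enable
 * and re-check to close the race with the guest adding more buffers:
 *
 *     VirtQueueElement *elem;
 *     do {
 *         virtio_queue_set_notification(vq, 0);
 *         while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
 *             process(elem);                  // hypothetical helper
 *             virtqueue_push(vq, elem, 0);    // 0: nothing written back here
 *             g_free(elem);
 *         }
 *         virtio_queue_set_notification(vq, 1);
 *     } while (!virtio_queue_empty(vq));
 *     virtio_notify(vdev, vq);
 */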
494 
495 int virtio_queue_ready(VirtQueue *vq)
496 {
497     return vq->vring.avail != 0;
498 }
499 
500 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
501                                          uint16_t *flags,
502                                          MemoryRegionCache *cache,
503                                          int i)
504 {
505     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
506 
507     *flags = virtio_lduw_phys_cached(vdev, cache, off);
508 }
509 
510 static void vring_packed_desc_read(VirtIODevice *vdev,
511                                    VRingPackedDesc *desc,
512                                    MemoryRegionCache *cache,
513                                    int i, bool strict_order)
514 {
515     hwaddr off = i * sizeof(VRingPackedDesc);
516 
517     vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
518 
519     if (strict_order) {
520         /* Make sure flags is read before the rest of the fields. */
521         smp_rmb();
522     }
523 
524     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
525                               &desc->addr, sizeof(desc->addr));
526     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
527                               &desc->id, sizeof(desc->id));
528     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
529                               &desc->len, sizeof(desc->len));
530     virtio_tswap64s(vdev, &desc->addr);
531     virtio_tswap16s(vdev, &desc->id);
532     virtio_tswap32s(vdev, &desc->len);
533 }
534 
535 static void vring_packed_desc_write_data(VirtIODevice *vdev,
536                                          VRingPackedDesc *desc,
537                                          MemoryRegionCache *cache,
538                                          int i)
539 {
540     hwaddr off_id = i * sizeof(VRingPackedDesc) +
541                     offsetof(VRingPackedDesc, id);
542     hwaddr off_len = i * sizeof(VRingPackedDesc) +
543                     offsetof(VRingPackedDesc, len);
544 
545     virtio_tswap32s(vdev, &desc->len);
546     virtio_tswap16s(vdev, &desc->id);
547     address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
548     address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
549     address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
550     address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
551 }
552 
553 static void vring_packed_desc_write_flags(VirtIODevice *vdev,
554                                           VRingPackedDesc *desc,
555                                           MemoryRegionCache *cache,
556                                           int i)
557 {
558     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
559 
560     virtio_stw_phys_cached(vdev, cache, off, desc->flags);
561     address_space_cache_invalidate(cache, off, sizeof(desc->flags));
562 }
563 
564 static void vring_packed_desc_write(VirtIODevice *vdev,
565                                     VRingPackedDesc *desc,
566                                     MemoryRegionCache *cache,
567                                     int i, bool strict_order)
568 {
569     vring_packed_desc_write_data(vdev, desc, cache, i);
570     if (strict_order) {
571         /* Make sure data is written before flags. */
572         smp_wmb();
573     }
574     vring_packed_desc_write_flags(vdev, desc, cache, i);
575 }
576 
577 static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
578 {
579     bool avail, used;
580 
581     avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
582     used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
583     return (avail != used) && (avail == wrap_counter);
584 }
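
/*
 * Worked example for the check above: on the first pass through a packed
 * ring the device's wrap_counter is 1, so a fresh descriptor carries
 * AVAIL=1/USED=0 (avail != used, avail == wrap_counter) and is popped.
 * After the ring wraps, wrap_counter flips to 0 and a fresh descriptor
 * instead carries AVAIL=0/USED=1.  A descriptor whose AVAIL and USED bits
 * match is one the device has already written back as used.
 */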
585 
586 /* Fetch avail_idx from VQ memory only when we really need to know if
587  * guest has added some buffers.
588  * Called within rcu_read_lock().  */
589 static int virtio_queue_empty_rcu(VirtQueue *vq)
590 {
591     if (virtio_device_disabled(vq->vdev)) {
592         return 1;
593     }
594 
595     if (unlikely(!vq->vring.avail)) {
596         return 1;
597     }
598 
599     if (vq->shadow_avail_idx != vq->last_avail_idx) {
600         return 0;
601     }
602 
603     return vring_avail_idx(vq) == vq->last_avail_idx;
604 }
605 
606 static int virtio_queue_split_empty(VirtQueue *vq)
607 {
608     bool empty;
609 
610     if (virtio_device_disabled(vq->vdev)) {
611         return 1;
612     }
613 
614     if (unlikely(!vq->vring.avail)) {
615         return 1;
616     }
617 
618     if (vq->shadow_avail_idx != vq->last_avail_idx) {
619         return 0;
620     }
621 
622     RCU_READ_LOCK_GUARD();
623     empty = vring_avail_idx(vq) == vq->last_avail_idx;
624     return empty;
625 }
626 
627 /* Called within rcu_read_lock().  */
628 static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
629 {
630     struct VRingPackedDesc desc;
631     VRingMemoryRegionCaches *cache;
632 
633     if (unlikely(!vq->vring.desc)) {
634         return 1;
635     }
636 
637     cache = vring_get_region_caches(vq);
638     if (!cache) {
639         return 1;
640     }
641 
642     vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
643                                  vq->last_avail_idx);
644 
645     return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
646 }
647 
648 static int virtio_queue_packed_empty(VirtQueue *vq)
649 {
650     RCU_READ_LOCK_GUARD();
651     return virtio_queue_packed_empty_rcu(vq);
652 }
653 
654 int virtio_queue_empty(VirtQueue *vq)
655 {
656     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
657         return virtio_queue_packed_empty(vq);
658     } else {
659         return virtio_queue_split_empty(vq);
660     }
661 }
662 
663 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
664                                unsigned int len)
665 {
666     AddressSpace *dma_as = vq->vdev->dma_as;
667     unsigned int offset;
668     int i;
669 
670     offset = 0;
671     for (i = 0; i < elem->in_num; i++) {
672         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
673 
674         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
675                          elem->in_sg[i].iov_len,
676                          DMA_DIRECTION_FROM_DEVICE, size);
677 
678         offset += size;
679     }
680 
681     for (i = 0; i < elem->out_num; i++) {
682         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
683                          elem->out_sg[i].iov_len,
684                          DMA_DIRECTION_TO_DEVICE,
685                          elem->out_sg[i].iov_len);
    }
686 }
687 
688 /* virtqueue_detach_element:
689  * @vq: The #VirtQueue
690  * @elem: The #VirtQueueElement
691  * @len: number of bytes written
692  *
693  * Detach the element from the virtqueue.  This function is suitable for device
694  * reset or other situations where a #VirtQueueElement is simply freed and will
695  * not be pushed or discarded.
696  */
697 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
698                               unsigned int len)
699 {
700     vq->inuse -= elem->ndescs;
701     virtqueue_unmap_sg(vq, elem, len);
702 }
703 
704 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
705 {
706     vq->last_avail_idx -= num;
707 }
708 
709 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
710 {
711     if (vq->last_avail_idx < num) {
712         vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
713         vq->last_avail_wrap_counter ^= 1;
714     } else {
715         vq->last_avail_idx -= num;
716     }
717 }
718 
719 /* virtqueue_unpop:
720  * @vq: The #VirtQueue
721  * @elem: The #VirtQueueElement
722  * @len: number of bytes written
723  *
724  * Pretend the most recent element wasn't popped from the virtqueue.  The next
725  * call to virtqueue_pop() will refetch the element.
726  */
727 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
728                      unsigned int len)
729 {
730 
731     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
732         virtqueue_packed_rewind(vq, 1);
733     } else {
734         virtqueue_split_rewind(vq, 1);
735     }
736 
737     virtqueue_detach_element(vq, elem, len);
738 }
739 
740 /* virtqueue_rewind:
741  * @vq: The #VirtQueue
742  * @num: Number of elements to push back
743  *
744  * Pretend that elements weren't popped from the virtqueue.  The next
745  * virtqueue_pop() will refetch the oldest element.
746  *
747  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
748  *
749  * Returns: true on success, false if @num is greater than the number of in use
750  * elements.
751  */
752 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
753 {
754     if (num > vq->inuse) {
755         return false;
756     }
757 
758     vq->inuse -= num;
759     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
760         virtqueue_packed_rewind(vq, num);
761     } else {
762         virtqueue_split_rewind(vq, num);
763     }
764     return true;
765 }
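
/*
 * Usage sketch (hypothetical caller, illustrative only): a handler that
 * popped n elements and then hit a temporary error can hand them all back:
 *
 *     for (i = 0; i < n; i++) {
 *         g_free(batch[i]);            // free only the element structs
 *     }
 *     virtqueue_rewind(vq, n);         // guest buffers get re-popped later
 *
 * When only the most recently popped element must go back and the
 * VirtQueueElement is still at hand, virtqueue_unpop() above is simpler.
 */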
766 
767 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
768                     unsigned int len, unsigned int idx)
769 {
770     VRingUsedElem uelem;
771 
772     if (unlikely(!vq->vring.used)) {
773         return;
774     }
775 
776     idx = (idx + vq->used_idx) % vq->vring.num;
777 
778     uelem.id = elem->index;
779     uelem.len = len;
780     vring_used_write(vq, &uelem, idx);
781 }
782 
783 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
784                                   unsigned int len, unsigned int idx)
785 {
786     vq->used_elems[idx].index = elem->index;
787     vq->used_elems[idx].len = len;
788     vq->used_elems[idx].ndescs = elem->ndescs;
789 }
790 
791 static void virtqueue_packed_fill_desc(VirtQueue *vq,
792                                        const VirtQueueElement *elem,
793                                        unsigned int idx,
794                                        bool strict_order)
795 {
796     uint16_t head;
797     VRingMemoryRegionCaches *caches;
798     VRingPackedDesc desc = {
799         .id = elem->index,
800         .len = elem->len,
801     };
802     bool wrap_counter = vq->used_wrap_counter;
803 
804     if (unlikely(!vq->vring.desc)) {
805         return;
806     }
807 
808     head = vq->used_idx + idx;
809     if (head >= vq->vring.num) {
810         head -= vq->vring.num;
811         wrap_counter ^= 1;
812     }
813     if (wrap_counter) {
814         desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
815         desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
816     } else {
817         desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
818         desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
819     }
820 
821     caches = vring_get_region_caches(vq);
822     if (!caches) {
823         return;
824     }
825 
826     vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
827 }
828 
829 /* Called within rcu_read_lock().  */
830 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
831                     unsigned int len, unsigned int idx)
832 {
833     trace_virtqueue_fill(vq, elem, len, idx);
834 
835     virtqueue_unmap_sg(vq, elem, len);
836 
837     if (virtio_device_disabled(vq->vdev)) {
838         return;
839     }
840 
841     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
842         virtqueue_packed_fill(vq, elem, len, idx);
843     } else {
844         virtqueue_split_fill(vq, elem, len, idx);
845     }
846 }
847 
848 /* Called within rcu_read_lock().  */
849 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
850 {
851     uint16_t old, new;
852 
853     if (unlikely(!vq->vring.used)) {
854         return;
855     }
856 
857     /* Make sure buffer is written before we update index. */
858     smp_wmb();
859     trace_virtqueue_flush(vq, count);
860     old = vq->used_idx;
861     new = old + count;
862     vring_used_idx_set(vq, new);
863     vq->inuse -= count;
864     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
865         vq->signalled_used_valid = false;
    }
866 }
867 
868 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
869 {
870     unsigned int i, ndescs = 0;
871 
872     if (unlikely(!vq->vring.desc)) {
873         return;
874     }
875 
876     for (i = 1; i < count; i++) {
877         virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
878         ndescs += vq->used_elems[i].ndescs;
879     }
880     virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
881     ndescs += vq->used_elems[0].ndescs;
882 
883     vq->inuse -= ndescs;
884     vq->used_idx += ndescs;
885     if (vq->used_idx >= vq->vring.num) {
886         vq->used_idx -= vq->vring.num;
887         vq->used_wrap_counter ^= 1;
888     }
889 }
890 
891 void virtqueue_flush(VirtQueue *vq, unsigned int count)
892 {
893     if (virtio_device_disabled(vq->vdev)) {
894         vq->inuse -= count;
895         return;
896     }
897 
898     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
899         virtqueue_packed_flush(vq, count);
900     } else {
901         virtqueue_split_flush(vq, count);
902     }
903 }
904 
905 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
906                     unsigned int len)
907 {
908     RCU_READ_LOCK_GUARD();
909     virtqueue_fill(vq, elem, len, 0);
910     virtqueue_flush(vq, 1);
911 }
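
/*
 * Usage sketch (hypothetical device code): a request is typically completed
 * by copying the response into the element's writable (in_sg) buffers,
 * pushing it with the byte count actually written, and notifying the guest:
 *
 *     size_t written = iov_from_buf(elem->in_sg, elem->in_num, 0,
 *                                   &resp, sizeof(resp));   // resp: made-up
 *     virtqueue_push(vq, elem, written);
 *     virtio_notify(vdev, vq);
 *     g_free(elem);
 */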
912 
913 /* Called within rcu_read_lock().  */
914 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
915 {
916     uint16_t num_heads = vring_avail_idx(vq) - idx;
917 
918     /* Check it isn't doing very strange things with descriptor numbers. */
919     if (num_heads > vq->vring.num) {
920         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
921                      idx, vq->shadow_avail_idx);
922         return -EINVAL;
923     }
924     /* On success, callers read a descriptor at vq->last_avail_idx.
925      * Make sure descriptor read does not bypass avail index read. */
926     if (num_heads) {
927         smp_rmb();
928     }
929 
930     return num_heads;
931 }
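
/*
 * Worked example of the 16-bit arithmetic above (illustrative): the indices
 * are free-running uint16_t values, so with last_avail_idx == 0xfffe and a
 * guest avail idx of 0x0003 the subtraction wraps to 5 pending heads; only
 * a result larger than vring.num indicates a corrupted index.
 */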
932 
933 /* Called within rcu_read_lock().  */
934 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
935                                unsigned int *head)
936 {
937     /* Grab the next descriptor number they're advertising, and increment
938      * the index we've seen. */
939     *head = vring_avail_ring(vq, idx % vq->vring.num);
940 
941     /* If their number is silly, that's a fatal mistake. */
942     if (*head >= vq->vring.num) {
943         virtio_error(vq->vdev, "Guest says index %u is available", *head);
944         return false;
945     }
946 
947     return true;
948 }
949 
950 enum {
951     VIRTQUEUE_READ_DESC_ERROR = -1,
952     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
953     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
954 };
955 
956 static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
957                                           MemoryRegionCache *desc_cache,
958                                           unsigned int max, unsigned int *next)
959 {
960     /* If this descriptor says it doesn't chain, we're done. */
961     if (!(desc->flags & VRING_DESC_F_NEXT)) {
962         return VIRTQUEUE_READ_DESC_DONE;
963     }
964 
965     /* Check they're not leading us off end of descriptors. */
966     *next = desc->next;
967     /* Make sure compiler knows to grab that: we don't want it changing! */
968     smp_wmb();
969 
970     if (*next >= max) {
971         virtio_error(vdev, "Desc next is %u", *next);
972         return VIRTQUEUE_READ_DESC_ERROR;
973     }
974 
975     vring_split_desc_read(vdev, desc, desc_cache, *next);
976     return VIRTQUEUE_READ_DESC_MORE;
977 }
978 
979 /* Called within rcu_read_lock().  */
980 static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
981                             unsigned int *in_bytes, unsigned int *out_bytes,
982                             unsigned max_in_bytes, unsigned max_out_bytes,
983                             VRingMemoryRegionCaches *caches)
984 {
985     VirtIODevice *vdev = vq->vdev;
986     unsigned int max, idx;
987     unsigned int total_bufs, in_total, out_total;
988     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
989     int64_t len = 0;
990     int rc;
991 
992     idx = vq->last_avail_idx;
993     total_bufs = in_total = out_total = 0;
994 
995     max = vq->vring.num;
996 
997     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
998         MemoryRegionCache *desc_cache = &caches->desc;
999         unsigned int num_bufs;
1000         VRingDesc desc;
1001         unsigned int i;
1002 
1003         num_bufs = total_bufs;
1004 
1005         if (!virtqueue_get_head(vq, idx++, &i)) {
1006             goto err;
1007         }
1008 
1009         vring_split_desc_read(vdev, &desc, desc_cache, i);
1010 
1011         if (desc.flags & VRING_DESC_F_INDIRECT) {
1012             if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1013                 virtio_error(vdev, "Invalid size for indirect buffer table");
1014                 goto err;
1015             }
1016 
1017             /* If we've got too many, that implies a descriptor loop. */
1018             if (num_bufs >= max) {
1019                 virtio_error(vdev, "Looped descriptor");
1020                 goto err;
1021             }
1022 
1023             /* loop over the indirect descriptor table */
1024             len = address_space_cache_init(&indirect_desc_cache,
1025                                            vdev->dma_as,
1026                                            desc.addr, desc.len, false);
1027             desc_cache = &indirect_desc_cache;
1028             if (len < desc.len) {
1029                 virtio_error(vdev, "Cannot map indirect buffer");
1030                 goto err;
1031             }
1032 
1033             max = desc.len / sizeof(VRingDesc);
1034             num_bufs = i = 0;
1035             vring_split_desc_read(vdev, &desc, desc_cache, i);
1036         }
1037 
1038         do {
1039             /* If we've got too many, that implies a descriptor loop. */
1040             if (++num_bufs > max) {
1041                 virtio_error(vdev, "Looped descriptor");
1042                 goto err;
1043             }
1044 
1045             if (desc.flags & VRING_DESC_F_WRITE) {
1046                 in_total += desc.len;
1047             } else {
1048                 out_total += desc.len;
1049             }
1050             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1051                 goto done;
1052             }
1053 
1054             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1055         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1056 
1057         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1058             goto err;
1059         }
1060 
1061         if (desc_cache == &indirect_desc_cache) {
1062             address_space_cache_destroy(&indirect_desc_cache);
1063             total_bufs++;
1064         } else {
1065             total_bufs = num_bufs;
1066         }
1067     }
1068 
1069     if (rc < 0) {
1070         goto err;
1071     }
1072 
1073 done:
1074     address_space_cache_destroy(&indirect_desc_cache);
1075     if (in_bytes) {
1076         *in_bytes = in_total;
1077     }
1078     if (out_bytes) {
1079         *out_bytes = out_total;
1080     }
1081     return;
1082 
1083 err:
1084     in_total = out_total = 0;
1085     goto done;
1086 }
1087 
1088 static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1089                                            VRingPackedDesc *desc,
1090                                            MemoryRegionCache
1091                                            *desc_cache,
1092                                            unsigned int max,
1093                                            unsigned int *next,
1094                                            bool indirect)
1095 {
1096     /* If this descriptor says it doesn't chain, we're done. */
1097     if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1098         return VIRTQUEUE_READ_DESC_DONE;
1099     }
1100 
1101     ++*next;
1102     if (*next == max) {
1103         if (indirect) {
1104             return VIRTQUEUE_READ_DESC_DONE;
1105         } else {
1106             (*next) -= vq->vring.num;
1107         }
1108     }
1109 
1110     vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1111     return VIRTQUEUE_READ_DESC_MORE;
1112 }
1113 
1114 /* Called within rcu_read_lock().  */
1115 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1116                                              unsigned int *in_bytes,
1117                                              unsigned int *out_bytes,
1118                                              unsigned max_in_bytes,
1119                                              unsigned max_out_bytes,
1120                                              VRingMemoryRegionCaches *caches)
1121 {
1122     VirtIODevice *vdev = vq->vdev;
1123     unsigned int max, idx;
1124     unsigned int total_bufs, in_total, out_total;
1125     MemoryRegionCache *desc_cache;
1126     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1127     int64_t len = 0;
1128     VRingPackedDesc desc;
1129     bool wrap_counter;
1130 
1131     idx = vq->last_avail_idx;
1132     wrap_counter = vq->last_avail_wrap_counter;
1133     total_bufs = in_total = out_total = 0;
1134 
1135     max = vq->vring.num;
1136 
1137     for (;;) {
1138         unsigned int num_bufs = total_bufs;
1139         unsigned int i = idx;
1140         int rc;
1141 
1142         desc_cache = &caches->desc;
1143         vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1144         if (!is_desc_avail(desc.flags, wrap_counter)) {
1145             break;
1146         }
1147 
1148         if (desc.flags & VRING_DESC_F_INDIRECT) {
1149             if (desc.len % sizeof(VRingPackedDesc)) {
1150                 virtio_error(vdev, "Invalid size for indirect buffer table");
1151                 goto err;
1152             }
1153 
1154             /* If we've got too many, that implies a descriptor loop. */
1155             if (num_bufs >= max) {
1156                 virtio_error(vdev, "Looped descriptor");
1157                 goto err;
1158             }
1159 
1160             /* loop over the indirect descriptor table */
1161             len = address_space_cache_init(&indirect_desc_cache,
1162                                            vdev->dma_as,
1163                                            desc.addr, desc.len, false);
1164             desc_cache = &indirect_desc_cache;
1165             if (len < desc.len) {
1166                 virtio_error(vdev, "Cannot map indirect buffer");
1167                 goto err;
1168             }
1169 
1170             max = desc.len / sizeof(VRingPackedDesc);
1171             num_bufs = i = 0;
1172             vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1173         }
1174 
1175         do {
1176             /* If we've got too many, that implies a descriptor loop. */
1177             if (++num_bufs > max) {
1178                 virtio_error(vdev, "Looped descriptor");
1179                 goto err;
1180             }
1181 
1182             if (desc.flags & VRING_DESC_F_WRITE) {
1183                 in_total += desc.len;
1184             } else {
1185                 out_total += desc.len;
1186             }
1187             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1188                 goto done;
1189             }
1190 
1191             rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1192                                                  &i, desc_cache ==
1193                                                  &indirect_desc_cache);
1194         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1195 
1196         if (desc_cache == &indirect_desc_cache) {
1197             address_space_cache_destroy(&indirect_desc_cache);
1198             total_bufs++;
1199             idx++;
1200         } else {
1201             idx += num_bufs - total_bufs;
1202             total_bufs = num_bufs;
1203         }
1204 
1205         if (idx >= vq->vring.num) {
1206             idx -= vq->vring.num;
1207             wrap_counter ^= 1;
1208         }
1209     }
1210 
1211     /* Record the index and wrap counter for a kick we want */
1212     vq->shadow_avail_idx = idx;
1213     vq->shadow_avail_wrap_counter = wrap_counter;
1214 done:
1215     address_space_cache_destroy(&indirect_desc_cache);
1216     if (in_bytes) {
1217         *in_bytes = in_total;
1218     }
1219     if (out_bytes) {
1220         *out_bytes = out_total;
1221     }
1222     return;
1223 
1224 err:
1225     in_total = out_total = 0;
1226     goto done;
1227 }
1228 
1229 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1230                                unsigned int *out_bytes,
1231                                unsigned max_in_bytes, unsigned max_out_bytes)
1232 {
1233     uint16_t desc_size;
1234     VRingMemoryRegionCaches *caches;
1235 
1236     RCU_READ_LOCK_GUARD();
1237 
1238     if (unlikely(!vq->vring.desc)) {
1239         goto err;
1240     }
1241 
1242     caches = vring_get_region_caches(vq);
1243     if (!caches) {
1244         goto err;
1245     }
1246 
1247     desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1248                                 sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1249     if (caches->desc.len < vq->vring.num * desc_size) {
1250         virtio_error(vq->vdev, "Cannot map descriptor ring");
1251         goto err;
1252     }
1253 
1254     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1255         virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1256                                          max_in_bytes, max_out_bytes,
1257                                          caches);
1258     } else {
1259         virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1260                                         max_in_bytes, max_out_bytes,
1261                                         caches);
1262     }
1263 
1264     return;
1265 err:
1266     if (in_bytes) {
1267         *in_bytes = 0;
1268     }
1269     if (out_bytes) {
1270         *out_bytes = 0;
1271     }
1272 }
1273 
1274 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1275                           unsigned int out_bytes)
1276 {
1277     unsigned int in_total, out_total;
1278 
1279     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1280     return in_bytes <= in_total && out_bytes <= out_total;
1281 }
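
/*
 * Usage sketch (hypothetical caller): a device that needs room for, say, a
 * 12-byte device-writable response before accepting a request can bail out
 * early instead of popping:
 *
 *     if (!virtqueue_avail_bytes(vq, 12, 0)) {
 *         return;   // wait for the guest to queue larger buffers
 *     }
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *
 * Here in_bytes counts device-writable space and out_bytes driver-provided
 * data, matching the in/out naming used throughout this file.
 */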
1282 
1283 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1284                                hwaddr *addr, struct iovec *iov,
1285                                unsigned int max_num_sg, bool is_write,
1286                                hwaddr pa, size_t sz)
1287 {
1288     bool ok = false;
1289     unsigned num_sg = *p_num_sg;
1290     assert(num_sg <= max_num_sg);
1291 
1292     if (!sz) {
1293         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1294         goto out;
1295     }
1296 
1297     while (sz) {
1298         hwaddr len = sz;
1299 
1300         if (num_sg == max_num_sg) {
1301             virtio_error(vdev, "virtio: too many write descriptors in "
1302                                "indirect table");
1303             goto out;
1304         }
1305 
1306         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1307                                               is_write ?
1308                                               DMA_DIRECTION_FROM_DEVICE :
1309                                               DMA_DIRECTION_TO_DEVICE);
1310         if (!iov[num_sg].iov_base) {
1311             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1312             goto out;
1313         }
1314 
1315         iov[num_sg].iov_len = len;
1316         addr[num_sg] = pa;
1317 
1318         sz -= len;
1319         pa += len;
1320         num_sg++;
1321     }
1322     ok = true;
1323 
1324 out:
1325     *p_num_sg = num_sg;
1326     return ok;
1327 }
1328 
1329 /* Only used by error code paths before we have a VirtQueueElement (therefore
1330  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
1331  * yet.
1332  */
1333 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1334                                     struct iovec *iov)
1335 {
1336     unsigned int i;
1337 
1338     for (i = 0; i < out_num + in_num; i++) {
1339         int is_write = i >= out_num;
1340 
1341         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1342         iov++;
1343     }
1344 }
1345 
1346 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1347                                 hwaddr *addr, unsigned int num_sg,
1348                                 bool is_write)
1349 {
1350     unsigned int i;
1351     hwaddr len;
1352 
1353     for (i = 0; i < num_sg; i++) {
1354         len = sg[i].iov_len;
1355         sg[i].iov_base = dma_memory_map(vdev->dma_as,
1356                                         addr[i], &len, is_write ?
1357                                         DMA_DIRECTION_FROM_DEVICE :
1358                                         DMA_DIRECTION_TO_DEVICE);
1359         if (!sg[i].iov_base) {
1360             error_report("virtio: error trying to map MMIO memory");
1361             exit(1);
1362         }
1363         if (len != sg[i].iov_len) {
1364             error_report("virtio: unexpected memory split");
1365             exit(1);
1366         }
1367     }
1368 }
1369 
1370 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1371 {
1372     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1373     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1374                                                                         false);
1375 }
1376 
1377 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1378 {
1379     VirtQueueElement *elem;
1380     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1381     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1382     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1383     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1384     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1385     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1386 
1387     assert(sz >= sizeof(VirtQueueElement));
1388     elem = g_malloc(out_sg_end);
1389     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1390     elem->out_num = out_num;
1391     elem->in_num = in_num;
1392     elem->in_addr = (void *)elem + in_addr_ofs;
1393     elem->out_addr = (void *)elem + out_addr_ofs;
1394     elem->in_sg = (void *)elem + in_sg_ofs;
1395     elem->out_sg = (void *)elem + out_sg_ofs;
1396     return elem;
1397 }
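
/*
 * Worked example of the single-allocation layout above (illustrative
 * figures, 64-bit host, ignoring alignment padding): with
 * sz == sizeof(VirtQueueElement), out_num == 1 and in_num == 2 the header
 * is followed by 2 + 1 hwaddr entries and then 2 + 1 struct iovec entries,
 * so one g_malloc() covers the element plus 3 * 8 + 3 * 16 = 72 bytes of
 * trailing arrays that in_addr/out_addr/in_sg/out_sg point into.
 */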
1398 
1399 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1400 {
1401     unsigned int i, head, max;
1402     VRingMemoryRegionCaches *caches;
1403     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1404     MemoryRegionCache *desc_cache;
1405     int64_t len;
1406     VirtIODevice *vdev = vq->vdev;
1407     VirtQueueElement *elem = NULL;
1408     unsigned out_num, in_num, elem_entries;
1409     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1410     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1411     VRingDesc desc;
1412     int rc;
1413 
1414     RCU_READ_LOCK_GUARD();
1415     if (virtio_queue_empty_rcu(vq)) {
1416         goto done;
1417     }
1418     /* Needed after virtio_queue_empty(), see comment in
1419      * virtqueue_num_heads(). */
1420     smp_rmb();
1421 
1422     /* When we start there are neither input nor output buffers. */
1423     out_num = in_num = elem_entries = 0;
1424 
1425     max = vq->vring.num;
1426 
1427     if (vq->inuse >= vq->vring.num) {
1428         virtio_error(vdev, "Virtqueue size exceeded");
1429         goto done;
1430     }
1431 
1432     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1433         goto done;
1434     }
1435 
1436     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1437         vring_set_avail_event(vq, vq->last_avail_idx);
1438     }
1439 
1440     i = head;
1441 
1442     caches = vring_get_region_caches(vq);
1443     if (!caches) {
1444         virtio_error(vdev, "Region caches not initialized");
1445         goto done;
1446     }
1447 
1448     if (caches->desc.len < max * sizeof(VRingDesc)) {
1449         virtio_error(vdev, "Cannot map descriptor ring");
1450         goto done;
1451     }
1452 
1453     desc_cache = &caches->desc;
1454     vring_split_desc_read(vdev, &desc, desc_cache, i);
1455     if (desc.flags & VRING_DESC_F_INDIRECT) {
1456         if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1457             virtio_error(vdev, "Invalid size for indirect buffer table");
1458             goto done;
1459         }
1460 
1461         /* loop over the indirect descriptor table */
1462         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1463                                        desc.addr, desc.len, false);
1464         desc_cache = &indirect_desc_cache;
1465         if (len < desc.len) {
1466             virtio_error(vdev, "Cannot map indirect buffer");
1467             goto done;
1468         }
1469 
1470         max = desc.len / sizeof(VRingDesc);
1471         i = 0;
1472         vring_split_desc_read(vdev, &desc, desc_cache, i);
1473     }
1474 
1475     /* Collect all the descriptors */
1476     do {
1477         bool map_ok;
1478 
1479         if (desc.flags & VRING_DESC_F_WRITE) {
1480             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1481                                         iov + out_num,
1482                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1483                                         desc.addr, desc.len);
1484         } else {
1485             if (in_num) {
1486                 virtio_error(vdev, "Incorrect order for descriptors");
1487                 goto err_undo_map;
1488             }
1489             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1490                                         VIRTQUEUE_MAX_SIZE, false,
1491                                         desc.addr, desc.len);
1492         }
1493         if (!map_ok) {
1494             goto err_undo_map;
1495         }
1496 
1497         /* If we've got too many, that implies a descriptor loop. */
1498         if (++elem_entries > max) {
1499             virtio_error(vdev, "Looped descriptor");
1500             goto err_undo_map;
1501         }
1502 
1503         rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1504     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1505 
1506     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1507         goto err_undo_map;
1508     }
1509 
1510     /* Now copy what we have collected and mapped */
1511     elem = virtqueue_alloc_element(sz, out_num, in_num);
1512     elem->index = head;
1513     elem->ndescs = 1;
1514     for (i = 0; i < out_num; i++) {
1515         elem->out_addr[i] = addr[i];
1516         elem->out_sg[i] = iov[i];
1517     }
1518     for (i = 0; i < in_num; i++) {
1519         elem->in_addr[i] = addr[out_num + i];
1520         elem->in_sg[i] = iov[out_num + i];
1521     }
1522 
1523     vq->inuse++;
1524 
1525     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1526 done:
1527     address_space_cache_destroy(&indirect_desc_cache);
1528 
1529     return elem;
1530 
1531 err_undo_map:
1532     virtqueue_undo_map_desc(out_num, in_num, iov);
1533     goto done;
1534 }
1535 
1536 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1537 {
1538     unsigned int i, max;
1539     VRingMemoryRegionCaches *caches;
1540     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1541     MemoryRegionCache *desc_cache;
1542     int64_t len;
1543     VirtIODevice *vdev = vq->vdev;
1544     VirtQueueElement *elem = NULL;
1545     unsigned out_num, in_num, elem_entries;
1546     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1547     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1548     VRingPackedDesc desc;
1549     uint16_t id;
1550     int rc;
1551 
1552     RCU_READ_LOCK_GUARD();
1553     if (virtio_queue_packed_empty_rcu(vq)) {
1554         goto done;
1555     }
1556 
1557     /* When we start there are neither input nor output buffers. */
1558     out_num = in_num = elem_entries = 0;
1559 
1560     max = vq->vring.num;
1561 
1562     if (vq->inuse >= vq->vring.num) {
1563         virtio_error(vdev, "Virtqueue size exceeded");
1564         goto done;
1565     }
1566 
1567     i = vq->last_avail_idx;
1568 
1569     caches = vring_get_region_caches(vq);
1570     if (!caches) {
1571         virtio_error(vdev, "Region caches not initialized");
1572         goto done;
1573     }
1574 
1575     if (caches->desc.len < max * sizeof(VRingDesc)) {
1576         virtio_error(vdev, "Cannot map descriptor ring");
1577         goto done;
1578     }
1579 
1580     desc_cache = &caches->desc;
1581     vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1582     id = desc.id;
1583     if (desc.flags & VRING_DESC_F_INDIRECT) {
1584         if (desc.len % sizeof(VRingPackedDesc)) {
1585             virtio_error(vdev, "Invalid size for indirect buffer table");
1586             goto done;
1587         }
1588 
1589         /* loop over the indirect descriptor table */
1590         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1591                                        desc.addr, desc.len, false);
1592         desc_cache = &indirect_desc_cache;
1593         if (len < desc.len) {
1594             virtio_error(vdev, "Cannot map indirect buffer");
1595             goto done;
1596         }
1597 
1598         max = desc.len / sizeof(VRingPackedDesc);
1599         i = 0;
1600         vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1601     }
1602 
1603     /* Collect all the descriptors */
1604     do {
1605         bool map_ok;
1606 
1607         if (desc.flags & VRING_DESC_F_WRITE) {
1608             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1609                                         iov + out_num,
1610                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1611                                         desc.addr, desc.len);
1612         } else {
1613             if (in_num) {
1614                 virtio_error(vdev, "Incorrect order for descriptors");
1615                 goto err_undo_map;
1616             }
1617             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1618                                         VIRTQUEUE_MAX_SIZE, false,
1619                                         desc.addr, desc.len);
1620         }
1621         if (!map_ok) {
1622             goto err_undo_map;
1623         }
1624 
1625         /* If we've got too many, that implies a descriptor loop. */
1626         if (++elem_entries > max) {
1627             virtio_error(vdev, "Looped descriptor");
1628             goto err_undo_map;
1629         }
1630 
1631         rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1632                                              desc_cache ==
1633                                              &indirect_desc_cache);
1634     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1635 
1636     /* Now copy what we have collected and mapped */
1637     elem = virtqueue_alloc_element(sz, out_num, in_num);
1638     for (i = 0; i < out_num; i++) {
1639         elem->out_addr[i] = addr[i];
1640         elem->out_sg[i] = iov[i];
1641     }
1642     for (i = 0; i < in_num; i++) {
1643         elem->in_addr[i] = addr[out_num + i];
1644         elem->in_sg[i] = iov[out_num + i];
1645     }
1646 
1647     elem->index = id;
1648     elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1649     vq->last_avail_idx += elem->ndescs;
1650     vq->inuse += elem->ndescs;
1651 
1652     if (vq->last_avail_idx >= vq->vring.num) {
1653         vq->last_avail_idx -= vq->vring.num;
1654         vq->last_avail_wrap_counter ^= 1;
1655     }
1656 
1657     vq->shadow_avail_idx = vq->last_avail_idx;
1658     vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1659 
1660     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1661 done:
1662     address_space_cache_destroy(&indirect_desc_cache);
1663 
1664     return elem;
1665 
1666 err_undo_map:
1667     virtqueue_undo_map_desc(out_num, in_num, iov);
1668     goto done;
1669 }
1670 
1671 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1672 {
1673     if (virtio_device_disabled(vq->vdev)) {
1674         return NULL;
1675     }
1676 
1677     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1678         return virtqueue_packed_pop(vq, sz);
1679     } else {
1680         return virtqueue_split_pop(vq, sz);
1681     }
1682 }
1683 
1684 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1685 {
1686     VRingMemoryRegionCaches *caches;
1687     MemoryRegionCache *desc_cache;
1688     unsigned int dropped = 0;
1689     VirtQueueElement elem = {};
1690     VirtIODevice *vdev = vq->vdev;
1691     VRingPackedDesc desc;
1692 
1693     RCU_READ_LOCK_GUARD();
1694 
1695     caches = vring_get_region_caches(vq);
1696     if (!caches) {
1697         return 0;
1698     }
1699 
1700     desc_cache = &caches->desc;
1701 
1702     virtio_queue_set_notification(vq, 0);
1703 
1704     while (vq->inuse < vq->vring.num) {
1705         unsigned int idx = vq->last_avail_idx;
1706         /*
1707          * Works similarly to virtqueue_pop() but neither maps buffers
1708          * nor allocates any memory.
1709          */
1710         vring_packed_desc_read(vdev, &desc, desc_cache,
1711                                vq->last_avail_idx, true);
1712         if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1713             break;
1714         }
1715         elem.index = desc.id;
1716         elem.ndescs = 1;
1717         while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1718                                                vq->vring.num, &idx, false)) {
1719             ++elem.ndescs;
1720         }
1721         /*
1722          * Immediately push the element; there is nothing to unmap
1723          * because both in_num and out_num are 0.
1724          */
1725         virtqueue_push(vq, &elem, 0);
1726         dropped++;
1727         vq->last_avail_idx += elem.ndescs;
1728         if (vq->last_avail_idx >= vq->vring.num) {
1729             vq->last_avail_idx -= vq->vring.num;
1730             vq->last_avail_wrap_counter ^= 1;
1731         }
1732     }
1733 
1734     return dropped;
1735 }
1736 
1737 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1738 {
1739     unsigned int dropped = 0;
1740     VirtQueueElement elem = {};
1741     VirtIODevice *vdev = vq->vdev;
1742     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1743 
1744     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1745         /* Works similarly to virtqueue_pop() but neither maps buffers
1746          * nor allocates any memory. */
1747         smp_rmb();
1748         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1749             break;
1750         }
1751         vq->inuse++;
1752         vq->last_avail_idx++;
1753         if (fEventIdx) {
1754             vring_set_avail_event(vq, vq->last_avail_idx);
1755         }
1756         /* Immediately push the element; there is nothing to unmap
1757          * because both in_num and out_num are 0. */
1758         virtqueue_push(vq, &elem, 0);
1759         dropped++;
1760     }
1761 
1762     return dropped;
1763 }
1764 
1765 /* virtqueue_drop_all:
1766  * @vq: The #VirtQueue
1767  * Drops all queued buffers and indicates them to the guest
1768  * as if they were done. Useful when buffers cannot be
1769  * processed but must be returned to the guest.
1770  */
1771 unsigned int virtqueue_drop_all(VirtQueue *vq)
1772 {
1773     VirtIODevice *vdev = vq->vdev;
1774 
1775     if (virtio_device_disabled(vdev)) {
1776         return 0;
1777     }
1778 
1779     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1780         return virtqueue_packed_drop_all(vq);
1781     } else {
1782         return virtqueue_split_drop_all(vq);
1783     }
1784 }
1785 
1786 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1787  * it is what QEMU has always done by mistake.  We can change it sooner
1788  * or later by bumping the version number of the affected vm states.
1789  * In the meantime, since the in-memory layout of VirtQueueElement
1790  * has changed, we need to marshal to and from the layout that was
1791  * used before the change.
1792  */
1793 typedef struct VirtQueueElementOld {
1794     unsigned int index;
1795     unsigned int out_num;
1796     unsigned int in_num;
1797     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1798     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1799     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1800     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1801 } VirtQueueElementOld;
1802 
1803 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1804 {
1805     VirtQueueElement *elem;
1806     VirtQueueElementOld data;
1807     int i;
1808 
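    /*
     * The stream always carries a full, fixed-size VirtQueueElementOld,
     * independent of how many in/out entries are actually populated.
     */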
1809     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1810 
1811     /* TODO: teach all callers that this can fail, and return failure instead
1812      * of asserting here.
1813      * This is just one thing (there are probably more) that must be
1814      * fixed before we can allow NDEBUG compilation.
1815      */
1816     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1817     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1818 
1819     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1820     elem->index = data.index;
1821 
1822     for (i = 0; i < elem->in_num; i++) {
1823         elem->in_addr[i] = data.in_addr[i];
1824     }
1825 
1826     for (i = 0; i < elem->out_num; i++) {
1827         elem->out_addr[i] = data.out_addr[i];
1828     }
1829 
1830     for (i = 0; i < elem->in_num; i++) {
1831         /* Base is overwritten by virtqueue_map.  */
1832         elem->in_sg[i].iov_base = 0;
1833         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1834     }
1835 
1836     for (i = 0; i < elem->out_num; i++) {
1837         /* Base is overwritten by virtqueue_map.  */
1838         elem->out_sg[i].iov_base = 0;
1839         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1840     }
1841 
1842     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1843         qemu_get_be32s(f, &elem->ndescs);
1844     }
1845 
1846     virtqueue_map(vdev, elem);
1847     return elem;
1848 }
1849 
1850 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1851                                 VirtQueueElement *elem)
1852 {
1853     VirtQueueElementOld data;
1854     int i;
1855 
1856     memset(&data, 0, sizeof(data));
1857     data.index = elem->index;
1858     data.in_num = elem->in_num;
1859     data.out_num = elem->out_num;
1860 
1861     for (i = 0; i < elem->in_num; i++) {
1862         data.in_addr[i] = elem->in_addr[i];
1863     }
1864 
1865     for (i = 0; i < elem->out_num; i++) {
1866         data.out_addr[i] = elem->out_addr[i];
1867     }
1868 
1869     for (i = 0; i < elem->in_num; i++) {
1870         /* Base is overwritten by virtqueue_map when loading.  Do not
1871          * save it, as it would leak the QEMU address space layout.  */
1872         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1873     }
1874 
1875     for (i = 0; i < elem->out_num; i++) {
1876         /* Do not save iov_base as above.  */
1877         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1878     }
1879 
1880     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1881         qemu_put_be32s(f, &elem->ndescs);
1882     }
1883 
1884     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1885 }
1886 
1887 /* virtio device */
1888 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1889 {
1890     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1891     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1892 
1893     if (virtio_device_disabled(vdev)) {
1894         return;
1895     }
1896 
1897     if (k->notify) {
1898         k->notify(qbus->parent, vector);
1899     }
1900 }
1901 
1902 void virtio_update_irq(VirtIODevice *vdev)
1903 {
1904     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1905 }
1906 
1907 static int virtio_validate_features(VirtIODevice *vdev)
1908 {
1909     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1910 
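    /*
     * VIRTIO_F_IOMMU_PLATFORM is mandatory once the device has offered it:
     * the guest is not allowed to negotiate it away.
     */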
1911     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1912         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1913         return -EFAULT;
1914     }
1915 
1916     if (k->validate_features) {
1917         return k->validate_features(vdev);
1918     } else {
1919         return 0;
1920     }
1921 }
1922 
1923 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1924 {
1925     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1926     trace_virtio_set_status(vdev, val);
1927 
1928     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1929         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1930             val & VIRTIO_CONFIG_S_FEATURES_OK) {
1931             int ret = virtio_validate_features(vdev);
1932 
1933             if (ret) {
1934                 return ret;
1935             }
1936         }
1937     }
1938 
1939     if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
1940         (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1941         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
1942     }
1943 
1944     if (k->set_status) {
1945         k->set_status(vdev, val);
1946     }
1947     vdev->status = val;
1948 
1949     return 0;
1950 }
1951 
1952 static enum virtio_device_endian virtio_default_endian(void)
1953 {
1954     if (target_words_bigendian()) {
1955         return VIRTIO_DEVICE_ENDIAN_BIG;
1956     } else {
1957         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1958     }
1959 }
1960 
1961 static enum virtio_device_endian virtio_current_cpu_endian(void)
1962 {
1963     if (cpu_virtio_is_big_endian(current_cpu)) {
1964         return VIRTIO_DEVICE_ENDIAN_BIG;
1965     } else {
1966         return VIRTIO_DEVICE_ENDIAN_LITTLE;
1967     }
1968 }
1969 
1970 void virtio_reset(void *opaque)
1971 {
1972     VirtIODevice *vdev = opaque;
1973     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1974     int i;
1975 
1976     virtio_set_status(vdev, 0);
1977     if (current_cpu) {
1978         /* Guest initiated reset */
1979         vdev->device_endian = virtio_current_cpu_endian();
1980     } else {
1981         /* System reset */
1982         vdev->device_endian = virtio_default_endian();
1983     }
1984 
1985     if (k->reset) {
1986         k->reset(vdev);
1987     }
1988 
1989     vdev->start_on_kick = false;
1990     vdev->started = false;
1991     vdev->broken = false;
1992     vdev->guest_features = 0;
1993     vdev->queue_sel = 0;
1994     vdev->status = 0;
1995     vdev->disabled = false;
1996     qatomic_set(&vdev->isr, 0);
1997     vdev->config_vector = VIRTIO_NO_VECTOR;
1998     virtio_notify_vector(vdev, vdev->config_vector);
1999 
2000     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2001         vdev->vq[i].vring.desc = 0;
2002         vdev->vq[i].vring.avail = 0;
2003         vdev->vq[i].vring.used = 0;
2004         vdev->vq[i].last_avail_idx = 0;
2005         vdev->vq[i].shadow_avail_idx = 0;
2006         vdev->vq[i].used_idx = 0;
2007         vdev->vq[i].last_avail_wrap_counter = true;
2008         vdev->vq[i].shadow_avail_wrap_counter = true;
2009         vdev->vq[i].used_wrap_counter = true;
2010         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2011         vdev->vq[i].signalled_used = 0;
2012         vdev->vq[i].signalled_used_valid = false;
2013         vdev->vq[i].notification = true;
2014         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2015         vdev->vq[i].inuse = 0;
2016         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2017     }
2018 }
2019 
2020 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
2021 {
2022     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2023     uint8_t val;
2024 
2025     if (addr + sizeof(val) > vdev->config_len) {
2026         return (uint32_t)-1;
2027     }
2028 
2029     k->get_config(vdev, vdev->config);
2030 
2031     val = ldub_p(vdev->config + addr);
2032     return val;
2033 }
2034 
2035 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
2036 {
2037     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2038     uint16_t val;
2039 
2040     if (addr + sizeof(val) > vdev->config_len) {
2041         return (uint32_t)-1;
2042     }
2043 
2044     k->get_config(vdev, vdev->config);
2045 
2046     val = lduw_p(vdev->config + addr);
2047     return val;
2048 }
2049 
2050 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
2051 {
2052     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2053     uint32_t val;
2054 
2055     if (addr + sizeof(val) > vdev->config_len) {
2056         return (uint32_t)-1;
2057     }
2058 
2059     k->get_config(vdev, vdev->config);
2060 
2061     val = ldl_p(vdev->config + addr);
2062     return val;
2063 }
2064 
2065 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2066 {
2067     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2068     uint8_t val = data;
2069 
2070     if (addr + sizeof(val) > vdev->config_len) {
2071         return;
2072     }
2073 
2074     stb_p(vdev->config + addr, val);
2075 
2076     if (k->set_config) {
2077         k->set_config(vdev, vdev->config);
2078     }
2079 }
2080 
2081 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2082 {
2083     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2084     uint16_t val = data;
2085 
2086     if (addr + sizeof(val) > vdev->config_len) {
2087         return;
2088     }
2089 
2090     stw_p(vdev->config + addr, val);
2091 
2092     if (k->set_config) {
2093         k->set_config(vdev, vdev->config);
2094     }
2095 }
2096 
2097 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2098 {
2099     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2100     uint32_t val = data;
2101 
2102     if (addr + sizeof(val) > vdev->config_len) {
2103         return;
2104     }
2105 
2106     stl_p(vdev->config + addr, val);
2107 
2108     if (k->set_config) {
2109         k->set_config(vdev, vdev->config);
2110     }
2111 }
2112 
2113 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
2114 {
2115     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2116     uint8_t val;
2117 
2118     if (addr + sizeof(val) > vdev->config_len) {
2119         return (uint32_t)-1;
2120     }
2121 
2122     k->get_config(vdev, vdev->config);
2123 
2124     val = ldub_p(vdev->config + addr);
2125     return val;
2126 }
2127 
2128 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
2129 {
2130     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2131     uint16_t val;
2132 
2133     if (addr + sizeof(val) > vdev->config_len) {
2134         return (uint32_t)-1;
2135     }
2136 
2137     k->get_config(vdev, vdev->config);
2138 
2139     val = lduw_le_p(vdev->config + addr);
2140     return val;
2141 }
2142 
2143 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
2144 {
2145     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2146     uint32_t val;
2147 
2148     if (addr + sizeof(val) > vdev->config_len) {
2149         return (uint32_t)-1;
2150     }
2151 
2152     k->get_config(vdev, vdev->config);
2153 
2154     val = ldl_le_p(vdev->config + addr);
2155     return val;
2156 }
2157 
2158 void virtio_config_modern_writeb(VirtIODevice *vdev,
2159                                  uint32_t addr, uint32_t data)
2160 {
2161     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2162     uint8_t val = data;
2163 
2164     if (addr + sizeof(val) > vdev->config_len) {
2165         return;
2166     }
2167 
2168     stb_p(vdev->config + addr, val);
2169 
2170     if (k->set_config) {
2171         k->set_config(vdev, vdev->config);
2172     }
2173 }
2174 
2175 void virtio_config_modern_writew(VirtIODevice *vdev,
2176                                  uint32_t addr, uint32_t data)
2177 {
2178     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2179     uint16_t val = data;
2180 
2181     if (addr + sizeof(val) > vdev->config_len) {
2182         return;
2183     }
2184 
2185     stw_le_p(vdev->config + addr, val);
2186 
2187     if (k->set_config) {
2188         k->set_config(vdev, vdev->config);
2189     }
2190 }
2191 
2192 void virtio_config_modern_writel(VirtIODevice *vdev,
2193                                  uint32_t addr, uint32_t data)
2194 {
2195     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2196     uint32_t val = data;
2197 
2198     if (addr + sizeof(val) > vdev->config_len) {
2199         return;
2200     }
2201 
2202     stl_le_p(vdev->config + addr, val);
2203 
2204     if (k->set_config) {
2205         k->set_config(vdev, vdev->config);
2206     }
2207 }
2208 
2209 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2210 {
2211     if (!vdev->vq[n].vring.num) {
2212         return;
2213     }
2214     vdev->vq[n].vring.desc = addr;
2215     virtio_queue_update_rings(vdev, n);
2216 }
2217 
2218 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2219 {
2220     return vdev->vq[n].vring.desc;
2221 }
2222 
2223 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2224                             hwaddr avail, hwaddr used)
2225 {
2226     if (!vdev->vq[n].vring.num) {
2227         return;
2228     }
2229     vdev->vq[n].vring.desc = desc;
2230     vdev->vq[n].vring.avail = avail;
2231     vdev->vq[n].vring.used = used;
2232     virtio_init_region_cache(vdev, n);
2233 }
2234 
2235 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2236 {
2237     /* Don't allow the guest to flip a queue between existent and
2238      * nonexistent states, or to set it to an invalid size.
2239      */
2240     if (!!num != !!vdev->vq[n].vring.num ||
2241         num > VIRTQUEUE_MAX_SIZE ||
2242         num < 0) {
2243         return;
2244     }
2245     vdev->vq[n].vring.num = num;
2246 }
2247 
2248 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2249 {
2250     return QLIST_FIRST(&vdev->vector_queues[vector]);
2251 }
2252 
2253 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2254 {
2255     return QLIST_NEXT(vq, node);
2256 }
2257 
2258 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2259 {
2260     return vdev->vq[n].vring.num;
2261 }
2262 
2263 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2264 {
2265     return vdev->vq[n].vring.num_default;
2266 }
2267 
2268 int virtio_get_num_queues(VirtIODevice *vdev)
2269 {
2270     int i;
2271 
2272     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2273         if (!virtio_queue_get_num(vdev, i)) {
2274             break;
2275         }
2276     }
2277 
2278     return i;
2279 }
2280 
2281 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2282 {
2283     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2284     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2285 
2286     /* virtio-1 compliant devices cannot change the alignment */
2287     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2288         error_report("tried to modify queue alignment for virtio-1 device");
2289         return;
2290     }
2291     /* Check that the transport told us it was going to do this
2292      * (so a buggy transport will immediately assert rather than
2293      * silently failing to migrate this state)
2294      */
2295     assert(k->has_variable_vring_alignment);
2296 
2297     if (align) {
2298         vdev->vq[n].vring.align = align;
2299         virtio_queue_update_rings(vdev, n);
2300     }
2301 }
2302 
2303 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
2304 {
2305     bool ret = false;
2306 
2307     if (vq->vring.desc && vq->handle_aio_output) {
2308         VirtIODevice *vdev = vq->vdev;
2309 
2310         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2311         ret = vq->handle_aio_output(vdev, vq);
2312 
2313         if (unlikely(vdev->start_on_kick)) {
2314             virtio_set_started(vdev, true);
2315         }
2316     }
2317 
2318     return ret;
2319 }
2320 
2321 static void virtio_queue_notify_vq(VirtQueue *vq)
2322 {
2323     if (vq->vring.desc && vq->handle_output) {
2324         VirtIODevice *vdev = vq->vdev;
2325 
2326         if (unlikely(vdev->broken)) {
2327             return;
2328         }
2329 
2330         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2331         vq->handle_output(vdev, vq);
2332 
2333         if (unlikely(vdev->start_on_kick)) {
2334             virtio_set_started(vdev, true);
2335         }
2336     }
2337 }
2338 
2339 void virtio_queue_notify(VirtIODevice *vdev, int n)
2340 {
2341     VirtQueue *vq = &vdev->vq[n];
2342 
2343     if (unlikely(!vq->vring.desc || vdev->broken)) {
2344         return;
2345     }
2346 
2347     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
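    /*
     * Deliver the kick through the host notifier when it is enabled;
     * otherwise invoke the handler directly in the caller's context.
     */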
2348     if (vq->host_notifier_enabled) {
2349         event_notifier_set(&vq->host_notifier);
2350     } else if (vq->handle_output) {
2351         vq->handle_output(vdev, vq);
2352 
2353         if (unlikely(vdev->start_on_kick)) {
2354             virtio_set_started(vdev, true);
2355         }
2356     }
2357 }
2358 
2359 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2360 {
2361     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2362         VIRTIO_NO_VECTOR;
2363 }
2364 
2365 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2366 {
2367     VirtQueue *vq = &vdev->vq[n];
2368 
2369     if (n < VIRTIO_QUEUE_MAX) {
2370         if (vdev->vector_queues &&
2371             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2372             QLIST_REMOVE(vq, node);
2373         }
2374         vdev->vq[n].vector = vector;
2375         if (vdev->vector_queues &&
2376             vector != VIRTIO_NO_VECTOR) {
2377             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2378         }
2379     }
2380 }
2381 
2382 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2383                             VirtIOHandleOutput handle_output)
2384 {
2385     int i;
2386 
2387     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2388         if (vdev->vq[i].vring.num == 0)
2389             break;
2390     }
2391 
2392     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2393         abort();
2394 
2395     vdev->vq[i].vring.num = queue_size;
2396     vdev->vq[i].vring.num_default = queue_size;
2397     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2398     vdev->vq[i].handle_output = handle_output;
2399     vdev->vq[i].handle_aio_output = NULL;
2400     vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
2401                                        queue_size);
2402 
2403     return &vdev->vq[i];
2404 }
2405 
2406 void virtio_delete_queue(VirtQueue *vq)
2407 {
2408     vq->vring.num = 0;
2409     vq->vring.num_default = 0;
2410     vq->handle_output = NULL;
2411     vq->handle_aio_output = NULL;
2412     g_free(vq->used_elems);
2413     vq->used_elems = NULL;
2414     virtio_virtqueue_reset_region_cache(vq);
2415 }
2416 
2417 void virtio_del_queue(VirtIODevice *vdev, int n)
2418 {
2419     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2420         abort();
2421     }
2422 
2423     virtio_delete_queue(&vdev->vq[n]);
2424 }
2425 
2426 static void virtio_set_isr(VirtIODevice *vdev, int value)
2427 {
2428     uint8_t old = qatomic_read(&vdev->isr);
2429 
2430     /* Do not write ISR if it does not change, so that its cacheline remains
2431      * shared in the common case where the guest does not read it.
2432      */
2433     if ((old & value) != value) {
2434         qatomic_or(&vdev->isr, value);
2435     }
2436 }
2437 
2438 /* Called within rcu_read_lock(). */
2439 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2440 {
2441     uint16_t old, new;
2442     bool v;
2443     /* We need to expose used array entries before checking used event. */
2444     smp_mb();
2445     /* Always notify when the queue is empty (if the feature was acknowledged) */
2446     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2447         !vq->inuse && virtio_queue_empty(vq)) {
2448         return true;
2449     }
2450 
2451     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2452         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2453     }
2454 
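    /*
     * With EVENT_IDX the driver publishes a used_event index; notify only
     * if used_idx has passed it since the last value we signalled, or if
     * we have no valid record of the last signal.  vring_need_event()
     * does the wrap-safe comparison.
     */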
2455     v = vq->signalled_used_valid;
2456     vq->signalled_used_valid = true;
2457     old = vq->signalled_used;
2458     new = vq->signalled_used = vq->used_idx;
2459     return !v || vring_need_event(vring_get_used_event(vq), new, old);
2460 }
2461 
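/*
 * Packed-ring analogue of vring_need_event(): bit 15 of off_wrap carries the
 * wrap counter published with the event offset, the low 15 bits are the
 * descriptor offset.  If the published wrap counter differs from ours, the
 * offset refers to the previous ring epoch, so shift it down by the ring
 * size before doing the ordinary range check.
 */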
2462 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2463                                     uint16_t off_wrap, uint16_t new,
2464                                     uint16_t old)
2465 {
2466     int off = off_wrap & ~(1 << 15);
2467 
2468     if (wrap != off_wrap >> 15) {
2469         off -= vq->vring.num;
2470     }
2471 
2472     return vring_need_event(off, new, old);
2473 }
2474 
2475 /* Called within rcu_read_lock(). */
2476 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2477 {
2478     VRingPackedDescEvent e;
2479     uint16_t old, new;
2480     bool v;
2481     VRingMemoryRegionCaches *caches;
2482 
2483     caches = vring_get_region_caches(vq);
2484     if (!caches) {
2485         return false;
2486     }
2487 
2488     vring_packed_event_read(vdev, &caches->avail, &e);
2489 
2490     old = vq->signalled_used;
2491     new = vq->signalled_used = vq->used_idx;
2492     v = vq->signalled_used_valid;
2493     vq->signalled_used_valid = true;
2494 
2495     if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2496         return false;
2497     } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2498         return true;
2499     }
2500 
2501     return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2502                                          e.off_wrap, new, old);
2503 }
2504 
2505 /* Called within rcu_read_lock().  */
2506 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2507 {
2508     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2509         return virtio_packed_should_notify(vdev, vq);
2510     } else {
2511         return virtio_split_should_notify(vdev, vq);
2512     }
2513 }
2514 
2515 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2516 {
2517     WITH_RCU_READ_LOCK_GUARD() {
2518         if (!virtio_should_notify(vdev, vq)) {
2519             return;
2520         }
2521     }
2522 
2523     trace_virtio_notify_irqfd(vdev, vq);
2524 
2525     /*
2526      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2527      * Windows drivers included in virtio-win 1.8.0 (circa 2015) were
2528      * incorrectly polling this bit during crashdump and hibernation
2529      * in MSI mode, causing a hang if this bit is never updated.
2530      * Recent releases of Windows do not really shut down, but rather
2531      * log out and hibernate to make the next startup faster.  Hence,
2532      * this manifested as a more serious hang during shutdown.
2533      *
2534      * The next driver release, from 2016, fixed this problem, so working
2535      * around it is not a must, but it is easy to do, so let's do it here.
2536      *
2537      * Note: it's safe to update ISR from any thread as it was switched
2538      * to an atomic operation.
2539      */
2540     virtio_set_isr(vq->vdev, 0x1);
2541     event_notifier_set(&vq->guest_notifier);
2542 }
2543 
2544 static void virtio_irq(VirtQueue *vq)
2545 {
2546     virtio_set_isr(vq->vdev, 0x1);
2547     virtio_notify_vector(vq->vdev, vq->vector);
2548 }
2549 
2550 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2551 {
2552     WITH_RCU_READ_LOCK_GUARD() {
2553         if (!virtio_should_notify(vdev, vq)) {
2554             return;
2555         }
2556     }
2557 
2558     trace_virtio_notify(vdev, vq);
2559     virtio_irq(vq);
2560 }
2561 
2562 void virtio_notify_config(VirtIODevice *vdev)
2563 {
2564     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2565         return;
2566 
2567     virtio_set_isr(vdev, 0x3);
2568     vdev->generation++;
2569     virtio_notify_vector(vdev, vdev->config_vector);
2570 }
2571 
2572 static bool virtio_device_endian_needed(void *opaque)
2573 {
2574     VirtIODevice *vdev = opaque;
2575 
2576     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2577     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2578         return vdev->device_endian != virtio_default_endian();
2579     }
2580     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2581     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2582 }
2583 
2584 static bool virtio_64bit_features_needed(void *opaque)
2585 {
2586     VirtIODevice *vdev = opaque;
2587 
2588     return (vdev->host_features >> 32) != 0;
2589 }
2590 
2591 static bool virtio_virtqueue_needed(void *opaque)
2592 {
2593     VirtIODevice *vdev = opaque;
2594 
2595     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2596 }
2597 
2598 static bool virtio_packed_virtqueue_needed(void *opaque)
2599 {
2600     VirtIODevice *vdev = opaque;
2601 
2602     return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2603 }
2604 
2605 static bool virtio_ringsize_needed(void *opaque)
2606 {
2607     VirtIODevice *vdev = opaque;
2608     int i;
2609 
2610     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2611         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2612             return true;
2613         }
2614     }
2615     return false;
2616 }
2617 
2618 static bool virtio_extra_state_needed(void *opaque)
2619 {
2620     VirtIODevice *vdev = opaque;
2621     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2622     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2623 
2624     return k->has_extra_state &&
2625         k->has_extra_state(qbus->parent);
2626 }
2627 
2628 static bool virtio_broken_needed(void *opaque)
2629 {
2630     VirtIODevice *vdev = opaque;
2631 
2632     return vdev->broken;
2633 }
2634 
2635 static bool virtio_started_needed(void *opaque)
2636 {
2637     VirtIODevice *vdev = opaque;
2638 
2639     return vdev->started;
2640 }
2641 
2642 static bool virtio_disabled_needed(void *opaque)
2643 {
2644     VirtIODevice *vdev = opaque;
2645 
2646     return vdev->disabled;
2647 }
2648 
2649 static const VMStateDescription vmstate_virtqueue = {
2650     .name = "virtqueue_state",
2651     .version_id = 1,
2652     .minimum_version_id = 1,
2653     .fields = (VMStateField[]) {
2654         VMSTATE_UINT64(vring.avail, struct VirtQueue),
2655         VMSTATE_UINT64(vring.used, struct VirtQueue),
2656         VMSTATE_END_OF_LIST()
2657     }
2658 };
2659 
2660 static const VMStateDescription vmstate_packed_virtqueue = {
2661     .name = "packed_virtqueue_state",
2662     .version_id = 1,
2663     .minimum_version_id = 1,
2664     .fields = (VMStateField[]) {
2665         VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2666         VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2667         VMSTATE_UINT16(used_idx, struct VirtQueue),
2668         VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2669         VMSTATE_UINT32(inuse, struct VirtQueue),
2670         VMSTATE_END_OF_LIST()
2671     }
2672 };
2673 
2674 static const VMStateDescription vmstate_virtio_virtqueues = {
2675     .name = "virtio/virtqueues",
2676     .version_id = 1,
2677     .minimum_version_id = 1,
2678     .needed = &virtio_virtqueue_needed,
2679     .fields = (VMStateField[]) {
2680         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2681                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2682         VMSTATE_END_OF_LIST()
2683     }
2684 };
2685 
2686 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2687     .name = "virtio/packed_virtqueues",
2688     .version_id = 1,
2689     .minimum_version_id = 1,
2690     .needed = &virtio_packed_virtqueue_needed,
2691     .fields = (VMStateField[]) {
2692         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2693                       VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2694         VMSTATE_END_OF_LIST()
2695     }
2696 };
2697 
2698 static const VMStateDescription vmstate_ringsize = {
2699     .name = "ringsize_state",
2700     .version_id = 1,
2701     .minimum_version_id = 1,
2702     .fields = (VMStateField[]) {
2703         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2704         VMSTATE_END_OF_LIST()
2705     }
2706 };
2707 
2708 static const VMStateDescription vmstate_virtio_ringsize = {
2709     .name = "virtio/ringsize",
2710     .version_id = 1,
2711     .minimum_version_id = 1,
2712     .needed = &virtio_ringsize_needed,
2713     .fields = (VMStateField[]) {
2714         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2715                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2716         VMSTATE_END_OF_LIST()
2717     }
2718 };
2719 
2720 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2721                            const VMStateField *field)
2722 {
2723     VirtIODevice *vdev = pv;
2724     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2725     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2726 
2727     if (!k->load_extra_state) {
2728         return -1;
2729     } else {
2730         return k->load_extra_state(qbus->parent, f);
2731     }
2732 }
2733 
2734 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2735                            const VMStateField *field, JSONWriter *vmdesc)
2736 {
2737     VirtIODevice *vdev = pv;
2738     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2739     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2740 
2741     k->save_extra_state(qbus->parent, f);
2742     return 0;
2743 }
2744 
2745 static const VMStateInfo vmstate_info_extra_state = {
2746     .name = "virtqueue_extra_state",
2747     .get = get_extra_state,
2748     .put = put_extra_state,
2749 };
2750 
2751 static const VMStateDescription vmstate_virtio_extra_state = {
2752     .name = "virtio/extra_state",
2753     .version_id = 1,
2754     .minimum_version_id = 1,
2755     .needed = &virtio_extra_state_needed,
2756     .fields = (VMStateField[]) {
2757         {
2758             .name         = "extra_state",
2759             .version_id   = 0,
2760             .field_exists = NULL,
2761             .size         = 0,
2762             .info         = &vmstate_info_extra_state,
2763             .flags        = VMS_SINGLE,
2764             .offset       = 0,
2765         },
2766         VMSTATE_END_OF_LIST()
2767     }
2768 };
2769 
2770 static const VMStateDescription vmstate_virtio_device_endian = {
2771     .name = "virtio/device_endian",
2772     .version_id = 1,
2773     .minimum_version_id = 1,
2774     .needed = &virtio_device_endian_needed,
2775     .fields = (VMStateField[]) {
2776         VMSTATE_UINT8(device_endian, VirtIODevice),
2777         VMSTATE_END_OF_LIST()
2778     }
2779 };
2780 
2781 static const VMStateDescription vmstate_virtio_64bit_features = {
2782     .name = "virtio/64bit_features",
2783     .version_id = 1,
2784     .minimum_version_id = 1,
2785     .needed = &virtio_64bit_features_needed,
2786     .fields = (VMStateField[]) {
2787         VMSTATE_UINT64(guest_features, VirtIODevice),
2788         VMSTATE_END_OF_LIST()
2789     }
2790 };
2791 
2792 static const VMStateDescription vmstate_virtio_broken = {
2793     .name = "virtio/broken",
2794     .version_id = 1,
2795     .minimum_version_id = 1,
2796     .needed = &virtio_broken_needed,
2797     .fields = (VMStateField[]) {
2798         VMSTATE_BOOL(broken, VirtIODevice),
2799         VMSTATE_END_OF_LIST()
2800     }
2801 };
2802 
2803 static const VMStateDescription vmstate_virtio_started = {
2804     .name = "virtio/started",
2805     .version_id = 1,
2806     .minimum_version_id = 1,
2807     .needed = &virtio_started_needed,
2808     .fields = (VMStateField[]) {
2809         VMSTATE_BOOL(started, VirtIODevice),
2810         VMSTATE_END_OF_LIST()
2811     }
2812 };
2813 
2814 static const VMStateDescription vmstate_virtio_disabled = {
2815     .name = "virtio/disabled",
2816     .version_id = 1,
2817     .minimum_version_id = 1,
2818     .needed = &virtio_disabled_needed,
2819     .fields = (VMStateField[]) {
2820         VMSTATE_BOOL(disabled, VirtIODevice),
2821         VMSTATE_END_OF_LIST()
2822     }
2823 };
2824 
2825 static const VMStateDescription vmstate_virtio = {
2826     .name = "virtio",
2827     .version_id = 1,
2828     .minimum_version_id = 1,
2829     .minimum_version_id_old = 1,
2830     .fields = (VMStateField[]) {
2831         VMSTATE_END_OF_LIST()
2832     },
2833     .subsections = (const VMStateDescription*[]) {
2834         &vmstate_virtio_device_endian,
2835         &vmstate_virtio_64bit_features,
2836         &vmstate_virtio_virtqueues,
2837         &vmstate_virtio_ringsize,
2838         &vmstate_virtio_broken,
2839         &vmstate_virtio_extra_state,
2840         &vmstate_virtio_started,
2841         &vmstate_virtio_packed_virtqueues,
2842         &vmstate_virtio_disabled,
2843         NULL
2844     }
2845 };
2846 
2847 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2848 {
2849     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2850     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2851     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2852     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2853     int i;
2854 
2855     if (k->save_config) {
2856         k->save_config(qbus->parent, f);
2857     }
2858 
2859     qemu_put_8s(f, &vdev->status);
2860     qemu_put_8s(f, &vdev->isr);
2861     qemu_put_be16s(f, &vdev->queue_sel);
2862     qemu_put_be32s(f, &guest_features_lo);
2863     qemu_put_be32(f, vdev->config_len);
2864     qemu_put_buffer(f, vdev->config, vdev->config_len);
2865 
2866     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2867         if (vdev->vq[i].vring.num == 0)
2868             break;
2869     }
2870 
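    /* i is now the number of virtqueues in use; save it, then the per-queue state. */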
2871     qemu_put_be32(f, i);
2872 
2873     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2874         if (vdev->vq[i].vring.num == 0)
2875             break;
2876 
2877         qemu_put_be32(f, vdev->vq[i].vring.num);
2878         if (k->has_variable_vring_alignment) {
2879             qemu_put_be32(f, vdev->vq[i].vring.align);
2880         }
2881         /*
2882          * Save desc now; the rest of the ring addresses are saved in
2883          * subsections for VIRTIO-1 devices.
2884          */
2885         qemu_put_be64(f, vdev->vq[i].vring.desc);
2886         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2887         if (k->save_queue) {
2888             k->save_queue(qbus->parent, i, f);
2889         }
2890     }
2891 
2892     if (vdc->save != NULL) {
2893         vdc->save(vdev, f);
2894     }
2895 
2896     if (vdc->vmsd) {
2897         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2898         if (ret) {
2899             return ret;
2900         }
2901     }
2902 
2903     /* Subsections */
2904     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2905 }
2906 
2907 /* A wrapper for use as a VMState .put function */
2908 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2909                               const VMStateField *field, JSONWriter *vmdesc)
2910 {
2911     return virtio_save(VIRTIO_DEVICE(opaque), f);
2912 }
2913 
2914 /* A wrapper for use as a VMState .get function */
2915 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2916                              const VMStateField *field)
2917 {
2918     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2919     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2920 
2921     return virtio_load(vdev, f, dc->vmsd->version_id);
2922 }
2923 
2924 const VMStateInfo  virtio_vmstate_info = {
2925     .name = "virtio",
2926     .get = virtio_device_get,
2927     .put = virtio_device_put,
2928 };
2929 
2930 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2931 {
2932     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2933     bool bad = (val & ~(vdev->host_features)) != 0;
2934 
2935     val &= vdev->host_features;
2936     if (k->set_features) {
2937         k->set_features(vdev, val);
2938     }
2939     vdev->guest_features = val;
2940     return bad ? -1 : 0;
2941 }
2942 
2943 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2944 {
2945     int ret;
2946     /*
2947      * The driver must not attempt to set features after feature negotiation
2948      * has finished.
2949      */
2950     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2951         return -EINVAL;
2952     }
2953     ret = virtio_set_features_nocheck(vdev, val);
2954     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2955         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2956         int i;
2957         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2958             if (vdev->vq[i].vring.num != 0) {
2959                 virtio_init_region_cache(vdev, i);
2960             }
2961         }
2962     }
2963     if (!ret) {
2964         if (!virtio_device_started(vdev, vdev->status) &&
2965             !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2966             vdev->start_on_kick = true;
2967         }
2968     }
2969     return ret;
2970 }
2971 
2972 size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
2973                                       uint64_t host_features)
2974 {
2975     size_t config_size = 0;
2976     int i;
2977 
2978     for (i = 0; feature_sizes[i].flags != 0; i++) {
2979         if (host_features & feature_sizes[i].flags) {
2980             config_size = MAX(feature_sizes[i].end, config_size);
2981         }
2982     }
2983 
2984     return config_size;
2985 }
2986 
2987 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2988 {
2989     int i, ret;
2990     int32_t config_len;
2991     uint32_t num;
2992     uint32_t features;
2993     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2994     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2995     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2996 
2997     /*
2998      * We poison the endianness to ensure it does not get used before
2999      * subsections have been loaded.
3000      */
3001     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3002 
3003     if (k->load_config) {
3004         ret = k->load_config(qbus->parent, f);
3005         if (ret)
3006             return ret;
3007     }
3008 
3009     qemu_get_8s(f, &vdev->status);
3010     qemu_get_8s(f, &vdev->isr);
3011     qemu_get_be16s(f, &vdev->queue_sel);
3012     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3013         return -1;
3014     }
3015     qemu_get_be32s(f, &features);
3016 
3017     /*
3018      * Temporarily set guest_features low bits - needed by
3019      * the virtio-net load code, which tests for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
3020      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3021      *
3022      * Note: devices should always test host features in the future - don't create
3023      * new dependencies like this.
3024      */
3025     vdev->guest_features = features;
3026 
3027     config_len = qemu_get_be32(f);
3028 
3029     /*
3030      * There are cases where the incoming config can be bigger or smaller
3031      * than what we have; so load what we have space for, and skip
3032      * any excess that's in the stream.
3033      */
3034     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3035 
3036     while (config_len > vdev->config_len) {
3037         qemu_get_byte(f);
3038         config_len--;
3039     }
3040 
3041     num = qemu_get_be32(f);
3042 
3043     if (num > VIRTIO_QUEUE_MAX) {
3044         error_report("Invalid number of virtqueues: 0x%x", num);
3045         return -1;
3046     }
3047 
3048     for (i = 0; i < num; i++) {
3049         vdev->vq[i].vring.num = qemu_get_be32(f);
3050         if (k->has_variable_vring_alignment) {
3051             vdev->vq[i].vring.align = qemu_get_be32(f);
3052         }
3053         vdev->vq[i].vring.desc = qemu_get_be64(f);
3054         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3055         vdev->vq[i].signalled_used_valid = false;
3056         vdev->vq[i].notification = true;
3057 
3058         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3059             error_report("VQ %d address 0x0 "
3060                          "inconsistent with Host index 0x%x",
3061                          i, vdev->vq[i].last_avail_idx);
3062             return -1;
3063         }
3064         if (k->load_queue) {
3065             ret = k->load_queue(qbus->parent, i, f);
3066             if (ret)
3067                 return ret;
3068         }
3069     }
3070 
3071     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3072 
3073     if (vdc->load != NULL) {
3074         ret = vdc->load(vdev, f, version_id);
3075         if (ret) {
3076             return ret;
3077         }
3078     }
3079 
3080     if (vdc->vmsd) {
3081         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3082         if (ret) {
3083             return ret;
3084         }
3085     }
3086 
3087     /* Subsections */
3088     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3089     if (ret) {
3090         return ret;
3091     }
3092 
3093     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3094         vdev->device_endian = virtio_default_endian();
3095     }
3096 
3097     if (virtio_64bit_features_needed(vdev)) {
3098         /*
3099          * Subsection load filled vdev->guest_features.  Run them
3100          * through virtio_set_features_nocheck() to sanity-check them against
3101          * host_features.
3102          */
3103         uint64_t features64 = vdev->guest_features;
3104         if (virtio_set_features_nocheck(vdev, features64) < 0) {
3105             error_report("Features 0x%" PRIx64 " unsupported. "
3106                          "Allowed features: 0x%" PRIx64,
3107                          features64, vdev->host_features);
3108             return -1;
3109         }
3110     } else {
3111         if (virtio_set_features_nocheck(vdev, features) < 0) {
3112             error_report("Features 0x%x unsupported. "
3113                          "Allowed features: 0x%" PRIx64,
3114                          features, vdev->host_features);
3115             return -1;
3116         }
3117     }
3118 
3119     if (!virtio_device_started(vdev, vdev->status) &&
3120         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3121         vdev->start_on_kick = true;
3122     }
3123 
3124     RCU_READ_LOCK_GUARD();
3125     for (i = 0; i < num; i++) {
3126         if (vdev->vq[i].vring.desc) {
3127             uint16_t nheads;
3128 
3129             /*
3130              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3131              * only the region cache needs to be set up.  Legacy devices need
3132              * to calculate used and avail ring addresses based on the desc
3133              * address.
3134              */
3135             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3136                 virtio_init_region_cache(vdev, i);
3137             } else {
3138                 virtio_queue_update_rings(vdev, i);
3139             }
3140 
3141             if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3142                 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3143                 vdev->vq[i].shadow_avail_wrap_counter =
3144                                         vdev->vq[i].last_avail_wrap_counter;
3145                 continue;
3146             }
3147 
3148             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3149             /* Check it isn't doing strange things with descriptor numbers. */
3150             if (nheads > vdev->vq[i].vring.num) {
3151                 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3152                              "inconsistent with Host index 0x%x: delta 0x%x",
3153                              i, vdev->vq[i].vring.num,
3154                              vring_avail_idx(&vdev->vq[i]),
3155                              vdev->vq[i].last_avail_idx, nheads);
3156                 vdev->vq[i].used_idx = 0;
3157                 vdev->vq[i].shadow_avail_idx = 0;
3158                 vdev->vq[i].inuse = 0;
3159                 continue;
3160             }
3161             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3162             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3163 
3164             /*
3165              * Some devices migrate VirtQueueElements that have been popped
3166              * from the avail ring but not yet returned to the used ring.
3167              * Since max ring size < UINT16_MAX it's safe to use modulo
3168              * UINT16_MAX + 1 subtraction.
3169              */
3170             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3171                                 vdev->vq[i].used_idx);
3172             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3173                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3174                              "used_idx 0x%x",
3175                              i, vdev->vq[i].vring.num,
3176                              vdev->vq[i].last_avail_idx,
3177                              vdev->vq[i].used_idx);
3178                 return -1;
3179             }
3180         }
3181     }
3182 
3183     if (vdc->post_load) {
3184         ret = vdc->post_load(vdev);
3185         if (ret) {
3186             return ret;
3187         }
3188     }
3189 
3190     return 0;
3191 }
3192 
3193 void virtio_cleanup(VirtIODevice *vdev)
3194 {
3195     qemu_del_vm_change_state_handler(vdev->vmstate);
3196 }
3197 
3198 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3199 {
3200     VirtIODevice *vdev = opaque;
3201     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3202     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3203     bool backend_run = running && virtio_device_started(vdev, vdev->status);
3204     vdev->vm_running = running;
3205 
3206     if (backend_run) {
3207         virtio_set_status(vdev, vdev->status);
3208     }
3209 
3210     if (k->vmstate_change) {
3211         k->vmstate_change(qbus->parent, backend_run);
3212     }
3213 
3214     if (!backend_run) {
3215         virtio_set_status(vdev, vdev->status);
3216     }
3217 }
3218 
3219 void virtio_instance_init_common(Object *proxy_obj, void *data,
3220                                  size_t vdev_size, const char *vdev_name)
3221 {
3222     DeviceState *vdev = data;
3223 
3224     object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3225                                        vdev_size, vdev_name, &error_abort,
3226                                        NULL);
3227     qdev_alias_all_properties(vdev, proxy_obj);
3228 }
3229 
3230 void virtio_init(VirtIODevice *vdev, const char *name,
3231                  uint16_t device_id, size_t config_size)
3232 {
3233     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3234     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3235     int i;
3236     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3237 
3238     if (nvectors) {
3239         vdev->vector_queues =
3240             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3241     }
3242 
3243     vdev->start_on_kick = false;
3244     vdev->started = false;
3245     vdev->device_id = device_id;
3246     vdev->status = 0;
3247     qatomic_set(&vdev->isr, 0);
3248     vdev->queue_sel = 0;
3249     vdev->config_vector = VIRTIO_NO_VECTOR;
3250     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
3251     vdev->vm_running = runstate_is_running();
3252     vdev->broken = false;
3253     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3254         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3255         vdev->vq[i].vdev = vdev;
3256         vdev->vq[i].queue_index = i;
3257         vdev->vq[i].host_notifier_enabled = false;
3258     }
3259 
3260     vdev->name = name;
3261     vdev->config_len = config_size;
3262     if (vdev->config_len) {
3263         vdev->config = g_malloc0(config_size);
3264     } else {
3265         vdev->config = NULL;
3266     }
3267     vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3268             virtio_vmstate_change, vdev);
3269     vdev->device_endian = virtio_default_endian();
3270     vdev->use_guest_notifier_mask = true;
3271 }
3272 
3273 /*
3274  * Only devices that have already been around prior to defining the virtio
3275  * standard support legacy mode; this includes devices not specified in the
3276  * standard. All newer devices conform to the virtio standard only.
3277  */
3278 bool virtio_legacy_allowed(VirtIODevice *vdev)
3279 {
3280     switch (vdev->device_id) {
3281     case VIRTIO_ID_NET:
3282     case VIRTIO_ID_BLOCK:
3283     case VIRTIO_ID_CONSOLE:
3284     case VIRTIO_ID_RNG:
3285     case VIRTIO_ID_BALLOON:
3286     case VIRTIO_ID_RPMSG:
3287     case VIRTIO_ID_SCSI:
3288     case VIRTIO_ID_9P:
3289     case VIRTIO_ID_RPROC_SERIAL:
3290     case VIRTIO_ID_CAIF:
3291         return true;
3292     default:
3293         return false;
3294     }
3295 }
3296 
3297 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3298 {
3299     return vdev->disable_legacy_check;
3300 }
3301 
3302 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3303 {
3304     return vdev->vq[n].vring.desc;
3305 }
3306 
3307 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3308 {
3309     return virtio_queue_get_desc_addr(vdev, n) != 0;
3310 }
3311 
3312 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3313 {
3314     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3315     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3316 
3317     if (k->queue_enabled) {
3318         return k->queue_enabled(qbus->parent, n);
3319     }
3320     return virtio_queue_enabled_legacy(vdev, n);
3321 }
3322 
3323 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3324 {
3325     return vdev->vq[n].vring.avail;
3326 }
3327 
3328 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3329 {
3330     return vdev->vq[n].vring.used;
3331 }
3332 
3333 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3334 {
3335     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3336 }
3337 
3338 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3339 {
3340     int s;
3341 
3342     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3343         return sizeof(struct VRingPackedDescEvent);
3344     }
3345 
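    /*
     * Split ring layout: flags (2 bytes) + idx (2 bytes) + num ring entries
     * of 2 bytes each, plus a trailing used_event field (2 bytes) when
     * VIRTIO_RING_F_EVENT_IDX has been negotiated.
     */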
3346     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3347     return offsetof(VRingAvail, ring) +
3348         sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3349 }
3350 
3351 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3352 {
3353     int s;
3354 
3355     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3356         return sizeof(struct VRingPackedDescEvent);
3357     }
3358 
3359     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3360     return offsetof(VRingUsed, ring) +
3361         sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3362 }
3363 
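/*
 * For the packed ring both progress counters are folded into one 32-bit
 * value: bits 0-14 hold last_avail_idx with its wrap counter in bit 15,
 * and bits 16-30 hold used_idx with its wrap counter in bit 31.  For
 * example, a return value of 0x00038005 means last_avail_idx == 5 with
 * the avail wrap counter set, and used_idx == 3 with the used wrap
 * counter clear.  virtio_queue_packed_set_last_avail_idx() decodes the
 * same layout.
 */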
3364 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3365                                                            int n)
3366 {
3367     unsigned int avail, used;
3368 
3369     avail = vdev->vq[n].last_avail_idx;
3370     avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3371 
3372     used = vdev->vq[n].used_idx;
3373     used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3374 
3375     return avail | used << 16;
3376 }
3377 
3378 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3379                                                       int n)
3380 {
3381     return vdev->vq[n].last_avail_idx;
3382 }
3383 
3384 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3385 {
3386     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3387         return virtio_queue_packed_get_last_avail_idx(vdev, n);
3388     } else {
3389         return virtio_queue_split_get_last_avail_idx(vdev, n);
3390     }
3391 }
3392 
3393 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3394                                                    int n, unsigned int idx)
3395 {
3396     struct VirtQueue *vq = &vdev->vq[n];
3397 
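    /* Decode the layout produced by virtio_queue_packed_get_last_avail_idx(). */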
3398     vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3399     vq->last_avail_wrap_counter =
3400         vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3401     idx >>= 16;
3402     vq->used_idx = idx & 0x7fff;
3403     vq->used_wrap_counter = !!(idx & 0x8000);
3404 }
3405 
3406 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3407                                                   int n, unsigned int idx)
3408 {
3409     vdev->vq[n].last_avail_idx = idx;
3410     vdev->vq[n].shadow_avail_idx = idx;
3411 }
3412 
3413 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3414                                      unsigned int idx)
3415 {
3416     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3417         virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3418     } else {
3419         virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3420     }
3421 }
3422 
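/*
 * Rewind last_avail_idx so that descriptors which were popped but never
 * marked used will be fetched again.  Split rings can recover this from the
 * used ring's idx in guest memory; packed rings keep no such reference in
 * shared memory, so the packed variant is a no-op.
 */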
3423 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3424                                                        int n)
3425 {
3426     /* We don't have a reference like avail idx in shared memory */
3427     return;
3428 }
3429 
3430 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3431                                                       int n)
3432 {
3433     RCU_READ_LOCK_GUARD();
3434     if (vdev->vq[n].vring.desc) {
3435         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3436         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3437     }
3438 }
3439 
3440 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3441 {
3442     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3443         virtio_queue_packed_restore_last_avail_idx(vdev, n);
3444     } else {
3445         virtio_queue_split_restore_last_avail_idx(vdev, n);
3446     }
3447 }
3448 
3449 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3450 {
3451     /* used idx was updated through set_last_avail_idx() */
3452     return;
3453 }
3454 
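/*
 * Despite the "split_packed" name, this helper handles split rings: it
 * re-reads the used ring's idx from guest memory.
 */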
3455 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
3456 {
3457     RCU_READ_LOCK_GUARD();
3458     if (vdev->vq[n].vring.desc) {
3459         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3460     }
3461 }
3462 
3463 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3464 {
3465     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3466         virtio_queue_packed_update_used_idx(vdev, n);
3467     } else {
3468         virtio_split_packed_update_used_idx(vdev, n);
3469     }
3470 }
3471 
3472 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3473 {
3474     vdev->vq[n].signalled_used_valid = false;
3475 }
3476 
3477 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3478 {
3479     return vdev->vq + n;
3480 }
3481 
3482 uint16_t virtio_get_queue_index(VirtQueue *vq)
3483 {
3484     return vq->queue_index;
3485 }
3486 
3487 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3488 {
3489     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3490     if (event_notifier_test_and_clear(n)) {
3491         virtio_irq(vq);
3492     }
3493 }
3494 
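/*
 * Wire or unwire the guest notifier's fd handler.  When the interrupt is
 * injected through an irqfd (e.g. KVM's irqfd), the notifier is consumed in
 * the kernel and no userspace handler is installed.  On deassign, drain a
 * pending event so a notification raised just before removal is not lost.
 */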
3495 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3496                                                 bool with_irqfd)
3497 {
3498     if (assign && !with_irqfd) {
3499         event_notifier_set_handler(&vq->guest_notifier,
3500                                    virtio_queue_guest_notifier_read);
3501     } else {
3502         event_notifier_set_handler(&vq->guest_notifier, NULL);
3503     }
3504     if (!assign) {
3505         /* Test and clear notifier before closing it,
3506          * in case poll callback didn't have time to run. */
3507         virtio_queue_guest_notifier_read(&vq->guest_notifier);
3508     }
3509 }
3510 
3511 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3512 {
3513     return &vq->guest_notifier;
3514 }
3515 
3516 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
3517 {
3518     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3519     if (event_notifier_test_and_clear(n)) {
3520         virtio_queue_notify_aio_vq(vq);
3521     }
3522 }
3523 
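/*
 * Adaptive polling hooks for the AioContext: while the event loop is busy
 * polling, guest notifications are suppressed (poll_begin) because the ring
 * is checked directly by the poll callback; they are re-enabled in poll_end
 * before falling back to fd-based notification.
 */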
3524 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3525 {
3526     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3527 
3528     virtio_queue_set_notification(vq, 0);
3529 }
3530 
3531 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3532 {
3533     EventNotifier *n = opaque;
3534     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3535 
3536     if (!vq->vring.desc || virtio_queue_empty(vq)) {
3537         return false;
3538     }
3539 
3540     return virtio_queue_notify_aio_vq(vq);
3541 }
3542 
3543 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3544 {
3545     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3546 
3547     /* Caller polls once more after this to catch requests that race with us */
3548     virtio_queue_set_notification(vq, 1);
3549 }
3550 
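/*
 * Attach or detach the queue's host notifier to an AioContext (e.g. an
 * IOThread) together with the polling hooks above.  On detach, read the
 * notifier one last time to catch an event that raced with the removal.
 */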
3551 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
3552                                                 VirtIOHandleAIOOutput handle_output)
3553 {
3554     if (handle_output) {
3555         vq->handle_aio_output = handle_output;
3556         aio_set_event_notifier(ctx, &vq->host_notifier, true,
3557                                virtio_queue_host_notifier_aio_read,
3558                                virtio_queue_host_notifier_aio_poll);
3559         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3560                                     virtio_queue_host_notifier_aio_poll_begin,
3561                                     virtio_queue_host_notifier_aio_poll_end);
3562     } else {
3563         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
3564         /* Test and clear notifier after disabling event,
3565          * in case poll callback didn't have time to run. */
3566         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
3567         vq->handle_aio_output = NULL;
3568     }
3569 }
3570 
3571 void virtio_queue_host_notifier_read(EventNotifier *n)
3572 {
3573     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3574     if (event_notifier_test_and_clear(n)) {
3575         virtio_queue_notify_vq(vq);
3576     }
3577 }
3578 
3579 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3580 {
3581     return &vq->host_notifier;
3582 }
3583 
3584 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3585 {
3586     vq->host_notifier_enabled = enabled;
3587 }
3588 
3589 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3590                                       MemoryRegion *mr, bool assign)
3591 {
3592     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3593     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3594 
3595     if (k->set_host_notifier_mr) {
3596         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3597     }
3598 
3599     return -1;
3600 }
3601 
3602 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3603 {
3604     g_free(vdev->bus_name);
3605     vdev->bus_name = g_strdup(bus_name);
3606 }
3607 
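/*
 * Report a device error.  For VIRTIO 1.0 devices, also set NEEDS_RESET in
 * the status and signal a config change so the driver can notice; the device
 * is then marked broken, which other paths check before touching the vrings.
 */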
3608 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3609 {
3610     va_list ap;
3611 
3612     va_start(ap, fmt);
3613     error_vreport(fmt, ap);
3614     va_end(ap);
3615 
3616     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3617         vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3618         virtio_notify_config(vdev);
3619     }
3620 
3621     vdev->broken = true;
3622 }
3623 
3624 static void virtio_memory_listener_commit(MemoryListener *listener)
3625 {
3626     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3627     int i;
3628 
3629     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3630         if (vdev->vq[i].vring.num == 0) {
3631             break;
3632         }
3633         virtio_init_region_cache(vdev, i);
3634     }
3635 }
3636 
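/*
 * Common realize path for virtio devices: run the device-specific realize
 * first, then plug the device into its virtio bus, and finally register a
 * memory listener so that the vring region caches are rebuilt whenever the
 * memory map of vdev->dma_as changes.
 */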
3637 static void virtio_device_realize(DeviceState *dev, Error **errp)
3638 {
3639     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3640     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3641     Error *err = NULL;
3642 
3643     /* Devices should either use vmsd or the load/save methods */
3644     assert(!vdc->vmsd || !vdc->load);
3645 
3646     if (vdc->realize != NULL) {
3647         vdc->realize(dev, &err);
3648         if (err != NULL) {
3649             error_propagate(errp, err);
3650             return;
3651         }
3652     }
3653 
3654     virtio_bus_device_plugged(vdev, &err);
3655     if (err != NULL) {
3656         error_propagate(errp, err);
3657         vdc->unrealize(dev);
3658         return;
3659     }
3660 
3661     vdev->listener.commit = virtio_memory_listener_commit;
3662     vdev->listener.name = "virtio";
3663     memory_listener_register(&vdev->listener, vdev->dma_as);
3664 }
3665 
3666 static void virtio_device_unrealize(DeviceState *dev)
3667 {
3668     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3669     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3670 
3671     memory_listener_unregister(&vdev->listener);
3672     virtio_bus_device_unplugged(vdev);
3673 
3674     if (vdc->unrealize != NULL) {
3675         vdc->unrealize(dev);
3676     }
3677 
3678     g_free(vdev->bus_name);
3679     vdev->bus_name = NULL;
3680 }
3681 
3682 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3683 {
3684     int i;
3685     if (!vdev->vq) {
3686         return;
3687     }
3688 
3689     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3690         if (vdev->vq[i].vring.num == 0) {
3691             break;
3692         }
3693         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3694     }
3695     g_free(vdev->vq);
3696 }
3697 
3698 static void virtio_device_instance_finalize(Object *obj)
3699 {
3700     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3701 
3702     virtio_device_free_virtqueues(vdev);
3703 
3704     g_free(vdev->config);
3705     g_free(vdev->vector_queues);
3706 }
3707 
3708 static Property virtio_properties[] = {
3709     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3710     DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3711     DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3712     DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3713                      disable_legacy_check, false),
3714     DEFINE_PROP_END_OF_LIST(),
3715 };
3716 
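/*
 * Default start_ioeventfd implementation: assign a host notifier to every
 * configured queue and hook it up to virtio_queue_host_notifier_read, then
 * kick each queue once so requests queued before ioeventfd was enabled are
 * processed.  On failure, assignments are unwound in reverse order.
 */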
3717 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3718 {
3719     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3720     int i, n, r, err;
3721 
3722     /*
3723      * Batch all the host notifiers in a single transaction to avoid
3724      * quadratic time complexity in address_space_update_ioeventfds().
3725      */
3726     memory_region_transaction_begin();
3727     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3728         VirtQueue *vq = &vdev->vq[n];
3729         if (!virtio_queue_get_num(vdev, n)) {
3730             continue;
3731         }
3732         r = virtio_bus_set_host_notifier(qbus, n, true);
3733         if (r < 0) {
3734             err = r;
3735             goto assign_error;
3736         }
3737         event_notifier_set_handler(&vq->host_notifier,
3738                                    virtio_queue_host_notifier_read);
3739     }
3740 
3741     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3742         /* Kick right away to begin processing requests already in vring */
3743         VirtQueue *vq = &vdev->vq[n];
3744         if (!vq->vring.num) {
3745             continue;
3746         }
3747         event_notifier_set(&vq->host_notifier);
3748     }
3749     memory_region_transaction_commit();
3750     return 0;
3751 
3752 assign_error:
3753     i = n; /* save n for a second iteration after transaction is committed. */
3754     while (--n >= 0) {
3755         VirtQueue *vq = &vdev->vq[n];
3756         if (!virtio_queue_get_num(vdev, n)) {
3757             continue;
3758         }
3759 
3760         event_notifier_set_handler(&vq->host_notifier, NULL);
3761         r = virtio_bus_set_host_notifier(qbus, n, false);
3762         assert(r >= 0);
3763     }
3764     /*
3765      * The transaction expects the ioeventfds to be open when it
3766      * commits. Do it now, before the cleanup loop.
3767      */
3768     memory_region_transaction_commit();
3769 
3770     while (--i >= 0) {
3771         if (!virtio_queue_get_num(vdev, i)) {
3772             continue;
3773         }
3774         virtio_bus_cleanup_host_notifier(qbus, i);
3775     }
3776     return err;
3777 }
3778 
3779 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3780 {
3781     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3782     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3783 
3784     return virtio_bus_start_ioeventfd(vbus);
3785 }
3786 
3787 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3788 {
3789     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3790     int n, r;
3791 
3792     /*
3793      * Batch all the host notifiers in a single transaction to avoid
3794      * quadratic time complexity in address_space_update_ioeventfds().
3795      */
3796     memory_region_transaction_begin();
3797     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3798         VirtQueue *vq = &vdev->vq[n];
3799 
3800         if (!virtio_queue_get_num(vdev, n)) {
3801             continue;
3802         }
3803         event_notifier_set_handler(&vq->host_notifier, NULL);
3804         r = virtio_bus_set_host_notifier(qbus, n, false);
3805         assert(r >= 0);
3806     }
3807     /*
3808      * The transaction expects the ioeventfds to be open when it
3809      * commits. Do it now, before the cleanup loop.
3810      */
3811     memory_region_transaction_commit();
3812 
3813     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3814         if (!virtio_queue_get_num(vdev, n)) {
3815             continue;
3816         }
3817         virtio_bus_cleanup_host_notifier(qbus, n);
3818     }
3819 }
3820 
3821 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3822 {
3823     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3824     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3825 
3826     return virtio_bus_grab_ioeventfd(vbus);
3827 }
3828 
3829 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3830 {
3831     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3832     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3833 
3834     virtio_bus_release_ioeventfd(vbus);
3835 }
3836 
3837 static void virtio_device_class_init(ObjectClass *klass, void *data)
3838 {
3839     /* Set the default value here. */
3840     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3841     DeviceClass *dc = DEVICE_CLASS(klass);
3842 
3843     dc->realize = virtio_device_realize;
3844     dc->unrealize = virtio_device_unrealize;
3845     dc->bus_type = TYPE_VIRTIO_BUS;
3846     device_class_set_props(dc, virtio_properties);
3847     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3848     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3849 
3850     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3851 }
3852 
3853 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3854 {
3855     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3856     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3857 
3858     return virtio_bus_ioeventfd_enabled(vbus);
3859 }
3860 
3861 static const TypeInfo virtio_device_info = {
3862     .name = TYPE_VIRTIO_DEVICE,
3863     .parent = TYPE_DEVICE,
3864     .instance_size = sizeof(VirtIODevice),
3865     .class_init = virtio_device_class_init,
3866     .instance_finalize = virtio_device_instance_finalize,
3867     .abstract = true,
3868     .class_size = sizeof(VirtioDeviceClass),
3869 };
3870 
3871 static void virtio_register_types(void)
3872 {
3873     type_register_static(&virtio_device_info);
3874 }
3875 
3876 type_init(virtio_register_types)
3877