xref: /openbmc/qemu/hw/virtio/virtio.c (revision 7d87775f)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-virtio.h"
17 #include "trace.h"
18 #include "qemu/defer-call.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/main-loop.h"
22 #include "qemu/module.h"
23 #include "exec/tswap.h"
24 #include "qom/object_interfaces.h"
25 #include "hw/core/cpu.h"
26 #include "hw/virtio/virtio.h"
27 #include "hw/virtio/vhost.h"
28 #include "migration/qemu-file-types.h"
29 #include "qemu/atomic.h"
30 #include "hw/virtio/virtio-bus.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/virtio/virtio-access.h"
33 #include "sysemu/dma.h"
34 #include "sysemu/runstate.h"
35 #include "virtio-qmp.h"
36 
37 #include "standard-headers/linux/virtio_ids.h"
38 #include "standard-headers/linux/vhost_types.h"
39 #include "standard-headers/linux/virtio_blk.h"
40 #include "standard-headers/linux/virtio_console.h"
41 #include "standard-headers/linux/virtio_gpu.h"
42 #include "standard-headers/linux/virtio_net.h"
43 #include "standard-headers/linux/virtio_scsi.h"
44 #include "standard-headers/linux/virtio_i2c.h"
45 #include "standard-headers/linux/virtio_balloon.h"
46 #include "standard-headers/linux/virtio_iommu.h"
47 #include "standard-headers/linux/virtio_mem.h"
48 #include "standard-headers/linux/virtio_vsock.h"
49 
50 /*
51  * Maximum size of virtio device config space
52  */
53 #define VHOST_USER_MAX_CONFIG_SIZE 256
54 
55 /*
56  * The alignment to use between consumer and producer parts of vring.
57  * x86 pagesize again. This is the default, used by transports like PCI
58  * which don't provide a means for the guest to tell the host the alignment.
59  */
60 #define VIRTIO_PCI_VRING_ALIGN         4096
61 
62 typedef struct VRingDesc
63 {
64     uint64_t addr;
65     uint32_t len;
66     uint16_t flags;
67     uint16_t next;
68 } VRingDesc;
69 
70 typedef struct VRingPackedDesc {
71     uint64_t addr;
72     uint32_t len;
73     uint16_t id;
74     uint16_t flags;
75 } VRingPackedDesc;
76 
77 typedef struct VRingAvail
78 {
79     uint16_t flags;
80     uint16_t idx;
81     uint16_t ring[];
82 } VRingAvail;
83 
84 typedef struct VRingUsedElem
85 {
86     uint32_t id;
87     uint32_t len;
88 } VRingUsedElem;
89 
90 typedef struct VRingUsed
91 {
92     uint16_t flags;
93     uint16_t idx;
94     VRingUsedElem ring[];
95 } VRingUsed;
96 
97 typedef struct VRingMemoryRegionCaches {
98     struct rcu_head rcu;
99     MemoryRegionCache desc;
100     MemoryRegionCache avail;
101     MemoryRegionCache used;
102 } VRingMemoryRegionCaches;
103 
104 typedef struct VRing
105 {
106     unsigned int num;
107     unsigned int num_default;
108     unsigned int align;
109     hwaddr desc;
110     hwaddr avail;
111     hwaddr used;
112     VRingMemoryRegionCaches *caches;
113 } VRing;
114 
115 typedef struct VRingPackedDescEvent {
116     uint16_t off_wrap;
117     uint16_t flags;
118 } VRingPackedDescEvent;
119 
120 struct VirtQueue
121 {
122     VRing vring;
123     VirtQueueElement *used_elems;
124 
125     /* Next head to pop */
126     uint16_t last_avail_idx;
127     bool last_avail_wrap_counter;
128 
129     /* Last avail_idx read from VQ. */
130     uint16_t shadow_avail_idx;
131     bool shadow_avail_wrap_counter;
132 
133     uint16_t used_idx;
134     bool used_wrap_counter;
135 
136     /* Last used index value we have signalled on */
137     uint16_t signalled_used;
138 
139     /* Whether signalled_used is valid */
140     bool signalled_used_valid;
141 
142     /* Notification enabled? */
143     bool notification;
144 
145     uint16_t queue_index;
146 
147     unsigned int inuse;
148 
149     uint16_t vector;
150     VirtIOHandleOutput handle_output;
151     VirtIODevice *vdev;
152     EventNotifier guest_notifier;
153     EventNotifier host_notifier;
154     bool host_notifier_enabled;
155     QLIST_ENTRY(VirtQueue) node;
156 };
157 
158 const char *virtio_device_names[] = {
159     [VIRTIO_ID_NET] = "virtio-net",
160     [VIRTIO_ID_BLOCK] = "virtio-blk",
161     [VIRTIO_ID_CONSOLE] = "virtio-serial",
162     [VIRTIO_ID_RNG] = "virtio-rng",
163     [VIRTIO_ID_BALLOON] = "virtio-balloon",
164     [VIRTIO_ID_IOMEM] = "virtio-iomem",
165     [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
166     [VIRTIO_ID_SCSI] = "virtio-scsi",
167     [VIRTIO_ID_9P] = "virtio-9p",
168     [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
169     [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
170     [VIRTIO_ID_CAIF] = "virtio-caif",
171     [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
172     [VIRTIO_ID_GPU] = "virtio-gpu",
173     [VIRTIO_ID_CLOCK] = "virtio-clk",
174     [VIRTIO_ID_INPUT] = "virtio-input",
175     [VIRTIO_ID_VSOCK] = "vhost-vsock",
176     [VIRTIO_ID_CRYPTO] = "virtio-crypto",
177     [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
178     [VIRTIO_ID_PSTORE] = "virtio-pstore",
179     [VIRTIO_ID_IOMMU] = "virtio-iommu",
180     [VIRTIO_ID_MEM] = "virtio-mem",
181     [VIRTIO_ID_SOUND] = "virtio-sound",
182     [VIRTIO_ID_FS] = "virtio-user-fs",
183     [VIRTIO_ID_PMEM] = "virtio-pmem",
184     [VIRTIO_ID_RPMB] = "virtio-rpmb",
185     [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
186     [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
187     [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
188     [VIRTIO_ID_SCMI] = "virtio-scmi",
189     [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
190     [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
191     [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
192     [VIRTIO_ID_CAN] = "virtio-can",
193     [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
194     [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
195     [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
196     [VIRTIO_ID_BT] = "virtio-bluetooth",
197     [VIRTIO_ID_GPIO] = "virtio-gpio"
198 };
199 
200 static const char *virtio_id_to_name(uint16_t device_id)
201 {
202     assert(device_id < G_N_ELEMENTS(virtio_device_names));
203     const char *name = virtio_device_names[device_id];
204     assert(name != NULL);
205     return name;
206 }
207 
208 /* Called within call_rcu().  */
209 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
210 {
211     assert(caches != NULL);
212     address_space_cache_destroy(&caches->desc);
213     address_space_cache_destroy(&caches->avail);
214     address_space_cache_destroy(&caches->used);
215     g_free(caches);
216 }
217 
218 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
219 {
220     VRingMemoryRegionCaches *caches;
221 
222     caches = qatomic_read(&vq->vring.caches);
223     qatomic_rcu_set(&vq->vring.caches, NULL);
224     if (caches) {
225         call_rcu(caches, virtio_free_region_cache, rcu);
226     }
227 }
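
/*
 * Note on the RCU pattern above (illustrative): readers fetch
 * vq->vring.caches with qatomic_rcu_read() under rcu_read_lock(), so the
 * writer may only clear the pointer and must defer the actual free with
 * call_rcu() until every reader that might still hold the old pointer
 * has left its read-side critical section.
 */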
228 
229 void virtio_init_region_cache(VirtIODevice *vdev, int n)
230 {
231     VirtQueue *vq = &vdev->vq[n];
232     VRingMemoryRegionCaches *old = vq->vring.caches;
233     VRingMemoryRegionCaches *new = NULL;
234     hwaddr addr, size;
235     int64_t len;
236     bool packed;
237 
238 
239     addr = vq->vring.desc;
240     if (!addr) {
241         goto out_no_cache;
242     }
243     new = g_new0(VRingMemoryRegionCaches, 1);
244     size = virtio_queue_get_desc_size(vdev, n);
245     packed = virtio_vdev_has_feature(vq->vdev,
246                                      VIRTIO_F_RING_PACKED);
247     len = address_space_cache_init(&new->desc, vdev->dma_as,
248                                    addr, size, packed);
249     if (len < size) {
250         virtio_error(vdev, "Cannot map desc");
251         goto err_desc;
252     }
253 
254     size = virtio_queue_get_used_size(vdev, n);
255     len = address_space_cache_init(&new->used, vdev->dma_as,
256                                    vq->vring.used, size, true);
257     if (len < size) {
258         virtio_error(vdev, "Cannot map used");
259         goto err_used;
260     }
261 
262     size = virtio_queue_get_avail_size(vdev, n);
263     len = address_space_cache_init(&new->avail, vdev->dma_as,
264                                    vq->vring.avail, size, false);
265     if (len < size) {
266         virtio_error(vdev, "Cannot map avail");
267         goto err_avail;
268     }
269 
270     qatomic_rcu_set(&vq->vring.caches, new);
271     if (old) {
272         call_rcu(old, virtio_free_region_cache, rcu);
273     }
274     return;
275 
276 err_avail:
277     address_space_cache_destroy(&new->avail);
278 err_used:
279     address_space_cache_destroy(&new->used);
280 err_desc:
281     address_space_cache_destroy(&new->desc);
282 out_no_cache:
283     g_free(new);
284     virtio_virtqueue_reset_region_cache(vq);
285 }
286 
287 /* virtqueue functions */
288 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
289 {
290     VRing *vring = &vdev->vq[n].vring;
291 
292     if (!vring->num || !vring->desc || !vring->align) {
293         /* not yet setup -> nothing to do */
294         return;
295     }
296     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
297     vring->used = vring_align(vring->avail +
298                               offsetof(VRingAvail, ring[vring->num]),
299                               vring->align);
300     virtio_init_region_cache(vdev, n);
301 }
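
/*
 * Worked example of the split-ring layout computed above (a standalone
 * sketch, not part of the device model; the 16-byte descriptor and the
 * 4-byte avail header follow from the VRingDesc/VRingAvail definitions
 * earlier in this file).  For num = 256, desc = 0x0 and the default
 * 4096-byte alignment this yields avail = 0x1000 and used = 0x2000.
 */
static inline uint64_t example_vring_align(uint64_t addr, uint64_t align)
{
    return (addr + align - 1) & ~(align - 1); /* same rounding as vring_align() */
}

static inline void example_split_layout(uint64_t desc, unsigned int num,
                                        uint64_t align,
                                        uint64_t *avail, uint64_t *used)
{
    *avail = desc + num * 16ULL;              /* num * sizeof(VRingDesc) */
    /* avail header (flags + idx) is 4 bytes, each ring entry 2 bytes */
    *used = example_vring_align(*avail + 4 + 2ULL * num, align);
}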
302 
303 /* Called within rcu_read_lock().  */
304 static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
305                                   MemoryRegionCache *cache, int i)
306 {
307     address_space_read_cached(cache, i * sizeof(VRingDesc),
308                               desc, sizeof(VRingDesc));
309     virtio_tswap64s(vdev, &desc->addr);
310     virtio_tswap32s(vdev, &desc->len);
311     virtio_tswap16s(vdev, &desc->flags);
312     virtio_tswap16s(vdev, &desc->next);
313 }
314 
315 static void vring_packed_event_read(VirtIODevice *vdev,
316                                     MemoryRegionCache *cache,
317                                     VRingPackedDescEvent *e)
318 {
319     hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
320     hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
321 
322     e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
323     /* Make sure flags is seen before off_wrap */
324     smp_rmb();
325     e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
326 }
327 
328 static void vring_packed_off_wrap_write(VirtIODevice *vdev,
329                                         MemoryRegionCache *cache,
330                                         uint16_t off_wrap)
331 {
332     hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
333 
334     virtio_stw_phys_cached(vdev, cache, off, off_wrap);
335     address_space_cache_invalidate(cache, off, sizeof(off_wrap));
336 }
337 
338 static void vring_packed_flags_write(VirtIODevice *vdev,
339                                      MemoryRegionCache *cache, uint16_t flags)
340 {
341     hwaddr off = offsetof(VRingPackedDescEvent, flags);
342 
343     virtio_stw_phys_cached(vdev, cache, off, flags);
344     address_space_cache_invalidate(cache, off, sizeof(flags));
345 }
346 
347 /* Called within rcu_read_lock().  */
348 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
349 {
350     return qatomic_rcu_read(&vq->vring.caches);
351 }
352 
353 /* Called within rcu_read_lock().  */
354 static inline uint16_t vring_avail_flags(VirtQueue *vq)
355 {
356     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
357     hwaddr pa = offsetof(VRingAvail, flags);
358 
359     if (!caches) {
360         return 0;
361     }
362 
363     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
364 }
365 
366 /* Called within rcu_read_lock().  */
367 static inline uint16_t vring_avail_idx(VirtQueue *vq)
368 {
369     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
370     hwaddr pa = offsetof(VRingAvail, idx);
371 
372     if (!caches) {
373         return 0;
374     }
375 
376     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
377     return vq->shadow_avail_idx;
378 }
379 
380 /* Called within rcu_read_lock().  */
381 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
382 {
383     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
384     hwaddr pa = offsetof(VRingAvail, ring[i]);
385 
386     if (!caches) {
387         return 0;
388     }
389 
390     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
391 }
392 
393 /* Called within rcu_read_lock().  */
394 static inline uint16_t vring_get_used_event(VirtQueue *vq)
395 {
396     return vring_avail_ring(vq, vq->vring.num);
397 }
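
/*
 * Note (illustrative): with VIRTIO_RING_F_EVENT_IDX the used_event field
 * has no slot of its own; it occupies the two bytes immediately after
 * avail->ring[num], which is why it is read as ring entry 'num' above.
 * Its mirror image, avail_event, sits after used->ring[num] and is
 * written by vring_set_avail_event() below.
 */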
398 
399 /* Called within rcu_read_lock().  */
400 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
401                                     int i)
402 {
403     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
404     hwaddr pa = offsetof(VRingUsed, ring[i]);
405 
406     if (!caches) {
407         return;
408     }
409 
410     virtio_tswap32s(vq->vdev, &uelem->id);
411     virtio_tswap32s(vq->vdev, &uelem->len);
412     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
413     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
414 }
415 
416 /* Called within rcu_read_lock(). */
417 static inline uint16_t vring_used_flags(VirtQueue *vq)
418 {
419     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
420     hwaddr pa = offsetof(VRingUsed, flags);
421 
422     if (!caches) {
423         return 0;
424     }
425 
426     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
427 }
428 
429 /* Called within rcu_read_lock().  */
430 static uint16_t vring_used_idx(VirtQueue *vq)
431 {
432     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
433     hwaddr pa = offsetof(VRingUsed, idx);
434 
435     if (!caches) {
436         return 0;
437     }
438 
439     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
440 }
441 
442 /* Called within rcu_read_lock().  */
443 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
444 {
445     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
446     hwaddr pa = offsetof(VRingUsed, idx);
447 
448     if (caches) {
449         virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
450         address_space_cache_invalidate(&caches->used, pa, sizeof(val));
451     }
452 
453     vq->used_idx = val;
454 }
455 
456 /* Called within rcu_read_lock().  */
457 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
458 {
459     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
460     VirtIODevice *vdev = vq->vdev;
461     hwaddr pa = offsetof(VRingUsed, flags);
462     uint16_t flags;
463 
464     if (!caches) {
465         return;
466     }
467 
468     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
469     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
470     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
471 }
472 
473 /* Called within rcu_read_lock().  */
474 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
475 {
476     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
477     VirtIODevice *vdev = vq->vdev;
478     hwaddr pa = offsetof(VRingUsed, flags);
479     uint16_t flags;
480 
481     if (!caches) {
482         return;
483     }
484 
485     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
486     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
487     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
488 }
489 
490 /* Called within rcu_read_lock().  */
491 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
492 {
493     VRingMemoryRegionCaches *caches;
494     hwaddr pa;
495     if (!vq->notification) {
496         return;
497     }
498 
499     caches = vring_get_region_caches(vq);
500     if (!caches) {
501         return;
502     }
503 
504     pa = offsetof(VRingUsed, ring[vq->vring.num]);
505     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
506     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
507 }
508 
509 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
510 {
511     RCU_READ_LOCK_GUARD();
512 
513     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
514         vring_set_avail_event(vq, vring_avail_idx(vq));
515     } else if (enable) {
516         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
517     } else {
518         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
519     }
520     if (enable) {
521         /* Expose avail event/used flags before caller checks the avail idx. */
522         smp_mb();
523     }
524 }
525 
526 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
527 {
528     uint16_t off_wrap;
529     VRingPackedDescEvent e;
530     VRingMemoryRegionCaches *caches;
531 
532     RCU_READ_LOCK_GUARD();
533     caches = vring_get_region_caches(vq);
534     if (!caches) {
535         return;
536     }
537 
538     vring_packed_event_read(vq->vdev, &caches->used, &e);
539 
540     if (!enable) {
541         e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
542     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
543         off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
544         vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
545         /* Make sure off_wrap is written before flags */
546         smp_wmb();
547         e.flags = VRING_PACKED_EVENT_FLAG_DESC;
548     } else {
549         e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
550     }
551 
552     vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
553     if (enable) {
554         /* Expose avail event/used flags before caller checks the avail idx. */
555         smp_mb();
556     }
557 }
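
/*
 * Illustrative sketch of the off_wrap encoding used above: the packed
 * ring's event suppression word carries a 15-bit ring offset with the
 * wrap counter in bit 15, matching 'shadow_avail_idx | wrap << 15'.
 * These helpers are a sketch, not part of the device model.
 */
static inline uint16_t example_pack_off_wrap(uint16_t idx, bool wrap)
{
    return (idx & 0x7fff) | ((uint16_t)wrap << 15);
}

static inline void example_unpack_off_wrap(uint16_t off_wrap,
                                           uint16_t *idx, bool *wrap)
{
    *idx = off_wrap & 0x7fff;
    *wrap = !!(off_wrap & 0x8000);
}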
558 
559 bool virtio_queue_get_notification(VirtQueue *vq)
560 {
561     return vq->notification;
562 }
563 
564 void virtio_queue_set_notification(VirtQueue *vq, int enable)
565 {
566     vq->notification = enable;
567 
568     if (!vq->vring.desc) {
569         return;
570     }
571 
572     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
573         virtio_queue_packed_set_notification(vq, enable);
574     } else {
575         virtio_queue_split_set_notification(vq, enable);
576     }
577 }
578 
579 int virtio_queue_ready(VirtQueue *vq)
580 {
581     return vq->vring.avail != 0;
582 }
583 
584 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
585                                          uint16_t *flags,
586                                          MemoryRegionCache *cache,
587                                          int i)
588 {
589     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
590 
591     *flags = virtio_lduw_phys_cached(vdev, cache, off);
592 }
593 
594 static void vring_packed_desc_read(VirtIODevice *vdev,
595                                    VRingPackedDesc *desc,
596                                    MemoryRegionCache *cache,
597                                    int i, bool strict_order)
598 {
599     hwaddr off = i * sizeof(VRingPackedDesc);
600 
601     vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
602 
603     if (strict_order) {
604         /* Make sure flags is read before the rest of the fields. */
605         smp_rmb();
606     }
607 
608     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
609                               &desc->addr, sizeof(desc->addr));
610     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
611                               &desc->id, sizeof(desc->id));
612     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
613                               &desc->len, sizeof(desc->len));
614     virtio_tswap64s(vdev, &desc->addr);
615     virtio_tswap16s(vdev, &desc->id);
616     virtio_tswap32s(vdev, &desc->len);
617 }
618 
619 static void vring_packed_desc_write_data(VirtIODevice *vdev,
620                                          VRingPackedDesc *desc,
621                                          MemoryRegionCache *cache,
622                                          int i)
623 {
624     hwaddr off_id = i * sizeof(VRingPackedDesc) +
625                     offsetof(VRingPackedDesc, id);
626     hwaddr off_len = i * sizeof(VRingPackedDesc) +
627                     offsetof(VRingPackedDesc, len);
628 
629     virtio_tswap32s(vdev, &desc->len);
630     virtio_tswap16s(vdev, &desc->id);
631     address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
632     address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
633     address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
634     address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
635 }
636 
637 static void vring_packed_desc_write_flags(VirtIODevice *vdev,
638                                           VRingPackedDesc *desc,
639                                           MemoryRegionCache *cache,
640                                           int i)
641 {
642     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
643 
644     virtio_stw_phys_cached(vdev, cache, off, desc->flags);
645     address_space_cache_invalidate(cache, off, sizeof(desc->flags));
646 }
647 
648 static void vring_packed_desc_write(VirtIODevice *vdev,
649                                     VRingPackedDesc *desc,
650                                     MemoryRegionCache *cache,
651                                     int i, bool strict_order)
652 {
653     vring_packed_desc_write_data(vdev, desc, cache, i);
654     if (strict_order) {
655         /* Make sure data is written before flags. */
656         smp_wmb();
657     }
658     vring_packed_desc_write_flags(vdev, desc, cache, i);
659 }
660 
661 static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
662 {
663     bool avail, used;
664 
665     avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
666     used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
667     return (avail != used) && (avail == wrap_counter);
668 }
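
/*
 * Truth table for the availability test above, for wrap_counter == 1
 * (illustrative; the wrap_counter == 0 case is the mirror image):
 *
 *   AVAIL  USED   is_desc_avail()
 *     0     0     no  (avail == used)
 *     0     1     no  (avail != wrap counter)
 *     1     0     yes
 *     1     1     no  (avail == used)
 */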
669 
670 /* Fetch avail_idx from VQ memory only when we really need to know if
671  * the guest has added some buffers.
672  * Called within rcu_read_lock().  */
673 static int virtio_queue_empty_rcu(VirtQueue *vq)
674 {
675     if (virtio_device_disabled(vq->vdev)) {
676         return 1;
677     }
678 
679     if (unlikely(!vq->vring.avail)) {
680         return 1;
681     }
682 
683     if (vq->shadow_avail_idx != vq->last_avail_idx) {
684         return 0;
685     }
686 
687     return vring_avail_idx(vq) == vq->last_avail_idx;
688 }
689 
690 static int virtio_queue_split_empty(VirtQueue *vq)
691 {
692     bool empty;
693 
694     if (virtio_device_disabled(vq->vdev)) {
695         return 1;
696     }
697 
698     if (unlikely(!vq->vring.avail)) {
699         return 1;
700     }
701 
702     if (vq->shadow_avail_idx != vq->last_avail_idx) {
703         return 0;
704     }
705 
706     RCU_READ_LOCK_GUARD();
707     empty = vring_avail_idx(vq) == vq->last_avail_idx;
708     return empty;
709 }
710 
711 /* Called within rcu_read_lock().  */
712 static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
713 {
714     struct VRingPackedDesc desc;
715     VRingMemoryRegionCaches *cache;
716 
717     if (unlikely(!vq->vring.desc)) {
718         return 1;
719     }
720 
721     cache = vring_get_region_caches(vq);
722     if (!cache) {
723         return 1;
724     }
725 
726     vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
727                                  vq->last_avail_idx);
728 
729     return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
730 }
731 
732 static int virtio_queue_packed_empty(VirtQueue *vq)
733 {
734     RCU_READ_LOCK_GUARD();
735     return virtio_queue_packed_empty_rcu(vq);
736 }
737 
738 int virtio_queue_empty(VirtQueue *vq)
739 {
740     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
741         return virtio_queue_packed_empty(vq);
742     } else {
743         return virtio_queue_split_empty(vq);
744     }
745 }
746 
747 static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx)
748 {
749     if (unlikely(!vq->vring.avail)) {
750         return false;
751     }
752 
753     return (uint16_t)shadow_idx != vring_avail_idx(vq);
754 }
755 
756 static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx)
757 {
758     VRingPackedDesc desc;
759     VRingMemoryRegionCaches *caches;
760 
761     if (unlikely(!vq->vring.desc)) {
762         return false;
763     }
764 
765     caches = vring_get_region_caches(vq);
766     if (!caches) {
767         return false;
768     }
769 
770     vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
771                            shadow_idx, true);
772 
773     return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
774 }
775 
776 static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx)
777 {
778     if (virtio_device_disabled(vq->vdev)) {
779         return false;
780     }
781 
782     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
783         return virtio_queue_packed_poll(vq, shadow_idx);
784     } else {
785         return virtio_queue_split_poll(vq, shadow_idx);
786     }
787 }
788 
789 bool virtio_queue_enable_notification_and_check(VirtQueue *vq,
790                                                 int opaque)
791 {
792     virtio_queue_set_notification(vq, 1);
793 
794     if (opaque >= 0) {
795         return virtio_queue_poll(vq, (unsigned)opaque);
796     } else {
797         return false;
798     }
799 }
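
/*
 * Typical caller pattern for the helper above (an illustrative sketch,
 * not a specific device's code): 'opaque' is a value previously returned
 * by virtqueue_get_avail_bytes(), i.e. the shadow avail index.  A true
 * result means buffers arrived after notifications were re-enabled, so
 * the caller should suppress them again and keep processing rather than
 * go idle:
 *
 *     if (virtio_queue_enable_notification_and_check(vq, opaque)) {
 *         virtio_queue_set_notification(vq, 0);
 *         // ... pop and process the new requests ...
 *     }
 */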
800 
801 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
802                                unsigned int len)
803 {
804     AddressSpace *dma_as = vq->vdev->dma_as;
805     unsigned int offset;
806     int i;
807 
808     offset = 0;
809     for (i = 0; i < elem->in_num; i++) {
810         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
811 
812         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
813                          elem->in_sg[i].iov_len,
814                          DMA_DIRECTION_FROM_DEVICE, size);
815 
816         offset += size;
817     }
818 
819     for (i = 0; i < elem->out_num; i++) {
820         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
821                          elem->out_sg[i].iov_len,
822                          DMA_DIRECTION_TO_DEVICE, elem->out_sg[i].iov_len);
823     }
824 }
825 
826 /* virtqueue_detach_element:
827  * @vq: The #VirtQueue
828  * @elem: The #VirtQueueElement
829  * @len: number of bytes written
830  *
831  * Detach the element from the virtqueue.  This function is suitable for device
832  * reset or other situations where a #VirtQueueElement is simply freed and will
833  * not be pushed or discarded.
834  */
835 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
836                               unsigned int len)
837 {
838     vq->inuse -= elem->ndescs;
839     virtqueue_unmap_sg(vq, elem, len);
840 }
841 
842 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
843 {
844     vq->last_avail_idx -= num;
845 }
846 
847 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
848 {
849     if (vq->last_avail_idx < num) {
850         vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
851         vq->last_avail_wrap_counter ^= 1;
852     } else {
853         vq->last_avail_idx -= num;
854     }
855 }
856 
857 /* virtqueue_unpop:
858  * @vq: The #VirtQueue
859  * @elem: The #VirtQueueElement
860  * @len: number of bytes written
861  *
862  * Pretend the most recent element wasn't popped from the virtqueue.  The next
863  * call to virtqueue_pop() will refetch the element.
864  */
865 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
866                      unsigned int len)
867 {
868 
869     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
870         virtqueue_packed_rewind(vq, 1);
871     } else {
872         virtqueue_split_rewind(vq, 1);
873     }
874 
875     virtqueue_detach_element(vq, elem, len);
876 }
877 
878 /* virtqueue_rewind:
879  * @vq: The #VirtQueue
880  * @num: Number of elements to push back
881  *
882  * Pretend that elements weren't popped from the virtqueue.  The next
883  * virtqueue_pop() will refetch the oldest element.
884  *
885  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
886  *
887  * Returns: true on success, false if @num is greater than the number of in use
888  * elements.
889  */
890 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
891 {
892     if (num > vq->inuse) {
893         return false;
894     }
895 
896     vq->inuse -= num;
897     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
898         virtqueue_packed_rewind(vq, num);
899     } else {
900         virtqueue_split_rewind(vq, num);
901     }
902     return true;
903 }
904 
905 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
906                     unsigned int len, unsigned int idx)
907 {
908     VRingUsedElem uelem;
909 
910     if (unlikely(!vq->vring.used)) {
911         return;
912     }
913 
914     idx = (idx + vq->used_idx) % vq->vring.num;
915 
916     uelem.id = elem->index;
917     uelem.len = len;
918     vring_used_write(vq, &uelem, idx);
919 }
920 
921 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
922                                   unsigned int len, unsigned int idx)
923 {
924     vq->used_elems[idx].index = elem->index;
925     vq->used_elems[idx].len = len;
926     vq->used_elems[idx].ndescs = elem->ndescs;
927 }
928 
929 static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
930                                    unsigned int len)
931 {
932     unsigned int i, steps, max_steps;
933 
934     i = vq->used_idx % vq->vring.num;
935     steps = 0;
936     /*
937      * We shouldn't need to increase 'i' by more than the distance
938      * between used_idx and last_avail_idx.
939      */
940     max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
941 
942     /* Search for element in vq->used_elems */
943     while (steps <= max_steps) {
944         /* Found element, set length and mark as filled */
945         if (vq->used_elems[i].index == elem->index) {
946             vq->used_elems[i].len = len;
947             vq->used_elems[i].in_order_filled = true;
948             break;
949         }
950 
951         steps += vq->used_elems[i].ndescs;
952         i += vq->used_elems[i].ndescs;
953 
954         if (i >= vq->vring.num) {
955             i -= vq->vring.num;
956         }
957     }
958 
959     /*
960      * We should be able to find a matching VirtQueueElement in
961      * used_elems. If we don't, this is an error.
962      */
963     if (steps >= max_steps) {
964         qemu_log_mask(LOG_GUEST_ERROR, "%s: %s cannot fill buffer id %u\n",
965                       __func__, vq->vdev->name, elem->index);
966     }
967 }
968 
969 static void virtqueue_packed_fill_desc(VirtQueue *vq,
970                                        const VirtQueueElement *elem,
971                                        unsigned int idx,
972                                        bool strict_order)
973 {
974     uint16_t head;
975     VRingMemoryRegionCaches *caches;
976     VRingPackedDesc desc = {
977         .id = elem->index,
978         .len = elem->len,
979     };
980     bool wrap_counter = vq->used_wrap_counter;
981 
982     if (unlikely(!vq->vring.desc)) {
983         return;
984     }
985 
986     head = vq->used_idx + idx;
987     if (head >= vq->vring.num) {
988         head -= vq->vring.num;
989         wrap_counter ^= 1;
990     }
991     if (wrap_counter) {
992         desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
993         desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
994     } else {
995         desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
996         desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
997     }
998 
999     caches = vring_get_region_caches(vq);
1000     if (!caches) {
1001         return;
1002     }
1003 
1004     vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
1005 }
1006 
1007 /* Called within rcu_read_lock().  */
1008 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
1009                     unsigned int len, unsigned int idx)
1010 {
1011     trace_virtqueue_fill(vq, elem, len, idx);
1012 
1013     virtqueue_unmap_sg(vq, elem, len);
1014 
1015     if (virtio_device_disabled(vq->vdev)) {
1016         return;
1017     }
1018 
1019     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1020         virtqueue_ordered_fill(vq, elem, len);
1021     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1022         virtqueue_packed_fill(vq, elem, len, idx);
1023     } else {
1024         virtqueue_split_fill(vq, elem, len, idx);
1025     }
1026 }
1027 
1028 /* Called within rcu_read_lock().  */
1029 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
1030 {
1031     uint16_t old, new;
1032 
1033     if (unlikely(!vq->vring.used)) {
1034         return;
1035     }
1036 
1037     /* Make sure buffer is written before we update index. */
1038     smp_wmb();
1039     trace_virtqueue_flush(vq, count);
1040     old = vq->used_idx;
1041     new = old + count;
1042     vring_used_idx_set(vq, new);
1043     vq->inuse -= count;
1044     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
1045         vq->signalled_used_valid = false;
1046 }
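
/*
 * Note on the signalled_used test above (illustrative): all arithmetic is
 * modulo 2^16, and the cached 'last signalled' value stays valid only if
 * it does not fall inside the window (old, new] of freshly consumed
 * entries.  For example, with old = 0xfffe and count = 4, new = 0x0002;
 * if signalled_used = 0xffff then (int16_t)(new - signalled_used) == 3,
 * which is less than (uint16_t)(new - old) == 4, so the cache is dropped
 * and the next notification decision is made from scratch.
 */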
1047 
1048 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
1049 {
1050     unsigned int i, ndescs = 0;
1051 
1052     if (unlikely(!vq->vring.desc)) {
1053         return;
1054     }
1055 
1056     /*
1057      * For an indirect element, 'ndescs' is 1.
1058      * For all other elements, 'ndescs' is the number of descriptors
1059      * chained by NEXT (as set in virtqueue_packed_pop).
1060      * So when the 'elem' is filled into the descriptor ring,
1061      * the 'idx' of this 'elem' shall be the value of 'vq->used_idx'
1062      * plus the 'ndescs'.
1063      */
1064     ndescs += vq->used_elems[0].ndescs;
1065     for (i = 1; i < count; i++) {
1066         virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1067         ndescs += vq->used_elems[i].ndescs;
1068     }
1069     virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
1070 
1071     vq->inuse -= ndescs;
1072     vq->used_idx += ndescs;
1073     if (vq->used_idx >= vq->vring.num) {
1074         vq->used_idx -= vq->vring.num;
1075         vq->used_wrap_counter ^= 1;
1076         vq->signalled_used_valid = false;
1077     }
1078 }
1079 
1080 static void virtqueue_ordered_flush(VirtQueue *vq)
1081 {
1082     unsigned int i = vq->used_idx % vq->vring.num;
1083     unsigned int ndescs = 0;
1084     uint16_t old = vq->used_idx;
1085     uint16_t new;
1086     bool packed;
1087     VRingUsedElem uelem;
1088 
1089     packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
1090 
1091     if (packed) {
1092         if (unlikely(!vq->vring.desc)) {
1093             return;
1094         }
1095     } else if (unlikely(!vq->vring.used)) {
1096         return;
1097     }
1098 
1099     /* First expected in-order element isn't ready, nothing to do */
1100     if (!vq->used_elems[i].in_order_filled) {
1101         return;
1102     }
1103 
1104     /* Search for filled elements in-order */
1105     while (vq->used_elems[i].in_order_filled) {
1106         /*
1107          * First entry for packed VQs is written last so the guest
1108          * doesn't see invalid descriptors.
1109          */
1110         if (packed && i != vq->used_idx) {
1111             virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1112         } else if (!packed) {
1113             uelem.id = vq->used_elems[i].index;
1114             uelem.len = vq->used_elems[i].len;
1115             vring_used_write(vq, &uelem, i);
1116         }
1117 
1118         vq->used_elems[i].in_order_filled = false;
1119         ndescs += vq->used_elems[i].ndescs;
1120         i += vq->used_elems[i].ndescs;
1121         if (i >= vq->vring.num) {
1122             i -= vq->vring.num;
1123         }
1124     }
1125 
1126     if (packed) {
1127         virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
1128         vq->used_idx += ndescs;
1129         if (vq->used_idx >= vq->vring.num) {
1130             vq->used_idx -= vq->vring.num;
1131             vq->used_wrap_counter ^= 1;
1132             vq->signalled_used_valid = false;
1133         }
1134     } else {
1135         /* Make sure buffer is written before we update index. */
1136         smp_wmb();
1137         new = old + ndescs;
1138         vring_used_idx_set(vq, new);
1139         if (unlikely((int16_t)(new - vq->signalled_used) <
1140                      (uint16_t)(new - old))) {
1141             vq->signalled_used_valid = false;
1142         }
1143     }
1144     vq->inuse -= ndescs;
1145 }
1146 
1147 void virtqueue_flush(VirtQueue *vq, unsigned int count)
1148 {
1149     if (virtio_device_disabled(vq->vdev)) {
1150         vq->inuse -= count;
1151         return;
1152     }
1153 
1154     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1155         virtqueue_ordered_flush(vq);
1156     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1157         virtqueue_packed_flush(vq, count);
1158     } else {
1159         virtqueue_split_flush(vq, count);
1160     }
1161 }
1162 
1163 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
1164                     unsigned int len)
1165 {
1166     RCU_READ_LOCK_GUARD();
1167     virtqueue_fill(vq, elem, len, 0);
1168     virtqueue_flush(vq, 1);
1169 }
1170 
1171 /* Called within rcu_read_lock().  */
1172 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
1173 {
1174     uint16_t avail_idx, num_heads;
1175 
1176     /* Use shadow index whenever possible. */
1177     avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
1178                                               : vring_avail_idx(vq);
1179     num_heads = avail_idx - idx;
1180 
1181     /* Check it isn't doing very strange things with descriptor numbers. */
1182     if (num_heads > vq->vring.num) {
1183         virtio_error(vq->vdev, "Guest moved avail index from %u to %u",
1184                      idx, vq->shadow_avail_idx);
1185         return -EINVAL;
1186     }
1187     /*
1188      * On success, callers read a descriptor at vq->last_avail_idx.
1189      * Make sure descriptor read does not bypass avail index read.
1190      *
1191      * This is necessary even if we are using a shadow index, since
1192      * the shadow index could have been initialized by calling
1193      * vring_avail_idx() outside of this function, i.e., by a guest
1194      * memory read not accompanied by a barrier.
1195      */
1196     if (num_heads) {
1197         smp_rmb();
1198     }
1199 
1200     return num_heads;
1201 }
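
/*
 * Note (illustrative): the smp_rmb() above pairs with the write barrier
 * the guest driver issues between filling avail->ring[] and publishing
 * avail->idx.  Without it, the device could observe the new index yet
 * still read a stale ring entry for one of the advertised heads.
 */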
1202 
1203 /* Called within rcu_read_lock().  */
1204 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
1205                                unsigned int *head)
1206 {
1207     /* Grab the next descriptor number they're advertising, and increment
1208      * the index we've seen. */
1209     *head = vring_avail_ring(vq, idx % vq->vring.num);
1210 
1211     /* If their number is silly, that's a fatal mistake. */
1212     if (*head >= vq->vring.num) {
1213         virtio_error(vq->vdev, "Guest says index %u is available", *head);
1214         return false;
1215     }
1216 
1217     return true;
1218 }
1219 
1220 enum {
1221     VIRTQUEUE_READ_DESC_ERROR = -1,
1222     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1223     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1224 };
1225 
1226 /* Reads the 'desc->next' descriptor into '*desc'. */
1227 static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
1228                                           MemoryRegionCache *desc_cache,
1229                                           unsigned int max)
1230 {
1231     /* If this descriptor says it doesn't chain, we're done. */
1232     if (!(desc->flags & VRING_DESC_F_NEXT)) {
1233         return VIRTQUEUE_READ_DESC_DONE;
1234     }
1235 
1236     /* Check they're not leading us off end of descriptors. */
1237     if (desc->next >= max) {
1238         virtio_error(vdev, "Desc next is %u", desc->next);
1239         return VIRTQUEUE_READ_DESC_ERROR;
1240     }
1241 
1242     vring_split_desc_read(vdev, desc, desc_cache, desc->next);
1243     return VIRTQUEUE_READ_DESC_MORE;
1244 }
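
/*
 * Minimal sketch of how a caller walks a split descriptor chain with the
 * VIRTQUEUE_READ_DESC_* codes above ('example_chain_length' is not part
 * of the device model; error handling beyond the return code is elided):
 */
static inline unsigned int example_chain_length(VirtIODevice *vdev,
                                                MemoryRegionCache *cache,
                                                unsigned int head,
                                                unsigned int max)
{
    VRingDesc desc;
    unsigned int n = 0;
    int rc;

    vring_split_desc_read(vdev, &desc, cache, head);
    do {
        n++;                                   /* count this descriptor */
        rc = virtqueue_split_read_next_desc(vdev, &desc, cache, max);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    return rc == VIRTQUEUE_READ_DESC_ERROR ? 0 : n;
}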
1245 
1246 /* Called within rcu_read_lock().  */
1247 static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
1248                             unsigned int *in_bytes, unsigned int *out_bytes,
1249                             unsigned max_in_bytes, unsigned max_out_bytes,
1250                             VRingMemoryRegionCaches *caches)
1251 {
1252     VirtIODevice *vdev = vq->vdev;
1253     unsigned int idx;
1254     unsigned int total_bufs, in_total, out_total;
1255     MemoryRegionCache indirect_desc_cache;
1256     int64_t len = 0;
1257     int rc;
1258 
1259     address_space_cache_init_empty(&indirect_desc_cache);
1260 
1261     idx = vq->last_avail_idx;
1262     total_bufs = in_total = out_total = 0;
1263 
1264     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
1265         MemoryRegionCache *desc_cache = &caches->desc;
1266         unsigned int num_bufs;
1267         VRingDesc desc;
1268         unsigned int i;
1269         unsigned int max = vq->vring.num;
1270 
1271         num_bufs = total_bufs;
1272 
1273         if (!virtqueue_get_head(vq, idx++, &i)) {
1274             goto err;
1275         }
1276 
1277         vring_split_desc_read(vdev, &desc, desc_cache, i);
1278 
1279         if (desc.flags & VRING_DESC_F_INDIRECT) {
1280             if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1281                 virtio_error(vdev, "Invalid size for indirect buffer table");
1282                 goto err;
1283             }
1284 
1285             /* If we've got too many, that implies a descriptor loop. */
1286             if (num_bufs >= max) {
1287                 virtio_error(vdev, "Looped descriptor");
1288                 goto err;
1289             }
1290 
1291             /* loop over the indirect descriptor table */
1292             len = address_space_cache_init(&indirect_desc_cache,
1293                                            vdev->dma_as,
1294                                            desc.addr, desc.len, false);
1295             desc_cache = &indirect_desc_cache;
1296             if (len < desc.len) {
1297                 virtio_error(vdev, "Cannot map indirect buffer");
1298                 goto err;
1299             }
1300 
1301             max = desc.len / sizeof(VRingDesc);
1302             num_bufs = i = 0;
1303             vring_split_desc_read(vdev, &desc, desc_cache, i);
1304         }
1305 
1306         do {
1307             /* If we've got too many, that implies a descriptor loop. */
1308             if (++num_bufs > max) {
1309                 virtio_error(vdev, "Looped descriptor");
1310                 goto err;
1311             }
1312 
1313             if (desc.flags & VRING_DESC_F_WRITE) {
1314                 in_total += desc.len;
1315             } else {
1316                 out_total += desc.len;
1317             }
1318             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1319                 goto done;
1320             }
1321 
1322             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1323         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1324 
1325         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1326             goto err;
1327         }
1328 
1329         if (desc_cache == &indirect_desc_cache) {
1330             address_space_cache_destroy(&indirect_desc_cache);
1331             total_bufs++;
1332         } else {
1333             total_bufs = num_bufs;
1334         }
1335     }
1336 
1337     if (rc < 0) {
1338         goto err;
1339     }
1340 
1341 done:
1342     address_space_cache_destroy(&indirect_desc_cache);
1343     if (in_bytes) {
1344         *in_bytes = in_total;
1345     }
1346     if (out_bytes) {
1347         *out_bytes = out_total;
1348     }
1349     return;
1350 
1351 err:
1352     in_total = out_total = 0;
1353     goto done;
1354 }
1355 
1356 static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1357                                            VRingPackedDesc *desc,
1358                                            MemoryRegionCache
1359                                            *desc_cache,
1360                                            unsigned int max,
1361                                            unsigned int *next,
1362                                            bool indirect)
1363 {
1364     /* If this descriptor says it doesn't chain, we're done. */
1365     if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1366         return VIRTQUEUE_READ_DESC_DONE;
1367     }
1368 
1369     ++*next;
1370     if (*next == max) {
1371         if (indirect) {
1372             return VIRTQUEUE_READ_DESC_DONE;
1373         } else {
1374             (*next) -= vq->vring.num;
1375         }
1376     }
1377 
1378     vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1379     return VIRTQUEUE_READ_DESC_MORE;
1380 }
1381 
1382 /* Called within rcu_read_lock().  */
1383 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1384                                              unsigned int *in_bytes,
1385                                              unsigned int *out_bytes,
1386                                              unsigned max_in_bytes,
1387                                              unsigned max_out_bytes,
1388                                              VRingMemoryRegionCaches *caches)
1389 {
1390     VirtIODevice *vdev = vq->vdev;
1391     unsigned int idx;
1392     unsigned int total_bufs, in_total, out_total;
1393     MemoryRegionCache indirect_desc_cache;
1394     MemoryRegionCache *desc_cache;
1395     int64_t len = 0;
1396     VRingPackedDesc desc;
1397     bool wrap_counter;
1398 
1399     address_space_cache_init_empty(&indirect_desc_cache);
1400 
1401     idx = vq->last_avail_idx;
1402     wrap_counter = vq->last_avail_wrap_counter;
1403     total_bufs = in_total = out_total = 0;
1404 
1405     for (;;) {
1406         unsigned int num_bufs = total_bufs;
1407         unsigned int i = idx;
1408         int rc;
1409         unsigned int max = vq->vring.num;
1410 
1411         desc_cache = &caches->desc;
1412 
1413         vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1414         if (!is_desc_avail(desc.flags, wrap_counter)) {
1415             break;
1416         }
1417 
1418         if (desc.flags & VRING_DESC_F_INDIRECT) {
1419             if (desc.len % sizeof(VRingPackedDesc)) {
1420                 virtio_error(vdev, "Invalid size for indirect buffer table");
1421                 goto err;
1422             }
1423 
1424             /* If we've got too many, that implies a descriptor loop. */
1425             if (num_bufs >= max) {
1426                 virtio_error(vdev, "Looped descriptor");
1427                 goto err;
1428             }
1429 
1430             /* loop over the indirect descriptor table */
1431             len = address_space_cache_init(&indirect_desc_cache,
1432                                            vdev->dma_as,
1433                                            desc.addr, desc.len, false);
1434             desc_cache = &indirect_desc_cache;
1435             if (len < desc.len) {
1436                 virtio_error(vdev, "Cannot map indirect buffer");
1437                 goto err;
1438             }
1439 
1440             max = desc.len / sizeof(VRingPackedDesc);
1441             num_bufs = i = 0;
1442             vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1443         }
1444 
1445         do {
1446             /* If we've got too many, that implies a descriptor loop. */
1447             if (++num_bufs > max) {
1448                 virtio_error(vdev, "Looped descriptor");
1449                 goto err;
1450             }
1451 
1452             if (desc.flags & VRING_DESC_F_WRITE) {
1453                 in_total += desc.len;
1454             } else {
1455                 out_total += desc.len;
1456             }
1457             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1458                 goto done;
1459             }
1460 
1461             rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1462                                                  &i, desc_cache ==
1463                                                  &indirect_desc_cache);
1464         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1465 
1466         if (desc_cache == &indirect_desc_cache) {
1467             address_space_cache_destroy(&indirect_desc_cache);
1468             total_bufs++;
1469             idx++;
1470         } else {
1471             idx += num_bufs - total_bufs;
1472             total_bufs = num_bufs;
1473         }
1474 
1475         if (idx >= vq->vring.num) {
1476             idx -= vq->vring.num;
1477             wrap_counter ^= 1;
1478         }
1479     }
1480 
1481     /* Record the index and wrap counter for a kick we want */
1482     vq->shadow_avail_idx = idx;
1483     vq->shadow_avail_wrap_counter = wrap_counter;
1484 done:
1485     address_space_cache_destroy(&indirect_desc_cache);
1486     if (in_bytes) {
1487         *in_bytes = in_total;
1488     }
1489     if (out_bytes) {
1490         *out_bytes = out_total;
1491     }
1492     return;
1493 
1494 err:
1495     in_total = out_total = 0;
1496     goto done;
1497 }
1498 
1499 int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1500                               unsigned int *out_bytes, unsigned max_in_bytes,
1501                               unsigned max_out_bytes)
1502 {
1503     uint16_t desc_size;
1504     VRingMemoryRegionCaches *caches;
1505 
1506     RCU_READ_LOCK_GUARD();
1507 
1508     if (unlikely(!vq->vring.desc)) {
1509         goto err;
1510     }
1511 
1512     caches = vring_get_region_caches(vq);
1513     if (!caches) {
1514         goto err;
1515     }
1516 
1517     desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1518                                 sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1519     if (caches->desc.len < vq->vring.num * desc_size) {
1520         virtio_error(vq->vdev, "Cannot map descriptor ring");
1521         goto err;
1522     }
1523 
1524     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1525         virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1526                                          max_in_bytes, max_out_bytes,
1527                                          caches);
1528     } else {
1529         virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1530                                         max_in_bytes, max_out_bytes,
1531                                         caches);
1532     }
1533 
1534     return (int)vq->shadow_avail_idx;
1535 err:
1536     if (in_bytes) {
1537         *in_bytes = 0;
1538     }
1539     if (out_bytes) {
1540         *out_bytes = 0;
1541     }
1542 
1543     return -1;
1544 }
1545 
1546 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1547                           unsigned int out_bytes)
1548 {
1549     unsigned int in_total, out_total;
1550 
1551     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1552     return in_bytes <= in_total && out_bytes <= out_total;
1553 }
1554 
1555 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1556                                hwaddr *addr, struct iovec *iov,
1557                                unsigned int max_num_sg, bool is_write,
1558                                hwaddr pa, size_t sz)
1559 {
1560     bool ok = false;
1561     unsigned num_sg = *p_num_sg;
1562     assert(num_sg <= max_num_sg);
1563 
1564     if (!sz) {
1565         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1566         goto out;
1567     }
1568 
1569     while (sz) {
1570         hwaddr len = sz;
1571 
1572         if (num_sg == max_num_sg) {
1573             virtio_error(vdev, "virtio: too many write descriptors in "
1574                                "indirect table");
1575             goto out;
1576         }
1577 
1578         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1579                                               is_write ?
1580                                               DMA_DIRECTION_FROM_DEVICE :
1581                                               DMA_DIRECTION_TO_DEVICE,
1582                                               MEMTXATTRS_UNSPECIFIED);
1583         if (!iov[num_sg].iov_base) {
1584             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1585             goto out;
1586         }
1587 
1588         iov[num_sg].iov_len = len;
1589         addr[num_sg] = pa;
1590 
1591         sz -= len;
1592         pa += len;
1593         num_sg++;
1594     }
1595     ok = true;
1596 
1597 out:
1598     *p_num_sg = num_sg;
1599     return ok;
1600 }
1601 
1602 /* Only used by error code paths before we have a VirtQueueElement (therefore
1603  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
1604  * yet.
1605  */
1606 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1607                                     struct iovec *iov)
1608 {
1609     unsigned int i;
1610 
1611     for (i = 0; i < out_num + in_num; i++) {
1612         int is_write = i >= out_num;
1613 
1614         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1615         iov++;
1616     }
1617 }
1618 
1619 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1620                                 hwaddr *addr, unsigned int num_sg,
1621                                 bool is_write)
1622 {
1623     unsigned int i;
1624     hwaddr len;
1625 
1626     for (i = 0; i < num_sg; i++) {
1627         len = sg[i].iov_len;
1628         sg[i].iov_base = dma_memory_map(vdev->dma_as,
1629                                         addr[i], &len, is_write ?
1630                                         DMA_DIRECTION_FROM_DEVICE :
1631                                         DMA_DIRECTION_TO_DEVICE,
1632                                         MEMTXATTRS_UNSPECIFIED);
1633         if (!sg[i].iov_base) {
1634             error_report("virtio: error trying to map MMIO memory");
1635             exit(1);
1636         }
1637         if (len != sg[i].iov_len) {
1638             error_report("virtio: unexpected memory split");
1639             exit(1);
1640         }
1641     }
1642 }
1643 
1644 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1645 {
1646     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1647     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr,
1648                         elem->out_num, false);
1649 }
1650 
1651 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1652 {
1653     VirtQueueElement *elem;
1654     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1655     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1656     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1657     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1658     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1659     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1660 
1661     assert(sz >= sizeof(VirtQueueElement));
1662     elem = g_malloc(out_sg_end);
1663     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1664     elem->out_num = out_num;
1665     elem->in_num = in_num;
1666     elem->in_addr = (void *)elem + in_addr_ofs;
1667     elem->out_addr = (void *)elem + out_addr_ofs;
1668     elem->in_sg = (void *)elem + in_sg_ofs;
1669     elem->out_sg = (void *)elem + out_sg_ofs;
1670     return elem;
1671 }
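
/*
 * Illustrative sketch, not used by this file: callers embed VirtQueueElement
 * as the *first* member of a device-specific request struct and pass the
 * outer size as sz, so per-request state shares the element's allocation.
 * "ExampleReq" and its fields are hypothetical.
 */
typedef struct ExampleReq {
    VirtQueueElement elem;   /* must stay first: the pointer returned by
                              * virtqueue_pop() is freed as an element */
    uint32_t status;         /* hypothetical per-request device state */
} ExampleReq;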
1672 
1673 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1674 {
1675     unsigned int i, head, max, idx;
1676     VRingMemoryRegionCaches *caches;
1677     MemoryRegionCache indirect_desc_cache;
1678     MemoryRegionCache *desc_cache;
1679     int64_t len;
1680     VirtIODevice *vdev = vq->vdev;
1681     VirtQueueElement *elem = NULL;
1682     unsigned out_num, in_num, elem_entries;
1683     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1684     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1685     VRingDesc desc;
1686     int rc;
1687 
1688     address_space_cache_init_empty(&indirect_desc_cache);
1689 
1690     RCU_READ_LOCK_GUARD();
1691     if (virtio_queue_empty_rcu(vq)) {
1692         goto done;
1693     }
1694     /* Needed after virtio_queue_empty(), see comment in
1695      * virtqueue_num_heads(). */
1696     smp_rmb();
1697 
1698     /* When we start there are neither input nor output buffers. */
1699     out_num = in_num = elem_entries = 0;
1700 
1701     max = vq->vring.num;
1702 
1703     if (vq->inuse >= vq->vring.num) {
1704         virtio_error(vdev, "Virtqueue size exceeded");
1705         goto done;
1706     }
1707 
1708     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1709         goto done;
1710     }
1711 
1712     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1713         vring_set_avail_event(vq, vq->last_avail_idx);
1714     }
1715 
1716     i = head;
1717 
1718     caches = vring_get_region_caches(vq);
1719     if (!caches) {
1720         virtio_error(vdev, "Region caches not initialized");
1721         goto done;
1722     }
1723 
1724     if (caches->desc.len < max * sizeof(VRingDesc)) {
1725         virtio_error(vdev, "Cannot map descriptor ring");
1726         goto done;
1727     }
1728 
1729     desc_cache = &caches->desc;
1730     vring_split_desc_read(vdev, &desc, desc_cache, i);
1731     if (desc.flags & VRING_DESC_F_INDIRECT) {
1732         if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1733             virtio_error(vdev, "Invalid size for indirect buffer table");
1734             goto done;
1735         }
1736 
1737         /* loop over the indirect descriptor table */
1738         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1739                                        desc.addr, desc.len, false);
1740         desc_cache = &indirect_desc_cache;
1741         if (len < desc.len) {
1742             virtio_error(vdev, "Cannot map indirect buffer");
1743             goto done;
1744         }
1745 
1746         max = desc.len / sizeof(VRingDesc);
1747         i = 0;
1748         vring_split_desc_read(vdev, &desc, desc_cache, i);
1749     }
1750 
1751     /* Collect all the descriptors */
1752     do {
1753         bool map_ok;
1754 
1755         if (desc.flags & VRING_DESC_F_WRITE) {
1756             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1757                                         iov + out_num,
1758                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1759                                         desc.addr, desc.len);
1760         } else {
1761             if (in_num) {
1762                 virtio_error(vdev, "Incorrect order for descriptors");
1763                 goto err_undo_map;
1764             }
1765             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1766                                         VIRTQUEUE_MAX_SIZE, false,
1767                                         desc.addr, desc.len);
1768         }
1769         if (!map_ok) {
1770             goto err_undo_map;
1771         }
1772 
1773         /* If we've got too many, that implies a descriptor loop. */
1774         if (++elem_entries > max) {
1775             virtio_error(vdev, "Looped descriptor");
1776             goto err_undo_map;
1777         }
1778 
1779         rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1780     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1781 
1782     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1783         goto err_undo_map;
1784     }
1785 
1786     /* Now copy what we have collected and mapped */
1787     elem = virtqueue_alloc_element(sz, out_num, in_num);
1788     elem->index = head;
1789     elem->ndescs = 1;
1790     for (i = 0; i < out_num; i++) {
1791         elem->out_addr[i] = addr[i];
1792         elem->out_sg[i] = iov[i];
1793     }
1794     for (i = 0; i < in_num; i++) {
1795         elem->in_addr[i] = addr[out_num + i];
1796         elem->in_sg[i] = iov[out_num + i];
1797     }
1798 
1799     if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
1800         idx = (vq->last_avail_idx - 1) % vq->vring.num;
1801         vq->used_elems[idx].index = elem->index;
1802         vq->used_elems[idx].len = elem->len;
1803         vq->used_elems[idx].ndescs = elem->ndescs;
1804     }
1805 
1806     vq->inuse++;
1807 
1808     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1809 done:
1810     address_space_cache_destroy(&indirect_desc_cache);
1811 
1812     return elem;
1813 
1814 err_undo_map:
1815     virtqueue_undo_map_desc(out_num, in_num, iov);
1816     goto done;
1817 }
1818 
1819 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1820 {
1821     unsigned int i, max;
1822     VRingMemoryRegionCaches *caches;
1823     MemoryRegionCache indirect_desc_cache;
1824     MemoryRegionCache *desc_cache;
1825     int64_t len;
1826     VirtIODevice *vdev = vq->vdev;
1827     VirtQueueElement *elem = NULL;
1828     unsigned out_num, in_num, elem_entries;
1829     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1830     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1831     VRingPackedDesc desc;
1832     uint16_t id;
1833     int rc;
1834 
1835     address_space_cache_init_empty(&indirect_desc_cache);
1836 
1837     RCU_READ_LOCK_GUARD();
1838     if (virtio_queue_packed_empty_rcu(vq)) {
1839         goto done;
1840     }
1841 
1842     /* When we start there are neither input nor output buffers. */
1843     out_num = in_num = elem_entries = 0;
1844 
1845     max = vq->vring.num;
1846 
1847     if (vq->inuse >= vq->vring.num) {
1848         virtio_error(vdev, "Virtqueue size exceeded");
1849         goto done;
1850     }
1851 
1852     i = vq->last_avail_idx;
1853 
1854     caches = vring_get_region_caches(vq);
1855     if (!caches) {
1856         virtio_error(vdev, "Region caches not initialized");
1857         goto done;
1858     }
1859 
1860     if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
1861         virtio_error(vdev, "Cannot map descriptor ring");
1862         goto done;
1863     }
1864 
1865     desc_cache = &caches->desc;
1866     vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1867     id = desc.id;
1868     if (desc.flags & VRING_DESC_F_INDIRECT) {
1869         if (desc.len % sizeof(VRingPackedDesc)) {
1870             virtio_error(vdev, "Invalid size for indirect buffer table");
1871             goto done;
1872         }
1873 
1874         /* loop over the indirect descriptor table */
1875         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1876                                        desc.addr, desc.len, false);
1877         desc_cache = &indirect_desc_cache;
1878         if (len < desc.len) {
1879             virtio_error(vdev, "Cannot map indirect buffer");
1880             goto done;
1881         }
1882 
1883         max = desc.len / sizeof(VRingPackedDesc);
1884         i = 0;
1885         vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1886     }
1887 
1888     /* Collect all the descriptors */
1889     do {
1890         bool map_ok;
1891 
1892         if (desc.flags & VRING_DESC_F_WRITE) {
1893             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1894                                         iov + out_num,
1895                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1896                                         desc.addr, desc.len);
1897         } else {
1898             if (in_num) {
1899                 virtio_error(vdev, "Incorrect order for descriptors");
1900                 goto err_undo_map;
1901             }
1902             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1903                                         VIRTQUEUE_MAX_SIZE, false,
1904                                         desc.addr, desc.len);
1905         }
1906         if (!map_ok) {
1907             goto err_undo_map;
1908         }
1909 
1910         /* If we've got too many, that implies a descriptor loop. */
1911         if (++elem_entries > max) {
1912             virtio_error(vdev, "Looped descriptor");
1913             goto err_undo_map;
1914         }
1915 
1916         rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1917                                              desc_cache ==
1918                                              &indirect_desc_cache);
1919     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1920 
1921     if (desc_cache != &indirect_desc_cache) {
1922         /* Buffer ID is included in the last descriptor in the list. */
1923         id = desc.id;
1924     }
1925 
1926     /* Now copy what we have collected and mapped */
1927     elem = virtqueue_alloc_element(sz, out_num, in_num);
1928     for (i = 0; i < out_num; i++) {
1929         elem->out_addr[i] = addr[i];
1930         elem->out_sg[i] = iov[i];
1931     }
1932     for (i = 0; i < in_num; i++) {
1933         elem->in_addr[i] = addr[out_num + i];
1934         elem->in_sg[i] = iov[out_num + i];
1935     }
1936 
1937     elem->index = id;
1938     elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1939 
1940     if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
1941         vq->used_elems[vq->last_avail_idx].index = elem->index;
1942         vq->used_elems[vq->last_avail_idx].len = elem->len;
1943         vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
1944     }
1945 
1946     vq->last_avail_idx += elem->ndescs;
1947     vq->inuse += elem->ndescs;
1948 
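    /*
     * A packed ring index does not wrap modulo 2^16: once it runs past the
     * ring size it is rebased to 0 and the wrap counter flips, which is how
     * the device tells freshly written descriptors from stale ones.
     */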
1949     if (vq->last_avail_idx >= vq->vring.num) {
1950         vq->last_avail_idx -= vq->vring.num;
1951         vq->last_avail_wrap_counter ^= 1;
1952     }
1953 
1954     vq->shadow_avail_idx = vq->last_avail_idx;
1955     vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1956 
1957     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1958 done:
1959     address_space_cache_destroy(&indirect_desc_cache);
1960 
1961     return elem;
1962 
1963 err_undo_map:
1964     virtqueue_undo_map_desc(out_num, in_num, iov);
1965     goto done;
1966 }
1967 
1968 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1969 {
1970     if (virtio_device_disabled(vq->vdev)) {
1971         return NULL;
1972     }
1973 
1974     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1975         return virtqueue_packed_pop(vq, sz);
1976     } else {
1977         return virtqueue_split_pop(vq, sz);
1978     }
1979 }
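
/*
 * Illustrative usage sketch, not referenced by this file: a typical
 * VirtIOHandleOutput callback drains the queue with virtqueue_pop() and
 * completes each element with virtqueue_push() plus a notification.
 * "example_handle_output" is hypothetical.
 */
static G_GNUC_UNUSED void example_handle_output(VirtIODevice *vdev,
                                                VirtQueue *vq)
{
    VirtQueueElement *elem;

    while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
        /*
         * elem->out_sg/out_num hold driver-to-device data, elem->in_sg/in_num
         * the buffers the device may fill; 0 below because this sketch
         * writes nothing back.
         */
        virtqueue_push(vq, elem, 0);
        g_free(elem);
    }
    virtio_notify(vdev, vq);
}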
1980 
1981 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1982 {
1983     VRingMemoryRegionCaches *caches;
1984     MemoryRegionCache *desc_cache;
1985     unsigned int dropped = 0;
1986     VirtQueueElement elem = {};
1987     VirtIODevice *vdev = vq->vdev;
1988     VRingPackedDesc desc;
1989 
1990     RCU_READ_LOCK_GUARD();
1991 
1992     caches = vring_get_region_caches(vq);
1993     if (!caches) {
1994         return 0;
1995     }
1996 
1997     desc_cache = &caches->desc;
1998 
1999     virtio_queue_set_notification(vq, 0);
2000 
2001     while (vq->inuse < vq->vring.num) {
2002         unsigned int idx = vq->last_avail_idx;
2003         /*
2004          * Works similarly to virtqueue_pop() but maps no buffers
2005          * and allocates no memory.
2006          */
2007         vring_packed_desc_read(vdev, &desc, desc_cache,
2008                                vq->last_avail_idx, true);
2009         if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
2010             break;
2011         }
2012         elem.index = desc.id;
2013         elem.ndescs = 1;
2014         while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
2015                                                vq->vring.num, &idx, false)) {
2016             ++elem.ndescs;
2017         }
2018         /*
2019          * immediately push the element, nothing to unmap
2020          * as both in_num and out_num are set to 0.
2021          */
2022         virtqueue_push(vq, &elem, 0);
2023         dropped++;
2024         vq->last_avail_idx += elem.ndescs;
2025         if (vq->last_avail_idx >= vq->vring.num) {
2026             vq->last_avail_idx -= vq->vring.num;
2027             vq->last_avail_wrap_counter ^= 1;
2028         }
2029     }
2030 
2031     return dropped;
2032 }
2033 
2034 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
2035 {
2036     unsigned int dropped = 0;
2037     VirtQueueElement elem = {};
2038     VirtIODevice *vdev = vq->vdev;
2039     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2040 
2041     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
2042         /* Works similarly to virtqueue_pop() but maps no buffers
2043          * and allocates no memory. */
2044         smp_rmb();
2045         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
2046             break;
2047         }
2048         vq->inuse++;
2049         vq->last_avail_idx++;
2050         if (fEventIdx) {
2051             vring_set_avail_event(vq, vq->last_avail_idx);
2052         }
2053         /* immediately push the element, nothing to unmap
2054          * as both in_num and out_num are set to 0 */
2055         virtqueue_push(vq, &elem, 0);
2056         dropped++;
2057     }
2058 
2059     return dropped;
2060 }
2061 
2062 /* virtqueue_drop_all:
2063  * @vq: The #VirtQueue
2064  * Drops all queued buffers and indicates them to the guest
2065  * as if they were completed. Useful when buffers cannot be
2066  * processed but must be returned to the guest.
2067  */
2068 unsigned int virtqueue_drop_all(VirtQueue *vq)
2069 {
2070     struct VirtIODevice *vdev = vq->vdev;
2071 
2072     if (virtio_device_disabled(vq->vdev)) {
2073         return 0;
2074     }
2075 
2076     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2077         return virtqueue_packed_drop_all(vq);
2078     } else {
2079         return virtqueue_split_drop_all(vq);
2080     }
2081 }
2082 
2083 /* Reading and writing a structure directly to QEMUFile is *awful*, but
2084  * it is what QEMU has always done by mistake.  We can change it sooner
2085  * or later by bumping the version number of the affected vm states.
2086  * In the meantime, since the in-memory layout of VirtQueueElement
2087  * has changed, we need to marshal to and from the layout that was
2088  * used before the change.
2089  */
2090 typedef struct VirtQueueElementOld {
2091     unsigned int index;
2092     unsigned int out_num;
2093     unsigned int in_num;
2094     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
2095     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
2096     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
2097     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
2098 } VirtQueueElementOld;
2099 
2100 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
2101 {
2102     VirtQueueElement *elem;
2103     VirtQueueElementOld data;
2104     int i;
2105 
2106     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2107 
2108     /* TODO: teach all callers that this can fail, and return failure instead
2109      * of asserting here.
2110      * This is just one thing (there are probably more) that must be
2111      * fixed before we can allow NDEBUG compilation.
2112      */
2113     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
2114     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
2115 
2116     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
2117     elem->index = data.index;
2118 
2119     for (i = 0; i < elem->in_num; i++) {
2120         elem->in_addr[i] = data.in_addr[i];
2121     }
2122 
2123     for (i = 0; i < elem->out_num; i++) {
2124         elem->out_addr[i] = data.out_addr[i];
2125     }
2126 
2127     for (i = 0; i < elem->in_num; i++) {
2128         /* Base is overwritten by virtqueue_map.  */
2129         elem->in_sg[i].iov_base = 0;
2130         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
2131     }
2132 
2133     for (i = 0; i < elem->out_num; i++) {
2134         /* Base is overwritten by virtqueue_map.  */
2135         elem->out_sg[i].iov_base = 0;
2136         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
2137     }
2138 
2139     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2140         qemu_get_be32s(f, &elem->ndescs);
2141     }
2142 
2143     virtqueue_map(vdev, elem);
2144     return elem;
2145 }
2146 
2147 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
2148                                 VirtQueueElement *elem)
2149 {
2150     VirtQueueElementOld data;
2151     int i;
2152 
2153     memset(&data, 0, sizeof(data));
2154     data.index = elem->index;
2155     data.in_num = elem->in_num;
2156     data.out_num = elem->out_num;
2157 
2158     for (i = 0; i < elem->in_num; i++) {
2159         data.in_addr[i] = elem->in_addr[i];
2160     }
2161 
2162     for (i = 0; i < elem->out_num; i++) {
2163         data.out_addr[i] = elem->out_addr[i];
2164     }
2165 
2166     for (i = 0; i < elem->in_num; i++) {
2167         /* Base is overwritten by virtqueue_map when loading.  Do not
2168          * save it, as it would leak the QEMU address space layout.  */
2169         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
2170     }
2171 
2172     for (i = 0; i < elem->out_num; i++) {
2173         /* Do not save iov_base as above.  */
2174         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
2175     }
2176 
2177     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2178         qemu_put_be32s(f, &elem->ndescs);
2179     }
2180 
2181     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2182 }
2183 
2184 /* virtio device */
2185 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2186 {
2187     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2188     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2189 
2190     if (virtio_device_disabled(vdev)) {
2191         return;
2192     }
2193 
2194     if (k->notify) {
2195         k->notify(qbus->parent, vector);
2196     }
2197 }
2198 
2199 void virtio_update_irq(VirtIODevice *vdev)
2200 {
2201     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2202 }
2203 
2204 static int virtio_validate_features(VirtIODevice *vdev)
2205 {
2206     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2207 
2208     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2209         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2210         return -EFAULT;
2211     }
2212 
2213     if (k->validate_features) {
2214         return k->validate_features(vdev);
2215     } else {
2216         return 0;
2217     }
2218 }
2219 
2220 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
2221 {
2222     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2223     trace_virtio_set_status(vdev, val);
2224 
2225     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2226         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2227             val & VIRTIO_CONFIG_S_FEATURES_OK) {
2228             int ret = virtio_validate_features(vdev);
2229 
2230             if (ret) {
2231                 return ret;
2232             }
2233         }
2234     }
2235 
2236     if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2237         (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2238         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2239     }
2240 
2241     if (k->set_status) {
2242         k->set_status(vdev, val);
2243     }
2244     vdev->status = val;
2245 
2246     return 0;
2247 }
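
/*
 * For orientation: a virtio 1.0 driver normally walks the status register
 * through ACKNOWLEDGE, then DRIVER, then FEATURES_OK (validated above),
 * and finally DRIVER_OK, which flips the started state.  Every guest write
 * arrives here through the transport.
 */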
2248 
2249 static enum virtio_device_endian virtio_default_endian(void)
2250 {
2251     if (target_words_bigendian()) {
2252         return VIRTIO_DEVICE_ENDIAN_BIG;
2253     } else {
2254         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2255     }
2256 }
2257 
2258 static enum virtio_device_endian virtio_current_cpu_endian(void)
2259 {
2260     if (cpu_virtio_is_big_endian(current_cpu)) {
2261         return VIRTIO_DEVICE_ENDIAN_BIG;
2262     } else {
2263         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2264     }
2265 }
2266 
2267 static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
2268 {
2269     vdev->vq[i].vring.desc = 0;
2270     vdev->vq[i].vring.avail = 0;
2271     vdev->vq[i].vring.used = 0;
2272     vdev->vq[i].last_avail_idx = 0;
2273     vdev->vq[i].shadow_avail_idx = 0;
2274     vdev->vq[i].used_idx = 0;
2275     vdev->vq[i].last_avail_wrap_counter = true;
2276     vdev->vq[i].shadow_avail_wrap_counter = true;
2277     vdev->vq[i].used_wrap_counter = true;
2278     virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2279     vdev->vq[i].signalled_used = 0;
2280     vdev->vq[i].signalled_used_valid = false;
2281     vdev->vq[i].notification = true;
2282     vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2283     vdev->vq[i].inuse = 0;
2284     virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2285 }
2286 
2287 void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
2288 {
2289     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2290 
2291     if (k->queue_reset) {
2292         k->queue_reset(vdev, queue_index);
2293     }
2294 
2295     __virtio_queue_reset(vdev, queue_index);
2296 }
2297 
2298 void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
2299 {
2300     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2301 
2302     /*
2303      * TODO: SeaBIOS is currently out of spec and triggers this error.
2304      * So this needs to be fixed in SeaBIOS, after which this check can
2305      * be re-enabled for new machine types only, and also after
2306      * being converted to LOG_GUEST_ERROR.
2307      *
2308     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2309         error_report("queue_enable is only supported in devices of virtio "
2310                      "1.0 or later.");
2311     }
2312     */
2313 
2314     if (k->queue_enable) {
2315         k->queue_enable(vdev, queue_index);
2316     }
2317 }
2318 
2319 void virtio_reset(void *opaque)
2320 {
2321     VirtIODevice *vdev = opaque;
2322     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2323     int i;
2324 
2325     virtio_set_status(vdev, 0);
2326     if (current_cpu) {
2327         /* Guest initiated reset */
2328         vdev->device_endian = virtio_current_cpu_endian();
2329     } else {
2330         /* System reset */
2331         vdev->device_endian = virtio_default_endian();
2332     }
2333 
2334     if (k->get_vhost) {
2335         struct vhost_dev *hdev = k->get_vhost(vdev);
2336         /* Only reset when vhost back-end is connected */
2337         if (hdev && hdev->vhost_ops) {
2338             vhost_reset_device(hdev);
2339         }
2340     }
2341 
2342     if (k->reset) {
2343         k->reset(vdev);
2344     }
2345 
2346     vdev->start_on_kick = false;
2347     vdev->started = false;
2348     vdev->broken = false;
2349     vdev->guest_features = 0;
2350     vdev->queue_sel = 0;
2351     vdev->status = 0;
2352     vdev->disabled = false;
2353     qatomic_set(&vdev->isr, 0);
2354     vdev->config_vector = VIRTIO_NO_VECTOR;
2355     virtio_notify_vector(vdev, vdev->config_vector);
2356 
2357     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2358         __virtio_queue_reset(vdev, i);
2359     }
2360 }
2361 
2362 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2363 {
2364     if (!vdev->vq[n].vring.num) {
2365         return;
2366     }
2367     vdev->vq[n].vring.desc = addr;
2368     virtio_queue_update_rings(vdev, n);
2369 }
2370 
2371 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2372 {
2373     return vdev->vq[n].vring.desc;
2374 }
2375 
2376 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2377                             hwaddr avail, hwaddr used)
2378 {
2379     if (!vdev->vq[n].vring.num) {
2380         return;
2381     }
2382     vdev->vq[n].vring.desc = desc;
2383     vdev->vq[n].vring.avail = avail;
2384     vdev->vq[n].vring.used = used;
2385     virtio_init_region_cache(vdev, n);
2386 }
2387 
2388 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2389 {
2390     /* Don't allow guest to flip queue between existent and
2391      * nonexistent states, or to set it to an invalid size.
2392      */
2393     if (!!num != !!vdev->vq[n].vring.num ||
2394         num > VIRTQUEUE_MAX_SIZE ||
2395         num < 0) {
2396         return;
2397     }
2398     vdev->vq[n].vring.num = num;
2399 }
2400 
2401 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2402 {
2403     return QLIST_FIRST(&vdev->vector_queues[vector]);
2404 }
2405 
2406 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2407 {
2408     return QLIST_NEXT(vq, node);
2409 }
2410 
2411 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2412 {
2413     return vdev->vq[n].vring.num;
2414 }
2415 
2416 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2417 {
2418     return vdev->vq[n].vring.num_default;
2419 }
2420 
2421 int virtio_get_num_queues(VirtIODevice *vdev)
2422 {
2423     int i;
2424 
2425     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2426         if (!virtio_queue_get_num(vdev, i)) {
2427             break;
2428         }
2429     }
2430 
2431     return i;
2432 }
2433 
2434 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2435 {
2436     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2437     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2438 
2439     /* virtio-1 compliant devices cannot change the alignment */
2440     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2441         error_report("tried to modify queue alignment for virtio-1 device");
2442         return;
2443     }
2444     /* Check that the transport told us it was going to do this
2445      * (so a buggy transport will immediately assert rather than
2446      * silently failing to migrate this state)
2447      */
2448     assert(k->has_variable_vring_alignment);
2449 
2450     if (align) {
2451         vdev->vq[n].vring.align = align;
2452         virtio_queue_update_rings(vdev, n);
2453     }
2454 }
2455 
2456 void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx)
2457 {
2458     if (!vq->vring.desc) {
2459         return;
2460     }
2461 
2462     /*
2463      * For packed VQs the 16-bit value carries a 1-bit wrap counter
2464      * and a 15-bit shadow_avail_idx.
2465      */
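    /* e.g. shadow_avail_idx value 0x8005 unpacks to wrap counter 1, index 5 */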
2466     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
2467         vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
2468         vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
2469     } else {
2470         vq->shadow_avail_idx = shadow_avail_idx;
2471     }
2472 }
2473 
2474 static void virtio_queue_notify_vq(VirtQueue *vq)
2475 {
2476     if (vq->vring.desc && vq->handle_output) {
2477         VirtIODevice *vdev = vq->vdev;
2478 
2479         if (unlikely(vdev->broken)) {
2480             return;
2481         }
2482 
2483         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2484         vq->handle_output(vdev, vq);
2485 
2486         if (unlikely(vdev->start_on_kick)) {
2487             virtio_set_started(vdev, true);
2488         }
2489     }
2490 }
2491 
2492 void virtio_queue_notify(VirtIODevice *vdev, int n)
2493 {
2494     VirtQueue *vq = &vdev->vq[n];
2495 
2496     if (unlikely(!vq->vring.desc || vdev->broken)) {
2497         return;
2498     }
2499 
2500     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2501     if (vq->host_notifier_enabled) {
2502         event_notifier_set(&vq->host_notifier);
2503     } else if (vq->handle_output) {
2504         vq->handle_output(vdev, vq);
2505 
2506         if (unlikely(vdev->start_on_kick)) {
2507             virtio_set_started(vdev, true);
2508         }
2509     }
2510 }
2511 
2512 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2513 {
2514     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2515         VIRTIO_NO_VECTOR;
2516 }
2517 
2518 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2519 {
2520     VirtQueue *vq = &vdev->vq[n];
2521 
2522     if (n < VIRTIO_QUEUE_MAX) {
2523         if (vdev->vector_queues &&
2524             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2525             QLIST_REMOVE(vq, node);
2526         }
2527         vdev->vq[n].vector = vector;
2528         if (vdev->vector_queues &&
2529             vector != VIRTIO_NO_VECTOR) {
2530             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2531         }
2532     }
2533 }
2534 
2535 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2536                             VirtIOHandleOutput handle_output)
2537 {
2538     int i;
2539 
2540     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2541         if (vdev->vq[i].vring.num == 0)
2542             break;
2543     }
2544 
2545     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2546         abort();
2547 
2548     vdev->vq[i].vring.num = queue_size;
2549     vdev->vq[i].vring.num_default = queue_size;
2550     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2551     vdev->vq[i].handle_output = handle_output;
2552     vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2553 
2554     return &vdev->vq[i];
2555 }
2556 
2557 void virtio_delete_queue(VirtQueue *vq)
2558 {
2559     vq->vring.num = 0;
2560     vq->vring.num_default = 0;
2561     vq->handle_output = NULL;
2562     g_free(vq->used_elems);
2563     vq->used_elems = NULL;
2564     virtio_virtqueue_reset_region_cache(vq);
2565 }
2566 
2567 void virtio_del_queue(VirtIODevice *vdev, int n)
2568 {
2569     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2570         abort();
2571     }
2572 
2573     virtio_delete_queue(&vdev->vq[n]);
2574 }
2575 
2576 static void virtio_set_isr(VirtIODevice *vdev, int value)
2577 {
2578     uint8_t old = qatomic_read(&vdev->isr);
2579 
2580     /* Do not write ISR if it does not change, so that its cacheline remains
2581      * shared in the common case where the guest does not read it.
2582      */
2583     if ((old & value) != value) {
2584         qatomic_or(&vdev->isr, value);
2585     }
2586 }
2587 
2588 /* Called within rcu_read_lock(). */
2589 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2590 {
2591     uint16_t old, new;
2592     bool v;
2593     /* We need to expose used array entries before checking used event. */
2594     smp_mb();
2595     /* Always notify when queue is empty (when feature acknowledge) */
2596     /* Always notify when the queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY was negotiated */
2597         !vq->inuse && virtio_queue_empty(vq)) {
2598         return true;
2599     }
2600 
2601     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2602         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2603     }
2604 
2605     v = vq->signalled_used_valid;
2606     vq->signalled_used_valid = true;
2607     old = vq->signalled_used;
2608     new = vq->signalled_used = vq->used_idx;
2609     return !v || vring_need_event(vring_get_used_event(vq), new, old);
2610 }
2611 
2612 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2613                                     uint16_t off_wrap, uint16_t new,
2614                                     uint16_t old)
2615 {
2616     int off = off_wrap & ~(1 << 15);
2617 
2618     if (wrap != off_wrap >> 15) {
2619         off -= vq->vring.num;
2620     }
2621 
2622     return vring_need_event(off, new, old);
2623 }
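
/*
 * Worked example with vring.num == 8: the driver requests an event at
 * off_wrap == (1 << 15) | 6.  If wrap == 1 the wrap bits match, off stays 6,
 * and vring_need_event(6, new, old) fires for new == 7, old == 6 because
 * (uint16_t)(7 - 6 - 1) == 0 < (uint16_t)(7 - 6) == 1.  Had the wrap bits
 * differed, off would become 6 - 8 == -2 and the same new/old pair would
 * not trigger an event.
 */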
2624 
2625 /* Called within rcu_read_lock(). */
2626 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2627 {
2628     VRingPackedDescEvent e;
2629     uint16_t old, new;
2630     bool v;
2631     VRingMemoryRegionCaches *caches;
2632 
2633     caches = vring_get_region_caches(vq);
2634     if (!caches) {
2635         return false;
2636     }
2637 
2638     vring_packed_event_read(vdev, &caches->avail, &e);
2639 
2640     old = vq->signalled_used;
2641     new = vq->signalled_used = vq->used_idx;
2642     v = vq->signalled_used_valid;
2643     vq->signalled_used_valid = true;
2644 
2645     if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2646         return false;
2647     } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2648         return true;
2649     }
2650 
2651     return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2652                                          e.off_wrap, new, old);
2653 }
2654 
2655 /* Called within rcu_read_lock().  */
2656 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2657 {
2658     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2659         return virtio_packed_should_notify(vdev, vq);
2660     } else {
2661         return virtio_split_should_notify(vdev, vq);
2662     }
2663 }
2664 
2665 /* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
2666 static void virtio_notify_irqfd_deferred_fn(void *opaque)
2667 {
2668     EventNotifier *notifier = opaque;
2669     VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
2670 
2671     trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2672     event_notifier_set(notifier);
2673 }
2674 
2675 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2676 {
2677     WITH_RCU_READ_LOCK_GUARD() {
2678         if (!virtio_should_notify(vdev, vq)) {
2679             return;
2680         }
2681     }
2682 
2683     trace_virtio_notify_irqfd(vdev, vq);
2684 
2685     /*
2686      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2687      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2688      * incorrectly polling this bit during crashdump and hibernation
2689      * in MSI mode, causing a hang if this bit is never updated.
2690      * Recent releases of Windows do not really shut down, but rather
2691      * log out and hibernate to make the next startup faster.  Hence,
2692      * this manifested as a more serious hang during shutdown with such drivers.
2693      *
2694      * The next driver release, from 2016, fixed the problem, so working
2695      * around it is not a must, but it is easy enough, so let's do it here.
2696      *
2697      * Note: it's safe to update ISR from any thread as it was switched
2698      * to an atomic operation.
2699      */
2700     virtio_set_isr(vq->vdev, 0x1);
2701     defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
2702 }
2703 
2704 static void virtio_irq(VirtQueue *vq)
2705 {
2706     virtio_set_isr(vq->vdev, 0x1);
2707     virtio_notify_vector(vq->vdev, vq->vector);
2708 }
2709 
2710 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2711 {
2712     WITH_RCU_READ_LOCK_GUARD() {
2713         if (!virtio_should_notify(vdev, vq)) {
2714             return;
2715         }
2716     }
2717 
2718     trace_virtio_notify(vdev, vq);
2719     virtio_irq(vq);
2720 }
2721 
2722 void virtio_notify_config(VirtIODevice *vdev)
2723 {
2724     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2725         return;
2726 
2727     virtio_set_isr(vdev, 0x3);
2728     vdev->generation++;
2729     virtio_notify_vector(vdev, vdev->config_vector);
2730 }
2731 
2732 static bool virtio_device_endian_needed(void *opaque)
2733 {
2734     VirtIODevice *vdev = opaque;
2735 
2736     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2737     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2738         return vdev->device_endian != virtio_default_endian();
2739     }
2740     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2741     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2742 }
2743 
2744 static bool virtio_64bit_features_needed(void *opaque)
2745 {
2746     VirtIODevice *vdev = opaque;
2747 
2748     return (vdev->host_features >> 32) != 0;
2749 }
2750 
2751 static bool virtio_virtqueue_needed(void *opaque)
2752 {
2753     VirtIODevice *vdev = opaque;
2754 
2755     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2756 }
2757 
2758 static bool virtio_packed_virtqueue_needed(void *opaque)
2759 {
2760     VirtIODevice *vdev = opaque;
2761 
2762     return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2763 }
2764 
2765 static bool virtio_ringsize_needed(void *opaque)
2766 {
2767     VirtIODevice *vdev = opaque;
2768     int i;
2769 
2770     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2771         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2772             return true;
2773         }
2774     }
2775     return false;
2776 }
2777 
2778 static bool virtio_extra_state_needed(void *opaque)
2779 {
2780     VirtIODevice *vdev = opaque;
2781     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2782     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2783 
2784     return k->has_extra_state &&
2785         k->has_extra_state(qbus->parent);
2786 }
2787 
2788 static bool virtio_broken_needed(void *opaque)
2789 {
2790     VirtIODevice *vdev = opaque;
2791 
2792     return vdev->broken;
2793 }
2794 
2795 static bool virtio_started_needed(void *opaque)
2796 {
2797     VirtIODevice *vdev = opaque;
2798 
2799     return vdev->started;
2800 }
2801 
2802 static bool virtio_disabled_needed(void *opaque)
2803 {
2804     VirtIODevice *vdev = opaque;
2805 
2806     return vdev->disabled;
2807 }
2808 
2809 static const VMStateDescription vmstate_virtqueue = {
2810     .name = "virtqueue_state",
2811     .version_id = 1,
2812     .minimum_version_id = 1,
2813     .fields = (const VMStateField[]) {
2814         VMSTATE_UINT64(vring.avail, struct VirtQueue),
2815         VMSTATE_UINT64(vring.used, struct VirtQueue),
2816         VMSTATE_END_OF_LIST()
2817     }
2818 };
2819 
2820 static const VMStateDescription vmstate_packed_virtqueue = {
2821     .name = "packed_virtqueue_state",
2822     .version_id = 1,
2823     .minimum_version_id = 1,
2824     .fields = (const VMStateField[]) {
2825         VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2826         VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2827         VMSTATE_UINT16(used_idx, struct VirtQueue),
2828         VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2829         VMSTATE_UINT32(inuse, struct VirtQueue),
2830         VMSTATE_END_OF_LIST()
2831     }
2832 };
2833 
2834 static const VMStateDescription vmstate_virtio_virtqueues = {
2835     .name = "virtio/virtqueues",
2836     .version_id = 1,
2837     .minimum_version_id = 1,
2838     .needed = &virtio_virtqueue_needed,
2839     .fields = (const VMStateField[]) {
2840         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2841                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2842         VMSTATE_END_OF_LIST()
2843     }
2844 };
2845 
2846 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2847     .name = "virtio/packed_virtqueues",
2848     .version_id = 1,
2849     .minimum_version_id = 1,
2850     .needed = &virtio_packed_virtqueue_needed,
2851     .fields = (const VMStateField[]) {
2852         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2853                       VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2854         VMSTATE_END_OF_LIST()
2855     }
2856 };
2857 
2858 static const VMStateDescription vmstate_ringsize = {
2859     .name = "ringsize_state",
2860     .version_id = 1,
2861     .minimum_version_id = 1,
2862     .fields = (const VMStateField[]) {
2863         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2864         VMSTATE_END_OF_LIST()
2865     }
2866 };
2867 
2868 static const VMStateDescription vmstate_virtio_ringsize = {
2869     .name = "virtio/ringsize",
2870     .version_id = 1,
2871     .minimum_version_id = 1,
2872     .needed = &virtio_ringsize_needed,
2873     .fields = (const VMStateField[]) {
2874         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2875                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2876         VMSTATE_END_OF_LIST()
2877     }
2878 };
2879 
2880 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2881                            const VMStateField *field)
2882 {
2883     VirtIODevice *vdev = pv;
2884     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2885     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2886 
2887     if (!k->load_extra_state) {
2888         return -1;
2889     } else {
2890         return k->load_extra_state(qbus->parent, f);
2891     }
2892 }
2893 
2894 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2895                            const VMStateField *field, JSONWriter *vmdesc)
2896 {
2897     VirtIODevice *vdev = pv;
2898     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2899     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2900 
2901     k->save_extra_state(qbus->parent, f);
2902     return 0;
2903 }
2904 
2905 static const VMStateInfo vmstate_info_extra_state = {
2906     .name = "virtqueue_extra_state",
2907     .get = get_extra_state,
2908     .put = put_extra_state,
2909 };
2910 
2911 static const VMStateDescription vmstate_virtio_extra_state = {
2912     .name = "virtio/extra_state",
2913     .version_id = 1,
2914     .minimum_version_id = 1,
2915     .needed = &virtio_extra_state_needed,
2916     .fields = (const VMStateField[]) {
2917         {
2918             .name         = "extra_state",
2919             .version_id   = 0,
2920             .field_exists = NULL,
2921             .size         = 0,
2922             .info         = &vmstate_info_extra_state,
2923             .flags        = VMS_SINGLE,
2924             .offset       = 0,
2925         },
2926         VMSTATE_END_OF_LIST()
2927     }
2928 };
2929 
2930 static const VMStateDescription vmstate_virtio_device_endian = {
2931     .name = "virtio/device_endian",
2932     .version_id = 1,
2933     .minimum_version_id = 1,
2934     .needed = &virtio_device_endian_needed,
2935     .fields = (const VMStateField[]) {
2936         VMSTATE_UINT8(device_endian, VirtIODevice),
2937         VMSTATE_END_OF_LIST()
2938     }
2939 };
2940 
2941 static const VMStateDescription vmstate_virtio_64bit_features = {
2942     .name = "virtio/64bit_features",
2943     .version_id = 1,
2944     .minimum_version_id = 1,
2945     .needed = &virtio_64bit_features_needed,
2946     .fields = (const VMStateField[]) {
2947         VMSTATE_UINT64(guest_features, VirtIODevice),
2948         VMSTATE_END_OF_LIST()
2949     }
2950 };
2951 
2952 static const VMStateDescription vmstate_virtio_broken = {
2953     .name = "virtio/broken",
2954     .version_id = 1,
2955     .minimum_version_id = 1,
2956     .needed = &virtio_broken_needed,
2957     .fields = (const VMStateField[]) {
2958         VMSTATE_BOOL(broken, VirtIODevice),
2959         VMSTATE_END_OF_LIST()
2960     }
2961 };
2962 
2963 static const VMStateDescription vmstate_virtio_started = {
2964     .name = "virtio/started",
2965     .version_id = 1,
2966     .minimum_version_id = 1,
2967     .needed = &virtio_started_needed,
2968     .fields = (const VMStateField[]) {
2969         VMSTATE_BOOL(started, VirtIODevice),
2970         VMSTATE_END_OF_LIST()
2971     }
2972 };
2973 
2974 static const VMStateDescription vmstate_virtio_disabled = {
2975     .name = "virtio/disabled",
2976     .version_id = 1,
2977     .minimum_version_id = 1,
2978     .needed = &virtio_disabled_needed,
2979     .fields = (const VMStateField[]) {
2980         VMSTATE_BOOL(disabled, VirtIODevice),
2981         VMSTATE_END_OF_LIST()
2982     }
2983 };
2984 
2985 static const VMStateDescription vmstate_virtio = {
2986     .name = "virtio",
2987     .version_id = 1,
2988     .minimum_version_id = 1,
2989     .fields = (const VMStateField[]) {
2990         VMSTATE_END_OF_LIST()
2991     },
2992     .subsections = (const VMStateDescription * const []) {
2993         &vmstate_virtio_device_endian,
2994         &vmstate_virtio_64bit_features,
2995         &vmstate_virtio_virtqueues,
2996         &vmstate_virtio_ringsize,
2997         &vmstate_virtio_broken,
2998         &vmstate_virtio_extra_state,
2999         &vmstate_virtio_started,
3000         &vmstate_virtio_packed_virtqueues,
3001         &vmstate_virtio_disabled,
3002         NULL
3003     }
3004 };
3005 
3006 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
3007 {
3008     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3009     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3010     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3011     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
3012     int i;
3013 
3014     if (k->save_config) {
3015         k->save_config(qbus->parent, f);
3016     }
3017 
3018     qemu_put_8s(f, &vdev->status);
3019     qemu_put_8s(f, &vdev->isr);
3020     qemu_put_be16s(f, &vdev->queue_sel);
3021     qemu_put_be32s(f, &guest_features_lo);
3022     qemu_put_be32(f, vdev->config_len);
3023     qemu_put_buffer(f, vdev->config, vdev->config_len);
3024 
3025     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3026         if (vdev->vq[i].vring.num == 0)
3027             break;
3028     }
3029 
3030     qemu_put_be32(f, i);
3031 
3032     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3033         if (vdev->vq[i].vring.num == 0)
3034             break;
3035 
3036         qemu_put_be32(f, vdev->vq[i].vring.num);
3037         if (k->has_variable_vring_alignment) {
3038             qemu_put_be32(f, vdev->vq[i].vring.align);
3039         }
3040         /*
3041          * Save desc now, the rest of the ring addresses are saved in
3042          * subsections for VIRTIO-1 devices.
3043          */
3044         qemu_put_be64(f, vdev->vq[i].vring.desc);
3045         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
3046         if (k->save_queue) {
3047             k->save_queue(qbus->parent, i, f);
3048         }
3049     }
3050 
3051     if (vdc->save != NULL) {
3052         vdc->save(vdev, f);
3053     }
3054 
3055     if (vdc->vmsd) {
3056         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
3057         if (ret) {
3058             return ret;
3059         }
3060     }
3061 
3062     /* Subsections */
3063     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
3064 }
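
/*
 * Note: the save order above (transport config, status/isr/queue_sel,
 * low feature bits, config space, per-queue state) must stay in lockstep
 * with virtio_load(); anything added later travels in the vmstate_virtio
 * subsections so migration to and from older QEMU versions keeps working.
 */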
3065 
3066 /* A wrapper for use as a VMState .put function */
3067 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
3068                               const VMStateField *field, JSONWriter *vmdesc)
3069 {
3070     return virtio_save(VIRTIO_DEVICE(opaque), f);
3071 }
3072 
3073 /* A wrapper for use as a VMState .get function */
3074 static int coroutine_mixed_fn
3075 virtio_device_get(QEMUFile *f, void *opaque, size_t size,
3076                   const VMStateField *field)
3077 {
3078     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
3079     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
3080 
3081     return virtio_load(vdev, f, dc->vmsd->version_id);
3082 }
3083 
3084 const VMStateInfo  virtio_vmstate_info = {
3085     .name = "virtio",
3086     .get = virtio_device_get,
3087     .put = virtio_device_put,
3088 };
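
/*
 * Sketch of how a device hooks into the wrappers above, assuming the
 * VMSTATE_VIRTIO_DEVICE convenience macro from hw/virtio/virtio.h; the
 * "virtio-example" device is hypothetical:
 */
static G_GNUC_UNUSED const VMStateDescription vmstate_virtio_example = {
    .name = "virtio-example",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE, /* routes into virtio_save()/virtio_load() */
        VMSTATE_END_OF_LIST()
    },
};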
3089 
3090 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
3091 {
3092     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
3093     bool bad = (val & ~(vdev->host_features)) != 0;
3094 
3095     val &= vdev->host_features;
3096     if (k->set_features) {
3097         k->set_features(vdev, val);
3098     }
3099     vdev->guest_features = val;
3100     return bad ? -1 : 0;
3101 }
3102 
3103 typedef struct VirtioSetFeaturesNocheckData {
3104     Coroutine *co;
3105     VirtIODevice *vdev;
3106     uint64_t val;
3107     int ret;
3108 } VirtioSetFeaturesNocheckData;
3109 
3110 static void virtio_set_features_nocheck_bh(void *opaque)
3111 {
3112     VirtioSetFeaturesNocheckData *data = opaque;
3113 
3114     data->ret = virtio_set_features_nocheck(data->vdev, data->val);
3115     aio_co_wake(data->co);
3116 }
3117 
3118 static int coroutine_mixed_fn
3119 virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
3120 {
3121     if (qemu_in_coroutine()) {
3122         VirtioSetFeaturesNocheckData data = {
3123             .co = qemu_coroutine_self(),
3124             .vdev = vdev,
3125             .val = val,
3126         };
3127         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
3128                                 virtio_set_features_nocheck_bh, &data);
3129         qemu_coroutine_yield();
3130         return data.ret;
3131     } else {
3132         return virtio_set_features_nocheck(vdev, val);
3133     }
3134 }
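
/*
 * The bottom-half detour above exists because virtio_load() may run in a
 * coroutine (e.g. during snapshot load) while set_features callbacks are
 * not necessarily coroutine-safe; the BH runs them in plain AioContext
 * context and the coroutine sleeps until data.ret has been filled in.
 */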
3135 
3136 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
3137 {
3138     int ret;
3139     /*
3140      * The driver must not attempt to set features after feature negotiation
3141      * has finished.
3142      */
3143     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
3144         return -EINVAL;
3145     }
3146 
3147     if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
3148         qemu_log_mask(LOG_GUEST_ERROR,
3149                       "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
3150                       __func__, vdev->name);
3151     }
3152 
3153     ret = virtio_set_features_nocheck(vdev, val);
3154     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
3155         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
3156         int i;
3157         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3158             if (vdev->vq[i].vring.num != 0) {
3159                 virtio_init_region_cache(vdev, i);
3160             }
3161         }
3162     }
3163     if (!ret) {
3164         if (!virtio_device_started(vdev, vdev->status) &&
3165             !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3166             vdev->start_on_kick = true;
3167         }
3168     }
3169     return ret;
3170 }
3171 
3172 static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
3173                                                            Error **errp)
3174 {
3175     VirtioBusState *bus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3176     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
3177     DeviceState *proxy = DEVICE(BUS(bus)->parent);
3178 
3179     if (virtio_host_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA) &&
3180         k->ioeventfd_enabled(proxy)) {
3181         error_setg(errp,
3182                    "notification_data=on without ioeventfd=off is not supported");
3183     }
3184 }
3185 
3186 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
3187                               uint64_t host_features)
3188 {
3189     size_t config_size = params->min_size;
3190     const VirtIOFeature *feature_sizes = params->feature_sizes;
3191     size_t i;
3192 
3193     for (i = 0; feature_sizes[i].flags != 0; i++) {
3194         if (host_features & feature_sizes[i].flags) {
3195             config_size = MAX(feature_sizes[i].end, config_size);
3196         }
3197     }
3198 
3199     assert(config_size <= params->max_size);
3200     return config_size;
3201 }
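
/*
 * Illustrative sketch with a hypothetical config layout and feature bit
 * (real devices use their own structs and VIRTIO_*_F_* constants): each
 * table entry extends the required config size when its feature is offered.
 */
struct example_config {
    uint32_t base;    /* always present */
    uint32_t extra;   /* only with the hypothetical feature below */
};

static G_GNUC_UNUSED const VirtIOFeature example_feature_sizes[] = {
    {.flags = 1ULL << 40,  /* hypothetical EXAMPLE_F_EXTRA bit */
     .end = offsetof(struct example_config, extra) + sizeof(uint32_t)},
    {}
};

static G_GNUC_UNUSED const VirtIOConfigSizeParams example_cfg_size_params = {
    .min_size = offsetof(struct example_config, extra),
    .max_size = sizeof(struct example_config),
    .feature_sizes = example_feature_sizes,
};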
3202 
3203 int coroutine_mixed_fn
3204 virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
3205 {
3206     int i, ret;
3207     int32_t config_len;
3208     uint32_t num;
3209     uint32_t features;
3210     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3211     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3212     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3213 
3214     /*
3215      * We poison the endianness to ensure it does not get used before
3216      * subsections have been loaded.
3217      */
3218     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3219 
3220     if (k->load_config) {
3221         ret = k->load_config(qbus->parent, f);
3222         if (ret)
3223             return ret;
3224     }
3225 
3226     qemu_get_8s(f, &vdev->status);
3227     qemu_get_8s(f, &vdev->isr);
3228     qemu_get_be16s(f, &vdev->queue_sel);
3229     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3230         return -1;
3231     }
3232     qemu_get_be32s(f, &features);
3233 
3234     /*
3235      * Temporarily set guest_features low bits - needed by
3236      * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3237      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3238      *
3239      * Note: devices should always test host features in the future; don't
3240      * create new dependencies like this.
3241      */
3242     vdev->guest_features = features;
3243 
3244     config_len = qemu_get_be32(f);
3245 
3246     /*
3247      * There are cases where the incoming config can be bigger or smaller
3248      * than what we have; so load what we have space for, and skip
3249      * any excess that's in the stream.
3250      */
3251     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3252 
3253     while (config_len > vdev->config_len) {
3254         qemu_get_byte(f);
3255         config_len--;
3256     }
3257 
3258     if (vdc->pre_load_queues) {
3259         ret = vdc->pre_load_queues(vdev);
3260         if (ret) {
3261             return ret;
3262         }
3263     }
3264 
3265     num = qemu_get_be32(f);
3266 
3267     if (num > VIRTIO_QUEUE_MAX) {
3268         error_report("Invalid number of virtqueues: 0x%x", num);
3269         return -1;
3270     }
3271 
3272     for (i = 0; i < num; i++) {
3273         vdev->vq[i].vring.num = qemu_get_be32(f);
3274         if (k->has_variable_vring_alignment) {
3275             vdev->vq[i].vring.align = qemu_get_be32(f);
3276         }
3277         vdev->vq[i].vring.desc = qemu_get_be64(f);
3278         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3279         vdev->vq[i].signalled_used_valid = false;
3280         vdev->vq[i].notification = true;
3281 
3282         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3283             error_report("VQ %d address 0x0 "
3284                          "inconsistent with Host index 0x%x",
3285                          i, vdev->vq[i].last_avail_idx);
3286             return -1;
3287         }
3288         if (k->load_queue) {
3289             ret = k->load_queue(qbus->parent, i, f);
3290             if (ret)
3291                 return ret;
3292         }
3293     }
3294 
3295     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3296 
3297     if (vdc->load != NULL) {
3298         ret = vdc->load(vdev, f, version_id);
3299         if (ret) {
3300             return ret;
3301         }
3302     }
3303 
3304     if (vdc->vmsd) {
3305         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3306         if (ret) {
3307             return ret;
3308         }
3309     }
3310 
3311     /* Subsections */
3312     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3313     if (ret) {
3314         return ret;
3315     }
3316 
3317     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3318         vdev->device_endian = virtio_default_endian();
3319     }
3320 
3321     if (virtio_64bit_features_needed(vdev)) {
3322         /*
3323          * Subsection load filled vdev->guest_features.  Run them
3324          * through virtio_set_features_nocheck_maybe_co() to sanity-check
3325          * them against host_features.
3326          */
3327         uint64_t features64 = vdev->guest_features;
3328         if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
3329             error_report("Features 0x%" PRIx64 " unsupported. "
3330                          "Allowed features: 0x%" PRIx64,
3331                          features64, vdev->host_features);
3332             return -1;
3333         }
3334     } else {
3335         if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
3336             error_report("Features 0x%x unsupported. "
3337                          "Allowed features: 0x%" PRIx64,
3338                          features, vdev->host_features);
3339             return -1;
3340         }
3341     }
3342 
3343     if (!virtio_device_started(vdev, vdev->status) &&
3344         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3345         vdev->start_on_kick = true;
3346     }
3347 
3348     RCU_READ_LOCK_GUARD();
3349     for (i = 0; i < num; i++) {
3350         if (vdev->vq[i].vring.desc) {
3351             uint16_t nheads;
3352 
3353             /*
3354              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3355              * only the region cache needs to be set up.  Legacy devices need
3356              * to calculate used and avail ring addresses based on the desc
3357              * address.
3358              */
3359             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3360                 virtio_init_region_cache(vdev, i);
3361             } else {
3362                 virtio_queue_update_rings(vdev, i);
3363             }
3364 
3365             if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3366                 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3367                 vdev->vq[i].shadow_avail_wrap_counter =
3368                                         vdev->vq[i].last_avail_wrap_counter;
3369                 continue;
3370             }
3371 
3372             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check the guest isn't doing strange things with descriptor numbers. */
3374             if (nheads > vdev->vq[i].vring.num) {
3375                 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3376                              "inconsistent with Host index 0x%x: delta 0x%x",
3377                              i, vdev->vq[i].vring.num,
3378                              vring_avail_idx(&vdev->vq[i]),
3379                              vdev->vq[i].last_avail_idx, nheads);
3380                 vdev->vq[i].used_idx = 0;
3381                 vdev->vq[i].shadow_avail_idx = 0;
3382                 vdev->vq[i].inuse = 0;
3383                 continue;
3384             }
3385             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3386             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3387 
3388             /*
3389              * Some devices migrate VirtQueueElements that have been popped
3390              * from the avail ring but not yet returned to the used ring.
             * Since the max ring size is < UINT16_MAX, it's safe to compute
             * the difference modulo UINT16_MAX + 1.
3393              */
3394             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3395                                 vdev->vq[i].used_idx);
3396             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3397                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3398                              "used_idx 0x%x",
3399                              i, vdev->vq[i].vring.num,
3400                              vdev->vq[i].last_avail_idx,
3401                              vdev->vq[i].used_idx);
3402                 return -1;
3403             }
3404         }
3405     }
3406 
3407     if (vdc->post_load) {
3408         ret = vdc->post_load(vdev);
3409         if (ret) {
3410             return ret;
3411         }
3412     }
3413 
3414     return 0;
3415 }
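
/*
 * A worked example of the inuse computation above (illustrative numbers,
 * not from a real trace): with last_avail_idx = 0x0002 and used_idx =
 * 0xfffe the guest has wrapped the 16-bit index, and the uint16_t
 * subtraction yields (0x0002 - 0xfffe) mod 0x10000 = 4 elements still in
 * flight, which is then validated against vring.num.
 */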
3416 
3417 void virtio_cleanup(VirtIODevice *vdev)
3418 {
3419     qemu_del_vm_change_state_handler(vdev->vmstate);
3420 }
3421 
3422 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3423 {
3424     VirtIODevice *vdev = opaque;
3425     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3426     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3427     bool backend_run = running && virtio_device_started(vdev, vdev->status);
3428     vdev->vm_running = running;
3429 
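    /*
     * Note the ordering below: when the VM starts, the device status (and
     * with it any running backend) is brought up before the transport's
     * vmstate_change callback; when the VM stops, the transport is notified
     * first and the status change runs last.
     */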
3430     if (backend_run) {
3431         virtio_set_status(vdev, vdev->status);
3432     }
3433 
3434     if (k->vmstate_change) {
3435         k->vmstate_change(qbus->parent, backend_run);
3436     }
3437 
3438     if (!backend_run) {
3439         virtio_set_status(vdev, vdev->status);
3440     }
3441 }
3442 
3443 void virtio_instance_init_common(Object *proxy_obj, void *data,
3444                                  size_t vdev_size, const char *vdev_name)
3445 {
3446     DeviceState *vdev = data;
3447 
3448     object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3449                                        vdev_size, vdev_name, &error_abort,
3450                                        NULL);
3451     qdev_alias_all_properties(vdev, proxy_obj);
3452 }
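
/*
 * A minimal usage sketch: a transport proxy typically calls
 * virtio_instance_init_common() from its instance_init to embed the backend
 * device.  The VirtIOFooPCI, VirtIOFoo and TYPE_VIRTIO_FOO names below are
 * hypothetical placeholders, not types defined in QEMU:
 *
 *     static void virtio_foo_pci_instance_init(Object *obj)
 *     {
 *         VirtIOFooPCI *dev = VIRTIO_FOO_PCI(obj);
 *
 *         virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
 *                                     TYPE_VIRTIO_FOO);
 *     }
 */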
3453 
3454 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3455 {
3456     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3457     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3458     int i;
3459     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3460 
3461     if (nvectors) {
3462         vdev->vector_queues =
3463             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3464     }
3465 
3466     vdev->start_on_kick = false;
3467     vdev->started = false;
3468     vdev->vhost_started = false;
3469     vdev->device_id = device_id;
3470     vdev->status = 0;
3471     qatomic_set(&vdev->isr, 0);
3472     vdev->queue_sel = 0;
3473     vdev->config_vector = VIRTIO_NO_VECTOR;
3474     vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3475     vdev->vm_running = runstate_is_running();
3476     vdev->broken = false;
3477     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3478         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3479         vdev->vq[i].vdev = vdev;
3480         vdev->vq[i].queue_index = i;
3481         vdev->vq[i].host_notifier_enabled = false;
3482     }
3483 
3484     vdev->name = virtio_id_to_name(device_id);
3485     vdev->config_len = config_size;
3486     if (vdev->config_len) {
3487         vdev->config = g_malloc0(config_size);
3488     } else {
3489         vdev->config = NULL;
3490     }
3491     vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3492             virtio_vmstate_change, vdev);
3493     vdev->device_endian = virtio_default_endian();
3494     vdev->use_guest_notifier_mask = true;
3495 }
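
/*
 * A realize-time sketch of how a device is expected to pair virtio_init()
 * with queue creation (foo_handle_vq is a hypothetical handler;
 * virtio_add_queue() is declared in hw/virtio/virtio.h):
 *
 *     virtio_init(vdev, VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config));
 *     virtio_add_queue(vdev, 128, foo_handle_vq);
 */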
3496 
3497 /*
 * Only devices that already existed before the virtio standard was defined
 * support legacy mode; this includes devices not specified in the standard.
 * All newer devices conform to the virtio standard only.
3501  */
3502 bool virtio_legacy_allowed(VirtIODevice *vdev)
3503 {
3504     switch (vdev->device_id) {
3505     case VIRTIO_ID_NET:
3506     case VIRTIO_ID_BLOCK:
3507     case VIRTIO_ID_CONSOLE:
3508     case VIRTIO_ID_RNG:
3509     case VIRTIO_ID_BALLOON:
3510     case VIRTIO_ID_RPMSG:
3511     case VIRTIO_ID_SCSI:
3512     case VIRTIO_ID_9P:
3513     case VIRTIO_ID_RPROC_SERIAL:
3514     case VIRTIO_ID_CAIF:
3515         return true;
3516     default:
3517         return false;
3518     }
3519 }
3520 
3521 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3522 {
3523     return vdev->disable_legacy_check;
3524 }
3525 
3526 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3527 {
3528     return vdev->vq[n].vring.desc;
3529 }
3530 
3531 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3532 {
3533     return virtio_queue_get_desc_addr(vdev, n) != 0;
3534 }
3535 
3536 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3537 {
3538     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3539     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3540 
3541     if (k->queue_enabled) {
3542         return k->queue_enabled(qbus->parent, n);
3543     }
3544     return virtio_queue_enabled_legacy(vdev, n);
3545 }
3546 
3547 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3548 {
3549     return vdev->vq[n].vring.avail;
3550 }
3551 
3552 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3553 {
3554     return vdev->vq[n].vring.used;
3555 }
3556 
3557 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3558 {
3559     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3560 }
3561 
3562 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3563 {
3564     int s;
3565 
3566     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3567         return sizeof(struct VRingPackedDescEvent);
3568     }
3569 
3570     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3571     return offsetof(VRingAvail, ring) +
3572         sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3573 }
3574 
3575 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3576 {
3577     int s;
3578 
3579     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3580         return sizeof(struct VRingPackedDescEvent);
3581     }
3582 
3583     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3584     return offsetof(VRingUsed, ring) +
3585         sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3586 }
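
/*
 * A worked example of the split-ring size formulas above: with vring.num =
 * 256 and VIRTIO_RING_F_EVENT_IDX negotiated, the avail ring occupies
 * 4 + 2 * 256 + 2 = 518 bytes (flags, idx, ring[], used_event) and the
 * used ring occupies 4 + 8 * 256 + 2 = 2054 bytes (flags, idx, ring[],
 * avail_event).
 */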
3587 
3588 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3589                                                            int n)
3590 {
3591     unsigned int avail, used;
3592 
3593     avail = vdev->vq[n].last_avail_idx;
3594     avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3595 
3596     used = vdev->vq[n].used_idx;
3597     used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3598 
    return avail | (used << 16);
3600 }
3601 
3602 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3603                                                       int n)
3604 {
3605     return vdev->vq[n].last_avail_idx;
3606 }
3607 
3608 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3609 {
3610     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3611         return virtio_queue_packed_get_last_avail_idx(vdev, n);
3612     } else {
3613         return virtio_queue_split_get_last_avail_idx(vdev, n);
3614     }
3615 }
3616 
3617 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3618                                                    int n, unsigned int idx)
3619 {
3620     struct VirtQueue *vq = &vdev->vq[n];
3621 
3622     vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3623     vq->last_avail_wrap_counter =
3624         vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3625     idx >>= 16;
3626     vq->used_idx = idx & 0x7fff;
3627     vq->used_wrap_counter = !!(idx & 0x8000);
3628 }
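
/*
 * The getter/setter pair above marshals the packed-ring state into one
 * 32-bit word: bits 0..14 hold last_avail_idx, bit 15 the avail wrap
 * counter, bits 16..30 used_idx and bit 31 the used wrap counter.  For
 * example (an illustrative value, not from a real device), idx = 0x8005800a
 * decodes to last_avail_idx = 0xa with the avail wrap counter set and
 * used_idx = 0x5 with the used wrap counter set.
 */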
3629 
3630 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3631                                                   int n, unsigned int idx)
3632 {
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
3635 }
3636 
3637 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3638                                      unsigned int idx)
3639 {
3640     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3641         virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3642     } else {
3643         virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3644     }
3645 }
3646 
3647 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3648                                                        int n)
3649 {
    /* Packed rings keep no avail index in guest memory to restore from */
3652 }
3653 
3654 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3655                                                       int n)
3656 {
3657     RCU_READ_LOCK_GUARD();
3658     if (vdev->vq[n].vring.desc) {
3659         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3660         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3661     }
3662 }
3663 
3664 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3665 {
3666     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3667         virtio_queue_packed_restore_last_avail_idx(vdev, n);
3668     } else {
3669         virtio_queue_split_restore_last_avail_idx(vdev, n);
3670     }
3671 }
3672 
3673 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3674 {
    /* The used idx was already updated through set_last_avail_idx() */
3677 }
3678 
3679 static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
3680 {
3681     RCU_READ_LOCK_GUARD();
3682     if (vdev->vq[n].vring.desc) {
3683         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3684     }
3685 }
3686 
3687 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3688 {
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_update_used_idx(vdev, n);
    } else {
        virtio_queue_split_update_used_idx(vdev, n);
    }
3694 }
3695 
3696 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3697 {
3698     vdev->vq[n].signalled_used_valid = false;
3699 }
3700 
3701 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3702 {
3703     return vdev->vq + n;
3704 }
3705 
3706 uint16_t virtio_get_queue_index(VirtQueue *vq)
3707 {
3708     return vq->queue_index;
3709 }
3710 
3711 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3712 {
3713     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3714     if (event_notifier_test_and_clear(n)) {
3715         virtio_irq(vq);
3716     }
}

3718 static void virtio_config_guest_notifier_read(EventNotifier *n)
3719 {
3720     VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
3721 
3722     if (event_notifier_test_and_clear(n)) {
3723         virtio_notify_config(vdev);
3724     }
}

3726 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3727                                                 bool with_irqfd)
3728 {
3729     if (assign && !with_irqfd) {
3730         event_notifier_set_handler(&vq->guest_notifier,
3731                                    virtio_queue_guest_notifier_read);
3732     } else {
3733         event_notifier_set_handler(&vq->guest_notifier, NULL);
3734     }
3735     if (!assign) {
        /*
         * Test and clear the notifier before closing it, in case the poll
         * callback didn't have time to run.
         */
3738         virtio_queue_guest_notifier_read(&vq->guest_notifier);
3739     }
3740 }
3741 
3742 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
3743                                                  bool assign, bool with_irqfd)
3744 {
3745     EventNotifier *n;
3746     n = &vdev->config_notifier;
3747     if (assign && !with_irqfd) {
3748         event_notifier_set_handler(n, virtio_config_guest_notifier_read);
3749     } else {
3750         event_notifier_set_handler(n, NULL);
3751     }
3752     if (!assign) {
        /*
         * Test and clear the notifier before closing it, in case the poll
         * callback didn't have time to run.
         */
3755         virtio_config_guest_notifier_read(n);
3756     }
3757 }
3758 
3759 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3760 {
3761     return &vq->guest_notifier;
3762 }
3763 
3764 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3765 {
3766     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3767 
3768     virtio_queue_set_notification(vq, 0);
3769 }
3770 
3771 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3772 {
3773     EventNotifier *n = opaque;
3774     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3775 
3776     return vq->vring.desc && !virtio_queue_empty(vq);
3777 }
3778 
3779 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3780 {
3781     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3782 
3783     virtio_queue_notify_vq(vq);
3784 }
3785 
3786 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3787 {
3788     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3789 
3790     /* Caller polls once more after this to catch requests that race with us */
3791     virtio_queue_set_notification(vq, 1);
3792 }
3793 
3794 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3795 {
3796     /*
3797      * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
3798      * Re-enable them.  (And if detach has not been used before, notifications
3799      * being enabled is still the default state while a notifier is attached;
3800      * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
3801      * notifications enabled once the polling section is left.)
3802      */
3803     if (!virtio_queue_get_notification(vq)) {
3804         virtio_queue_set_notification(vq, 1);
3805     }
3806 
3807     aio_set_event_notifier(ctx, &vq->host_notifier,
3808                            virtio_queue_host_notifier_read,
3809                            virtio_queue_host_notifier_aio_poll,
3810                            virtio_queue_host_notifier_aio_poll_ready);
3811     aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3812                                 virtio_queue_host_notifier_aio_poll_begin,
3813                                 virtio_queue_host_notifier_aio_poll_end);
3814 
3815     /*
3816      * We will have ignored notifications about new requests from the guest
3817      * while no notifiers were attached, so "kick" the virt queue to process
3818      * those requests now.
3819      */
3820     event_notifier_set(&vq->host_notifier);
3821 }
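
/*
 * A usage sketch (assuming a dataplane start path that has already set up
 * ioeventfds and owns an IOThread): the notifier is typically attached to
 * the IOThread's AioContext so the queue is serviced outside the BQL:
 *
 *     virtio_queue_aio_attach_host_notifier(vq,
 *             iothread_get_aio_context(iothread));
 */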
3822 
3823 /*
3824  * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3825  * this for rx virtqueues and similar cases where the virtqueue handler
 * function does not pop all elements. When the virtqueue is left non-empty,
3827  * polling consumes CPU cycles and should not be used.
3828  */
3829 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3830 {
3831     /* See virtio_queue_aio_attach_host_notifier() */
3832     if (!virtio_queue_get_notification(vq)) {
3833         virtio_queue_set_notification(vq, 1);
3834     }
3835 
3836     aio_set_event_notifier(ctx, &vq->host_notifier,
3837                            virtio_queue_host_notifier_read,
3838                            NULL, NULL);
3839 
3840     /*
3841      * See virtio_queue_aio_attach_host_notifier().
3842      * Note that this may be unnecessary for the type of virtqueues this
3843      * function is used for.  Still, it will not hurt to have a quick look into
3844      * whether we can/should process any of the virtqueue elements.
3845      */
3846     event_notifier_set(&vq->host_notifier);
3847 }
3848 
3849 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3850 {
3851     aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3852 
3853     /*
3854      * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
3855      * will run after io_poll_begin(), so by removing the notifier, we do not
3856      * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
3857      * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
3858      * notifications are enabled or disabled.  It does not really matter anyway;
3859      * we just removed the notifier, so we do not care about notifications until
3860      * we potentially re-attach it.  The attach_host_notifier functions will
3861      * ensure that notifications are enabled again when they are needed.
3862      */
3863 }
3864 
3865 void virtio_queue_host_notifier_read(EventNotifier *n)
3866 {
3867     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3868     if (event_notifier_test_and_clear(n)) {
3869         virtio_queue_notify_vq(vq);
3870     }
3871 }
3872 
3873 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3874 {
3875     return &vq->host_notifier;
3876 }
3877 
3878 EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
3879 {
3880     return &vdev->config_notifier;
3881 }
3882 
3883 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3884 {
3885     vq->host_notifier_enabled = enabled;
3886 }
3887 
3888 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3889                                       MemoryRegion *mr, bool assign)
3890 {
3891     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3892     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3893 
3894     if (k->set_host_notifier_mr) {
3895         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3896     }
3897 
3898     return -1;
3899 }
3900 
3901 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3902 {
3903     g_free(vdev->bus_name);
3904     vdev->bus_name = g_strdup(bus_name);
3905 }
3906 
3907 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3908 {
3909     va_list ap;
3910 
3911     va_start(ap, fmt);
3912     error_vreport(fmt, ap);
3913     va_end(ap);
3914 
3915     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status |= VIRTIO_CONFIG_S_NEEDS_RESET;
3917         virtio_notify_config(vdev);
3918     }
3919 
3920     vdev->broken = true;
3921 }
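
/*
 * A typical call-site sketch: device code reports a fatal, guest-visible
 * inconsistency and stops processing the ring, e.g.:
 *
 *     if (desc.next >= vq->vring.num) {
 *         virtio_error(vdev, "Desc next is %u", desc.next);
 *         return;
 *     }
 *
 * For VIRTIO 1.0 devices this sets VIRTIO_CONFIG_S_NEEDS_RESET and notifies
 * the guest; in all cases the device stays broken until it is reset.
 */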
3922 
3923 static void virtio_memory_listener_commit(MemoryListener *listener)
3924 {
3925     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3926     int i;
3927 
3928     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3929         if (vdev->vq[i].vring.num == 0) {
3930             break;
3931         }
3932         virtio_init_region_cache(vdev, i);
3933     }
3934 }
3935 
3936 static void virtio_device_realize(DeviceState *dev, Error **errp)
3937 {
3938     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3939     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3940     Error *err = NULL;
3941 
    /* Devices should use either vmsd or the load/save methods */
3943     assert(!vdc->vmsd || !vdc->load);
3944 
3945     if (vdc->realize != NULL) {
3946         vdc->realize(dev, &err);
3947         if (err != NULL) {
3948             error_propagate(errp, err);
3949             return;
3950         }
3951     }
3952 
    /* Devices should not use both the ioeventfd and notification-data features */
3954     virtio_device_check_notification_compatibility(vdev, &err);
3955     if (err != NULL) {
3956         error_propagate(errp, err);
3957         vdc->unrealize(dev);
3958         return;
3959     }
3960 
3961     virtio_bus_device_plugged(vdev, &err);
3962     if (err != NULL) {
3963         error_propagate(errp, err);
3964         vdc->unrealize(dev);
3965         return;
3966     }
3967 
3968     vdev->listener.commit = virtio_memory_listener_commit;
3969     vdev->listener.name = "virtio";
3970     memory_listener_register(&vdev->listener, vdev->dma_as);
3971 }
3972 
3973 static void virtio_device_unrealize(DeviceState *dev)
3974 {
3975     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3976     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3977 
3978     memory_listener_unregister(&vdev->listener);
3979     virtio_bus_device_unplugged(vdev);
3980 
3981     if (vdc->unrealize != NULL) {
3982         vdc->unrealize(dev);
3983     }
3984 
3985     g_free(vdev->bus_name);
3986     vdev->bus_name = NULL;
3987 }
3988 
3989 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3990 {
3991     int i;
3992     if (!vdev->vq) {
3993         return;
3994     }
3995 
3996     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3997         if (vdev->vq[i].vring.num == 0) {
3998             break;
3999         }
4000         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
4001     }
4002     g_free(vdev->vq);
4003 }
4004 
4005 static void virtio_device_instance_finalize(Object *obj)
4006 {
4007     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
4008 
4009     virtio_device_free_virtqueues(vdev);
4010 
4011     g_free(vdev->config);
4012     g_free(vdev->vector_queues);
4013 }
4014 
4015 static Property virtio_properties[] = {
4016     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
4017     DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
4018     DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
4019     DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
4020                      disable_legacy_check, false),
4021     DEFINE_PROP_END_OF_LIST(),
4022 };
4023 
4024 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
4025 {
4026     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
4027     int i, n, r, err;
4028 
4029     /*
4030      * Batch all the host notifiers in a single transaction to avoid
4031      * quadratic time complexity in address_space_update_ioeventfds().
4032      */
4033     memory_region_transaction_begin();
4034     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4035         VirtQueue *vq = &vdev->vq[n];
4036         if (!virtio_queue_get_num(vdev, n)) {
4037             continue;
4038         }
4039         r = virtio_bus_set_host_notifier(qbus, n, true);
4040         if (r < 0) {
4041             err = r;
4042             goto assign_error;
4043         }
4044         event_notifier_set_handler(&vq->host_notifier,
4045                                    virtio_queue_host_notifier_read);
4046     }
4047 
4048     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4049         /* Kick right away to begin processing requests already in vring */
4050         VirtQueue *vq = &vdev->vq[n];
4051         if (!vq->vring.num) {
4052             continue;
4053         }
4054         event_notifier_set(&vq->host_notifier);
4055     }
4056     memory_region_transaction_commit();
4057     return 0;
4058 
4059 assign_error:
    i = n; /* save n for a second iteration after the transaction is committed */
4061     while (--n >= 0) {
4062         VirtQueue *vq = &vdev->vq[n];
4063         if (!virtio_queue_get_num(vdev, n)) {
4064             continue;
4065         }
4066 
4067         event_notifier_set_handler(&vq->host_notifier, NULL);
4068         r = virtio_bus_set_host_notifier(qbus, n, false);
4069         assert(r >= 0);
4070     }
4071     /*
4072      * The transaction expects the ioeventfds to be open when it
4073      * commits. Do it now, before the cleanup loop.
4074      */
4075     memory_region_transaction_commit();
4076 
4077     while (--i >= 0) {
4078         if (!virtio_queue_get_num(vdev, i)) {
4079             continue;
4080         }
4081         virtio_bus_cleanup_host_notifier(qbus, i);
4082     }
4083     return err;
4084 }
4085 
4086 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
4087 {
4088     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4089     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4090 
4091     return virtio_bus_start_ioeventfd(vbus);
4092 }
4093 
4094 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
4095 {
4096     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
4097     int n, r;
4098 
4099     /*
4100      * Batch all the host notifiers in a single transaction to avoid
4101      * quadratic time complexity in address_space_update_ioeventfds().
4102      */
4103     memory_region_transaction_begin();
4104     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4105         VirtQueue *vq = &vdev->vq[n];
4106 
4107         if (!virtio_queue_get_num(vdev, n)) {
4108             continue;
4109         }
4110         event_notifier_set_handler(&vq->host_notifier, NULL);
4111         r = virtio_bus_set_host_notifier(qbus, n, false);
4112         assert(r >= 0);
4113     }
4114     /*
4115      * The transaction expects the ioeventfds to be open when it
4116      * commits. Do it now, before the cleanup loop.
4117      */
4118     memory_region_transaction_commit();
4119 
4120     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4121         if (!virtio_queue_get_num(vdev, n)) {
4122             continue;
4123         }
4124         virtio_bus_cleanup_host_notifier(qbus, n);
4125     }
4126 }
4127 
4128 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
4129 {
4130     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4131     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4132 
4133     return virtio_bus_grab_ioeventfd(vbus);
4134 }
4135 
4136 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
4137 {
4138     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4139     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4140 
4141     virtio_bus_release_ioeventfd(vbus);
4142 }
4143 
4144 static void virtio_device_class_init(ObjectClass *klass, void *data)
4145 {
4146     /* Set the default value here. */
4147     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
4148     DeviceClass *dc = DEVICE_CLASS(klass);
4149 
4150     dc->realize = virtio_device_realize;
4151     dc->unrealize = virtio_device_unrealize;
4152     dc->bus_type = TYPE_VIRTIO_BUS;
4153     device_class_set_props(dc, virtio_properties);
4154     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
4155     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
4156 
4157     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
4158 }
4159 
4160 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
4161 {
4162     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4163     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4164 
4165     return virtio_bus_ioeventfd_enabled(vbus);
4166 }
4167 
4168 VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
4169                                                  uint16_t queue,
4170                                                  Error **errp)
4171 {
4172     VirtIODevice *vdev;
4173     VirtQueueStatus *status;
4174 
4175     vdev = qmp_find_virtio_device(path);
4176     if (vdev == NULL) {
4177         error_setg(errp, "Path %s is not a VirtIODevice", path);
4178         return NULL;
4179     }
4180 
4181     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4182         error_setg(errp, "Invalid virtqueue number %d", queue);
4183         return NULL;
4184     }
4185 
4186     status = g_new0(VirtQueueStatus, 1);
4187     status->name = g_strdup(vdev->name);
4188     status->queue_index = vdev->vq[queue].queue_index;
4189     status->inuse = vdev->vq[queue].inuse;
4190     status->vring_num = vdev->vq[queue].vring.num;
4191     status->vring_num_default = vdev->vq[queue].vring.num_default;
4192     status->vring_align = vdev->vq[queue].vring.align;
4193     status->vring_desc = vdev->vq[queue].vring.desc;
4194     status->vring_avail = vdev->vq[queue].vring.avail;
4195     status->vring_used = vdev->vq[queue].vring.used;
4196     status->used_idx = vdev->vq[queue].used_idx;
4197     status->signalled_used = vdev->vq[queue].signalled_used;
4198     status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
4199 
4200     if (vdev->vhost_started) {
4201         VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
4202         struct vhost_dev *hdev = vdc->get_vhost(vdev);
4203 
        /* Check if the vq index exists for vhost as well */
4205         if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
4206             status->has_last_avail_idx = true;
4207 
4208             int vhost_vq_index =
4209                 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
4210             struct vhost_vring_state state = {
4211                 .index = vhost_vq_index,
4212             };
4213 
4214             status->last_avail_idx =
4215                 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
4216         }
4217     } else {
4218         status->has_shadow_avail_idx = true;
4219         status->has_last_avail_idx = true;
4220         status->last_avail_idx = vdev->vq[queue].last_avail_idx;
4221         status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
4222     }
4223 
4224     return status;
4225 }
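
/*
 * An example QMP exchange (the QOM path and returned values are
 * illustrative; use x-query-virtio to list the real paths):
 *
 * -> { "execute": "x-query-virtio-queue-status",
 *      "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend",
 *                     "queue": 0 } }
 * <- { "return": { "name": "virtio-net", "queue-index": 0, "inuse": 0,
 *                  "vring-num": 256, ... } }
 */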
4226 
4227 static strList *qmp_decode_vring_desc_flags(uint16_t flags)
4228 {
4229     strList *list = NULL;
4230     strList *node;
4231     int i;
4232 
4233     struct {
4234         uint16_t flag;
4235         const char *value;
4236     } map[] = {
4237         { VRING_DESC_F_NEXT, "next" },
4238         { VRING_DESC_F_WRITE, "write" },
4239         { VRING_DESC_F_INDIRECT, "indirect" },
4240         { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
4241         { 1 << VRING_PACKED_DESC_F_USED, "used" },
4242         { 0, "" }
4243     };
4244 
4245     for (i = 0; map[i].flag; i++) {
4246         if ((map[i].flag & flags) == 0) {
4247             continue;
4248         }
4249         node = g_malloc0(sizeof(strList));
4250         node->value = g_strdup(map[i].value);
4251         node->next = list;
4252         list = node;
4253     }
4254 
4255     return list;
4256 }
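
/*
 * For example, a split-ring descriptor with flags = 0x3
 * (VRING_DESC_F_NEXT | VRING_DESC_F_WRITE) decodes to the string list
 * ["write", "next"]: entries are prepended, so they come out in reverse
 * map order.
 */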
4257 
4258 VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
4259                                                      uint16_t queue,
4260                                                      bool has_index,
4261                                                      uint16_t index,
4262                                                      Error **errp)
4263 {
4264     VirtIODevice *vdev;
4265     VirtQueue *vq;
4266     VirtioQueueElement *element = NULL;
4267 
4268     vdev = qmp_find_virtio_device(path);
4269     if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIODevice", path);
4271         return NULL;
4272     }
4273 
4274     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4275         error_setg(errp, "Invalid virtqueue number %d", queue);
4276         return NULL;
4277     }
4278     vq = &vdev->vq[queue];
4279 
4280     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
4281         error_setg(errp, "Packed ring not supported");
4282         return NULL;
4283     } else {
4284         unsigned int head, i, max;
4285         VRingMemoryRegionCaches *caches;
4286         MemoryRegionCache indirect_desc_cache;
4287         MemoryRegionCache *desc_cache;
4288         VRingDesc desc;
4289         VirtioRingDescList *list = NULL;
4290         VirtioRingDescList *node;
        int rc;
        int ndescs;
4292 
4293         address_space_cache_init_empty(&indirect_desc_cache);
4294 
4295         RCU_READ_LOCK_GUARD();
4296 
4297         max = vq->vring.num;
4298 
4299         if (!has_index) {
4300             head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4301         } else {
4302             head = vring_avail_ring(vq, index % vq->vring.num);
4303         }
4304         i = head;
4305 
4306         caches = vring_get_region_caches(vq);
4307         if (!caches) {
4308             error_setg(errp, "Region caches not initialized");
4309             return NULL;
4310         }
4311         if (caches->desc.len < max * sizeof(VRingDesc)) {
4312             error_setg(errp, "Cannot map descriptor ring");
4313             return NULL;
4314         }
4315 
4316         desc_cache = &caches->desc;
4317         vring_split_desc_read(vdev, &desc, desc_cache, i);
4318         if (desc.flags & VRING_DESC_F_INDIRECT) {
4319             int64_t len;
4320             len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4321                                            desc.addr, desc.len, false);
4322             desc_cache = &indirect_desc_cache;
4323             if (len < desc.len) {
4324                 error_setg(errp, "Cannot map indirect buffer");
4325                 goto done;
4326             }
4327 
4328             max = desc.len / sizeof(VRingDesc);
4329             i = 0;
4330             vring_split_desc_read(vdev, &desc, desc_cache, i);
4331         }
4332 
4333         element = g_new0(VirtioQueueElement, 1);
4334         element->avail = g_new0(VirtioRingAvail, 1);
4335         element->used = g_new0(VirtioRingUsed, 1);
4336         element->name = g_strdup(vdev->name);
4337         element->index = head;
4338         element->avail->flags = vring_avail_flags(vq);
4339         element->avail->idx = vring_avail_idx(vq);
4340         element->avail->ring = head;
4341         element->used->flags = vring_used_flags(vq);
4342         element->used->idx = vring_used_idx(vq);
4343         ndescs = 0;
4344 
4345         do {
4346             /* A buggy driver may produce an infinite loop */
4347             if (ndescs >= max) {
4348                 break;
4349             }
4350             node = g_new0(VirtioRingDescList, 1);
4351             node->value = g_new0(VirtioRingDesc, 1);
4352             node->value->addr = desc.addr;
4353             node->value->len = desc.len;
4354             node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4355             node->next = list;
4356             list = node;
4357 
4358             ndescs++;
4359             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
4360         } while (rc == VIRTQUEUE_READ_DESC_MORE);
4361         element->descs = list;
4362 done:
4363         address_space_cache_destroy(&indirect_desc_cache);
4364     }
4365 
4366     return element;
4367 }
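
/*
 * An example QMP exchange (the QOM path and returned values are
 * illustrative):
 *
 * -> { "execute": "x-query-virtio-queue-element",
 *      "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend",
 *                     "queue": 0 } }
 * <- { "return": { "name": "virtio-net", "index": 5,
 *                  "descs": [ { "addr": 1234567, "len": 1538,
 *                               "flags": [ "write" ] } ],
 *                  "avail": { ... }, "used": { ... } } }
 */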
4368 
4369 static const TypeInfo virtio_device_info = {
4370     .name = TYPE_VIRTIO_DEVICE,
4371     .parent = TYPE_DEVICE,
4372     .instance_size = sizeof(VirtIODevice),
4373     .class_init = virtio_device_class_init,
4374     .instance_finalize = virtio_device_instance_finalize,
4375     .abstract = true,
4376     .class_size = sizeof(VirtioDeviceClass),
4377 };
4378 
4379 static void virtio_register_types(void)
4380 {
4381     type_register_static(&virtio_device_info);
4382 }
4383 
4384 type_init(virtio_register_types)
4385 
4386 QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
4387                                    QEMUBHFunc *cb, void *opaque,
4388                                    const char *name)
4389 {
4390     DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4391 
4392     return qemu_bh_new_full(cb, opaque, name,
4393                             &transport->mem_reentrancy_guard);
4394 }
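
/*
 * A usage sketch: virtio devices create bottom halves through this helper
 * (or the virtio_bh_new_guarded() convenience macro in
 * include/hw/virtio/virtio.h) so the BH is covered by the transport's
 * memory re-entrancy guard:
 *
 *     s->bh = virtio_bh_new_guarded(DEVICE(vdev), foo_bh_cb, s);
 *
 * where foo_bh_cb and s are device-specific placeholders.
 */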
4395