/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "exec/tswap.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"

#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
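
/*
 * Worked example (illustrative only): for a split ring with num = 256 and
 * the default 4096-byte alignment, starting at desc = 0x1000:
 *
 *   avail = 0x1000 + 256 * sizeof(VRingDesc)            = 0x1000 + 4096 = 0x2000
 *   used  = vring_align(0x2000 + offsetof(VRingAvail, ring[256]), 4096)
 *         = vring_align(0x2000 + 4 + 512, 4096)         = 0x3000
 *
 * i.e. the used ring starts at the avail ring's end rounded up to the next
 * alignment boundary.
 */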

/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}
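
/*
 * Reader-side sketch (illustrative only): every accessor below follows the
 * same RCU pattern, so a hypothetical helper reading a 16-bit field of the
 * avail ring would look like this.  The caches pointer may be concurrently
 * replaced by virtio_init_region_cache(), hence the NULL check under the
 * read lock:
 *
 *     RCU_READ_LOCK_GUARD();
 *     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 *     if (!caches) {
 *         return 0;    // region caches torn down, treat as empty
 *     }
 *     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 */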

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}
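
/*
 * Usage sketch (illustrative only): device handlers typically disable
 * notifications while draining the queue, then re-enable and re-check to
 * close the race with a guest that added buffers in the meantime:
 *
 *     VirtQueueElement *elem;
 *
 *     do {
 *         virtio_queue_set_notification(vq, 0);
 *         while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
 *             // ... process the request ...
 *             virtqueue_push(vq, elem, len);   // len = bytes written to in_sg
 *             g_free(elem);
 *         }
 *         virtio_queue_set_notification(vq, 1);
 *     } while (!virtio_queue_empty(vq));
 */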

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}
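
/*
 * Illustrative truth table for the packed-ring AVAIL/USED flag pair above,
 * against the device's expected wrap counter (wc):
 *
 *   avail  used   available when wc=1   available when wc=0
 *     1      0           yes                   no
 *     0      1           no                    yes
 *     0      0           no                    no
 *     1      1           no                    no
 *
 * A descriptor is available only when the driver has flipped AVAIL to match
 * the device's wrap counter and USED still differs, i.e.
 * avail != used && avail == wc.
 */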

/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}
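
/*
 * Usage sketch (illustrative only): a device that pops a request but cannot
 * service it yet, e.g. because a backend is temporarily out of resources,
 * can return it to the ring and retry later:
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(*elem));
 *     if (elem && !backend_has_capacity()) {   // hypothetical predicate
 *         virtqueue_unpop(vq, elem, 0);        // 0 bytes were written
 *         g_free(elem);
 *         return;
 *     }
 */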

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    /*
     * For an indirect element, 'ndescs' is 1.
     * For all other elements, 'ndescs' is the number of descriptors
     * chained by NEXT (as set in virtqueue_packed_pop).
     * So when an 'elem' is written into the descriptor ring, its 'idx'
     * is the value of 'vq->used_idx' plus the 'ndescs' of the elements
     * flushed before it.
     */
    ndescs += vq->used_elems[0].ndescs;
    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}
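
/*
 * Usage sketch (illustrative only): completing several requests in a batch.
 * Each element is filled at its own offset, then a single flush publishes
 * them all and updates the used index once:
 *
 *     RCU_READ_LOCK_GUARD();
 *     for (i = 0; i < n; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);   // i = offset in batch
 *     }
 *     virtqueue_flush(vq, n);                         // publish all at once
 */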

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
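
/*
 * Typical request loop (illustrative only): virtqueue_push() is the
 * single-element convenience wrapper around fill + flush, usually paired
 * with virtqueue_pop() and a guest notification:
 *
 *     VirtQueueElement *elem;
 *
 *     while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
 *         size_t len = process_request(elem);   // hypothetical handler
 *         virtqueue_push(vq, elem, len);
 *         virtio_notify(vdev, vq);
 *         g_free(elem);
 *     }
 */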

/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t avail_idx, num_heads;

    /* Use shadow index whenever possible. */
    avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
                                              : vring_avail_idx(vq);
    num_heads = avail_idx - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /*
     * On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read.
     *
     * This is necessary even if we are using a shadow index, since
     * the shadow index could have been initialized by calling
     * vring_avail_idx() outside of this function, i.e., by a guest
     * memory read not accompanied by a barrier.
     */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

/* Reads the 'desc->next' descriptor into '*desc'. */
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off the end of the descriptor table. */
    if (desc->next >= max) {
        virtio_error(vdev, "Desc next is %u", desc->next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, desc->next);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    int64_t len = 0;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;
        unsigned int max = vq->vring.num;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache
                                           *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;
        unsigned int max = vq->vring.num;

        desc_cache = &caches->desc;

        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
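
/*
 * Usage sketch (illustrative only): a device can check whether the next
 * requests offer enough buffer space before committing to a pop, e.g.
 * requiring at least a 16-byte device-writable (in) area and no
 * device-readable (out) bytes:
 *
 *     if (!virtqueue_avail_bytes(vq, 16, 0)) {
 *         return;   // not enough room yet, wait for the next kick
 *     }
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 */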

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                                                                        false);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
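
/*
 * Layout of the single allocation made above (illustrative):
 *
 *     +------------------------------+  <- elem
 *     | VirtQueueElement (sz bytes)  |     device-specific embedding struct
 *     +------------------------------+  <- in_addr_ofs (aligned for hwaddr)
 *     | in_addr[in_num]              |
 *     | out_addr[out_num]            |
 *     +------------------------------+  <- in_sg_ofs (aligned for iovec)
 *     | in_sg[in_num]                |
 *     | out_sg[out_num]              |
 *     +------------------------------+  <- out_sg_end (total size)
 *
 * One g_malloc() covers the element and all four variable-length arrays,
 * so a single g_free() releases everything.
 */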

static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

1669     /* When we start there are neither input nor output buffers. */
1670     out_num = in_num = elem_entries = 0;
1671 
1672     max = vq->vring.num;
1673 
1674     if (vq->inuse >= vq->vring.num) {
1675         virtio_error(vdev, "Virtqueue size exceeded");
1676         goto done;
1677     }
1678 
1679     i = vq->last_avail_idx;
1680 
1681     caches = vring_get_region_caches(vq);
1682     if (!caches) {
1683         virtio_error(vdev, "Region caches not initialized");
1684         goto done;
1685     }
1686 
1687     if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
1688         virtio_error(vdev, "Cannot map descriptor ring");
1689         goto done;
1690     }
1691 
1692     desc_cache = &caches->desc;
1693     vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1694     id = desc.id;
1695     if (desc.flags & VRING_DESC_F_INDIRECT) {
1696         if (!desc.len || (desc.len % sizeof(VRingPackedDesc))) {
1697             virtio_error(vdev, "Invalid size for indirect buffer table");
1698             goto done;
1699         }
1700 
1701         /* loop over the indirect descriptor table */
1702         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1703                                        desc.addr, desc.len, false);
1704         desc_cache = &indirect_desc_cache;
1705         if (len < desc.len) {
1706             virtio_error(vdev, "Cannot map indirect buffer");
1707             goto done;
1708         }
1709 
1710         max = desc.len / sizeof(VRingPackedDesc);
1711         i = 0;
1712         vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1713     }
1714 
1715     /* Collect all the descriptors */
1716     do {
1717         bool map_ok;
1718 
1719         if (desc.flags & VRING_DESC_F_WRITE) {
1720             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1721                                         iov + out_num,
1722                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1723                                         desc.addr, desc.len);
1724         } else {
1725             if (in_num) {
1726                 virtio_error(vdev, "Incorrect order for descriptors");
1727                 goto err_undo_map;
1728             }
1729             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1730                                         VIRTQUEUE_MAX_SIZE, false,
1731                                         desc.addr, desc.len);
1732         }
1733         if (!map_ok) {
1734             goto err_undo_map;
1735         }
1736 
1737         /* If we've got too many, that implies a descriptor loop. */
1738         if (++elem_entries > max) {
1739             virtio_error(vdev, "Looped descriptor");
1740             goto err_undo_map;
1741         }
1742 
1743         rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1744                                              desc_cache ==
1745                                              &indirect_desc_cache);
1746     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1747 
1748     /* Now copy what we have collected and mapped */
1749     elem = virtqueue_alloc_element(sz, out_num, in_num);
1750     for (i = 0; i < out_num; i++) {
1751         elem->out_addr[i] = addr[i];
1752         elem->out_sg[i] = iov[i];
1753     }
1754     for (i = 0; i < in_num; i++) {
1755         elem->in_addr[i] = addr[out_num + i];
1756         elem->in_sg[i] = iov[out_num + i];
1757     }
1758 
1759     elem->index = id;
1760     elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1761     vq->last_avail_idx += elem->ndescs;
1762     vq->inuse += elem->ndescs;
1763 
1764     if (vq->last_avail_idx >= vq->vring.num) {
1765         vq->last_avail_idx -= vq->vring.num;
1766         vq->last_avail_wrap_counter ^= 1;
1767     }
1768 
1769     vq->shadow_avail_idx = vq->last_avail_idx;
1770     vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1771 
1772     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1773 done:
1774     address_space_cache_destroy(&indirect_desc_cache);
1775 
1776     return elem;
1777 
1778 err_undo_map:
1779     virtqueue_undo_map_desc(out_num, in_num, iov);
1780     goto done;
1781 }
1782 
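/*
 * virtqueue_pop:
 * @vq: the #VirtQueue to pop from
 * @sz: size of the VirtQueueElement (or device-specific superset) to
 *      allocate; must be at least sizeof(VirtQueueElement)
 *
 * Dispatches on the negotiated VIRTIO_F_RING_PACKED feature.  Returns
 * NULL if the device is disabled or no buffer is available; otherwise
 * the caller owns the element, returns it with virtqueue_push() and
 * releases it with g_free().
 */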
1783 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1784 {
1785     if (virtio_device_disabled(vq->vdev)) {
1786         return NULL;
1787     }
1788 
1789     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1790         return virtqueue_packed_pop(vq, sz);
1791     } else {
1792         return virtqueue_split_pop(vq, sz);
1793     }
1794 }
1795 
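/*
 * Drain a packed ring without mapping any guest memory: walk each
 * available chain, then push a zero-length used entry for it.
 * virtio_queue_set_notification(vq, 0) tells the driver it need not
 * kick us while we drain.
 */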
1796 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1797 {
1798     VRingMemoryRegionCaches *caches;
1799     MemoryRegionCache *desc_cache;
1800     unsigned int dropped = 0;
1801     VirtQueueElement elem = {};
1802     VirtIODevice *vdev = vq->vdev;
1803     VRingPackedDesc desc;
1804 
1805     RCU_READ_LOCK_GUARD();
1806 
1807     caches = vring_get_region_caches(vq);
1808     if (!caches) {
1809         return 0;
1810     }
1811 
1812     desc_cache = &caches->desc;
1813 
1814     virtio_queue_set_notification(vq, 0);
1815 
1816     while (vq->inuse < vq->vring.num) {
1817         unsigned int idx = vq->last_avail_idx;
1818         /*
1819          * Works similarly to virtqueue_pop but does not map buffers
1820          * and does not allocate any memory.
1821          */
1822         vring_packed_desc_read(vdev, &desc, desc_cache,
1823                                vq->last_avail_idx, true);
1824         if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1825             break;
1826         }
1827         elem.index = desc.id;
1828         elem.ndescs = 1;
1829         while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1830                                                vq->vring.num, &idx, false)) {
1831             ++elem.ndescs;
1832         }
1833         /*
1834          * Immediately push the element; there is nothing to unmap
1835          * as both in_num and out_num are set to 0.
1836          */
1837         virtqueue_push(vq, &elem, 0);
1838         dropped++;
1839         vq->last_avail_idx += elem.ndescs;
1840         if (vq->last_avail_idx >= vq->vring.num) {
1841             vq->last_avail_idx -= vq->vring.num;
1842             vq->last_avail_wrap_counter ^= 1;
1843         }
1844     }
1845 
1846     return dropped;
1847 }
1848 
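/*
 * Split-ring counterpart of the above: consume each available head and
 * push it back used with len 0, publishing the updated avail event
 * index as we go when VIRTIO_RING_F_EVENT_IDX is negotiated.
 */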
1849 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1850 {
1851     unsigned int dropped = 0;
1852     VirtQueueElement elem = {};
1853     VirtIODevice *vdev = vq->vdev;
1854     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1855 
1856     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1857         /* Works similarly to virtqueue_pop but does not map buffers
1858          * and does not allocate any memory. */
1859         smp_rmb();
1860         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1861             break;
1862         }
1863         vq->inuse++;
1864         vq->last_avail_idx++;
1865         if (fEventIdx) {
1866             vring_set_avail_event(vq, vq->last_avail_idx);
1867         }
1868         /* Immediately push the element; there is nothing to unmap
1869          * as both in_num and out_num are set to 0. */
1870         virtqueue_push(vq, &elem, 0);
1871         dropped++;
1872     }
1873 
1874     return dropped;
1875 }
1876 
1877 /* virtqueue_drop_all:
1878  * @vq: The #VirtQueue
1879  * Drops all queued buffers and indicates them to the guest
1880  * as if they are done. Useful when buffers can not be
1881  * processed but must be returned to the guest.
1882  */
1883 unsigned int virtqueue_drop_all(VirtQueue *vq)
1884 {
1885     struct VirtIODevice *vdev = vq->vdev;
1886 
1887     if (virtio_device_disabled(vq->vdev)) {
1888         return 0;
1889     }
1890 
1891     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1892         return virtqueue_packed_drop_all(vq);
1893     } else {
1894         return virtqueue_split_drop_all(vq);
1895     }
1896 }
1897 
1898 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1899  * it is what QEMU has always done by mistake.  We can change it sooner
1900  * or later by bumping the version number of the affected vm states.
1901  * In the meanwhile, since the in-memory layout of VirtQueueElement
1902  * has changed, we need to marshal to and from the layout that was
1903  * used before the change.
1904  */
1905 typedef struct VirtQueueElementOld {
1906     unsigned int index;
1907     unsigned int out_num;
1908     unsigned int in_num;
1909     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1910     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1911     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1912     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1913 } VirtQueueElementOld;
1914 
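/*
 * Load a VirtQueueElement in the legacy wire layout above.  The
 * iov_base pointers are deliberately not taken from the stream:
 * virtqueue_map() rebuilds them from the guest addresses, so a
 * migrated element never carries host pointers.
 */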
1915 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1916 {
1917     VirtQueueElement *elem;
1918     VirtQueueElementOld data;
1919     int i;
1920 
1921     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1922 
1923     /* TODO: teach all callers that this can fail, and return failure instead
1924      * of asserting here.
1925      * This is just one thing (there are probably more) that must be
1926      * fixed before we can allow NDEBUG compilation.
1927      */
1928     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1929     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1930 
1931     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1932     elem->index = data.index;
1933 
1934     for (i = 0; i < elem->in_num; i++) {
1935         elem->in_addr[i] = data.in_addr[i];
1936     }
1937 
1938     for (i = 0; i < elem->out_num; i++) {
1939         elem->out_addr[i] = data.out_addr[i];
1940     }
1941 
1942     for (i = 0; i < elem->in_num; i++) {
1943         /* Base is overwritten by virtqueue_map.  */
1944         elem->in_sg[i].iov_base = 0;
1945         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1946     }
1947 
1948     for (i = 0; i < elem->out_num; i++) {
1949         /* Base is overwritten by virtqueue_map.  */
1950         elem->out_sg[i].iov_base = 0;
1951         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1952     }
1953 
1954     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1955         qemu_get_be32s(f, &elem->ndescs);
1956     }
1957 
1958     virtqueue_map(vdev, elem);
1959     return elem;
1960 }
1961 
1962 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1963                                 VirtQueueElement *elem)
1964 {
1965     VirtQueueElementOld data;
1966     int i;
1967 
1968     memset(&data, 0, sizeof(data));
1969     data.index = elem->index;
1970     data.in_num = elem->in_num;
1971     data.out_num = elem->out_num;
1972 
1973     for (i = 0; i < elem->in_num; i++) {
1974         data.in_addr[i] = elem->in_addr[i];
1975     }
1976 
1977     for (i = 0; i < elem->out_num; i++) {
1978         data.out_addr[i] = elem->out_addr[i];
1979     }
1980 
1981     for (i = 0; i < elem->in_num; i++) {
1982         /* Base is overwritten by virtqueue_map when loading.  Do not
1983          * save it, as it would leak the QEMU address space layout.  */
1984         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1985     }
1986 
1987     for (i = 0; i < elem->out_num; i++) {
1988         /* Do not save iov_base as above.  */
1989         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1990     }
1991 
1992     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1993         qemu_put_be32s(f, &elem->ndescs);
1994     }
1995 
1996     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1997 }
1998 
1999 /* virtio device */
2000 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2001 {
2002     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2003     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2004 
2005     if (virtio_device_disabled(vdev)) {
2006         return;
2007     }
2008 
2009     if (k->notify) {
2010         k->notify(qbus->parent, vector);
2011     }
2012 }
2013 
2014 void virtio_update_irq(VirtIODevice *vdev)
2015 {
2016     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2017 }
2018 
2019 static int virtio_validate_features(VirtIODevice *vdev)
2020 {
2021     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2022 
2023     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2024         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2025         return -EFAULT;
2026     }
2027 
2028     if (k->validate_features) {
2029         return k->validate_features(vdev);
2030     } else {
2031         return 0;
2032     }
2033 }
2034 
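/*
 * Called by the transport on every guest write to the status register.
 * For VIRTIO 1.0+ devices, the interesting transition is the driver
 * setting FEATURES_OK: feature negotiation is validated at that point,
 * and a negative errno tells the transport to refuse the status write.
 * A well-behaved driver moves through
 * ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK.
 */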
2035 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
2036 {
2037     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2038     trace_virtio_set_status(vdev, val);
2039 
2040     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2041         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2042             val & VIRTIO_CONFIG_S_FEATURES_OK) {
2043             int ret = virtio_validate_features(vdev);
2044 
2045             if (ret) {
2046                 return ret;
2047             }
2048         }
2049     }
2050 
2051     if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2052         (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2053         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2054     }
2055 
2056     if (k->set_status) {
2057         k->set_status(vdev, val);
2058     }
2059     vdev->status = val;
2060 
2061     return 0;
2062 }
2063 
2064 static enum virtio_device_endian virtio_default_endian(void)
2065 {
2066     if (target_words_bigendian()) {
2067         return VIRTIO_DEVICE_ENDIAN_BIG;
2068     } else {
2069         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2070     }
2071 }
2072 
2073 static enum virtio_device_endian virtio_current_cpu_endian(void)
2074 {
2075     if (cpu_virtio_is_big_endian(current_cpu)) {
2076         return VIRTIO_DEVICE_ENDIAN_BIG;
2077     } else {
2078         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2079     }
2080 }
2081 
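/*
 * Reset a single virtqueue to its power-on state: clear the ring
 * addresses and all progress indices, restore the default size, and
 * drop the region caches.  Packed-ring wrap counters start out as true,
 * as required by the virtio spec.
 */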
2082 static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
2083 {
2084     vdev->vq[i].vring.desc = 0;
2085     vdev->vq[i].vring.avail = 0;
2086     vdev->vq[i].vring.used = 0;
2087     vdev->vq[i].last_avail_idx = 0;
2088     vdev->vq[i].shadow_avail_idx = 0;
2089     vdev->vq[i].used_idx = 0;
2090     vdev->vq[i].last_avail_wrap_counter = true;
2091     vdev->vq[i].shadow_avail_wrap_counter = true;
2092     vdev->vq[i].used_wrap_counter = true;
2093     virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2094     vdev->vq[i].signalled_used = 0;
2095     vdev->vq[i].signalled_used_valid = false;
2096     vdev->vq[i].notification = true;
2097     vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2098     vdev->vq[i].inuse = 0;
2099     virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2100 }
2101 
2102 void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
2103 {
2104     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2105 
2106     if (k->queue_reset) {
2107         k->queue_reset(vdev, queue_index);
2108     }
2109 
2110     __virtio_queue_reset(vdev, queue_index);
2111 }
2112 
2113 void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
2114 {
2115     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2116 
2117     /*
2118      * TODO: SeaBIOS is currently out of spec and triggers this error.
2119      * It needs to be fixed in SeaBIOS first; then this check can
2120      * be re-enabled for new machine types only, and also after
2121      * being converted to LOG_GUEST_ERROR.
2122      *
2123     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2124         error_report("queue_enable is only supported in devices of virtio "
2125                      "1.0 or later.");
2126     }
2127     */
2128 
2129     if (k->queue_enable) {
2130         k->queue_enable(vdev, queue_index);
2131     }
2132 }
2133 
2134 void virtio_reset(void *opaque)
2135 {
2136     VirtIODevice *vdev = opaque;
2137     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2138     int i;
2139 
2140     virtio_set_status(vdev, 0);
2141     if (current_cpu) {
2142         /* Guest initiated reset */
2143         vdev->device_endian = virtio_current_cpu_endian();
2144     } else {
2145         /* System reset */
2146         vdev->device_endian = virtio_default_endian();
2147     }
2148 
2149     if (vdev->vhost_started && k->get_vhost) {
2150         vhost_reset_device(k->get_vhost(vdev));
2151     }
2152 
2153     if (k->reset) {
2154         k->reset(vdev);
2155     }
2156 
2157     vdev->start_on_kick = false;
2158     vdev->started = false;
2159     vdev->broken = false;
2160     vdev->guest_features = 0;
2161     vdev->queue_sel = 0;
2162     vdev->status = 0;
2163     vdev->disabled = false;
2164     qatomic_set(&vdev->isr, 0);
2165     vdev->config_vector = VIRTIO_NO_VECTOR;
2166     virtio_notify_vector(vdev, vdev->config_vector);
2167 
2168     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2169         __virtio_queue_reset(vdev, i);
2170     }
2171 }
2172 
2173 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2174 {
2175     if (!vdev->vq[n].vring.num) {
2176         return;
2177     }
2178     vdev->vq[n].vring.desc = addr;
2179     virtio_queue_update_rings(vdev, n);
2180 }
2181 
2182 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2183 {
2184     return vdev->vq[n].vring.desc;
2185 }
2186 
2187 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2188                             hwaddr avail, hwaddr used)
2189 {
2190     if (!vdev->vq[n].vring.num) {
2191         return;
2192     }
2193     vdev->vq[n].vring.desc = desc;
2194     vdev->vq[n].vring.avail = avail;
2195     vdev->vq[n].vring.used = used;
2196     virtio_init_region_cache(vdev, n);
2197 }
2198 
2199 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2200 {
2201     /* Don't allow guest to flip queue between existent and
2202      * nonexistent states, or to set it to an invalid size.
2203      */
2204     if (!!num != !!vdev->vq[n].vring.num ||
2205         num > VIRTQUEUE_MAX_SIZE ||
2206         num < 0) {
2207         return;
2208     }
2209     vdev->vq[n].vring.num = num;
2210 }
2211 
2212 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2213 {
2214     return QLIST_FIRST(&vdev->vector_queues[vector]);
2215 }
2216 
2217 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2218 {
2219     return QLIST_NEXT(vq, node);
2220 }
2221 
2222 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2223 {
2224     return vdev->vq[n].vring.num;
2225 }
2226 
2227 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2228 {
2229     return vdev->vq[n].vring.num_default;
2230 }
2231 
2232 int virtio_get_num_queues(VirtIODevice *vdev)
2233 {
2234     int i;
2235 
2236     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2237         if (!virtio_queue_get_num(vdev, i)) {
2238             break;
2239         }
2240     }
2241 
2242     return i;
2243 }
2244 
2245 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2246 {
2247     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2248     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2249 
2250     /* virtio-1 compliant devices cannot change the alignment */
2251     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2252         error_report("tried to modify queue alignment for virtio-1 device");
2253         return;
2254     }
2255     /* Check that the transport told us it was going to do this
2256      * (so a buggy transport will immediately assert rather than
2257      * silently failing to migrate this state)
2258      */
2259     assert(k->has_variable_vring_alignment);
2260 
2261     if (align) {
2262         vdev->vq[n].vring.align = align;
2263         virtio_queue_update_rings(vdev, n);
2264     }
2265 }
2266 
2267 void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx)
2268 {
2269     if (!vq->vring.desc) {
2270         return;
2271     }
2272 
2273     /*
2274      * For packed VQs, the 16-bit value encodes a 1-bit wrap counter
2275      * (bit 15) and a 15-bit shadow_avail_idx.
2276      */
2277     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
2278         vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
2279         vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
2280     } else {
2281         vq->shadow_avail_idx = shadow_avail_idx;
2282     }
2283 }
2284 
2285 static void virtio_queue_notify_vq(VirtQueue *vq)
2286 {
2287     if (vq->vring.desc && vq->handle_output) {
2288         VirtIODevice *vdev = vq->vdev;
2289 
2290         if (unlikely(vdev->broken)) {
2291             return;
2292         }
2293 
2294         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2295         vq->handle_output(vdev, vq);
2296 
2297         if (unlikely(vdev->start_on_kick)) {
2298             virtio_set_started(vdev, true);
2299         }
2300     }
2301 }
2302 
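/*
 * Guest kick entry point used by the transports.  When an
 * ioeventfd-backed host notifier is active, setting it hands the kick
 * to whatever AioContext polls the queue; otherwise handle_output runs
 * synchronously here.
 */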
2303 void virtio_queue_notify(VirtIODevice *vdev, int n)
2304 {
2305     VirtQueue *vq = &vdev->vq[n];
2306 
2307     if (unlikely(!vq->vring.desc || vdev->broken)) {
2308         return;
2309     }
2310 
2311     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2312     if (vq->host_notifier_enabled) {
2313         event_notifier_set(&vq->host_notifier);
2314     } else if (vq->handle_output) {
2315         vq->handle_output(vdev, vq);
2316 
2317         if (unlikely(vdev->start_on_kick)) {
2318             virtio_set_started(vdev, true);
2319         }
2320     }
2321 }
2322 
2323 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2324 {
2325     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2326         VIRTIO_NO_VECTOR;
2327 }
2328 
2329 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2330 {
2331     VirtQueue *vq = &vdev->vq[n];
2332 
2333     if (n < VIRTIO_QUEUE_MAX) {
2334         if (vdev->vector_queues &&
2335             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2336             QLIST_REMOVE(vq, node);
2337         }
2338         vdev->vq[n].vector = vector;
2339         if (vdev->vector_queues &&
2340             vector != VIRTIO_NO_VECTOR) {
2341             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2342         }
2343     }
2344 }
2345 
2346 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2347                             VirtIOHandleOutput handle_output)
2348 {
2349     int i;
2350 
2351     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2352         if (vdev->vq[i].vring.num == 0)
2353             break;
2354     }
2355 
2356     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2357         abort();
2358 
2359     vdev->vq[i].vring.num = queue_size;
2360     vdev->vq[i].vring.num_default = queue_size;
2361     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2362     vdev->vq[i].handle_output = handle_output;
2363     vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2364 
2365     return &vdev->vq[i];
2366 }
2367 
2368 void virtio_delete_queue(VirtQueue *vq)
2369 {
2370     vq->vring.num = 0;
2371     vq->vring.num_default = 0;
2372     vq->handle_output = NULL;
2373     g_free(vq->used_elems);
2374     vq->used_elems = NULL;
2375     virtio_virtqueue_reset_region_cache(vq);
2376 }
2377 
2378 void virtio_del_queue(VirtIODevice *vdev, int n)
2379 {
2380     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2381         abort();
2382     }
2383 
2384     virtio_delete_queue(&vdev->vq[n]);
2385 }
2386 
2387 static void virtio_set_isr(VirtIODevice *vdev, int value)
2388 {
2389     uint8_t old = qatomic_read(&vdev->isr);
2390 
2391     /* Do not write ISR if it does not change, so that its cacheline remains
2392      * shared in the common case where the guest does not read it.
2393      */
2394     if ((old & value) != value) {
2395         qatomic_or(&vdev->isr, value);
2396     }
2397 }
2398 
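/*
 * With VIRTIO_RING_F_EVENT_IDX, interrupt suppression is driven by the
 * guest's used_event index rather than the NO_INTERRUPT flag.
 * vring_need_event() implements the spec's wrap-safe check, roughly:
 *
 *   (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx)
 *
 * i.e. notify only if used_event was crossed since the last signal.
 */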
2399 /* Called within rcu_read_lock(). */
2400 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2401 {
2402     uint16_t old, new;
2403     bool v;
2404     /* We need to expose used array entries before checking used event. */
2405     smp_mb();
2406     /* Always notify when queue is empty (if VIRTIO_F_NOTIFY_ON_EMPTY is negotiated) */
2407     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2408         !vq->inuse && virtio_queue_empty(vq)) {
2409         return true;
2410     }
2411 
2412     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2413         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2414     }
2415 
2416     v = vq->signalled_used_valid;
2417     vq->signalled_used_valid = true;
2418     old = vq->signalled_used;
2419     new = vq->signalled_used = vq->used_idx;
2420     return !v || vring_need_event(vring_get_used_event(vq), new, old);
2421 }
2422 
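/*
 * Packed-ring variant of the event check.  off_wrap packs the event
 * offset in bits 0..14 and its expected wrap counter in bit 15; when
 * the wrap bits disagree, the event refers to the previous ring pass,
 * so the offset is shifted down by vring.num before the comparison.
 */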
2423 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2424                                     uint16_t off_wrap, uint16_t new,
2425                                     uint16_t old)
2426 {
2427     int off = off_wrap & ~(1 << 15);
2428 
2429     if (wrap != off_wrap >> 15) {
2430         off -= vq->vring.num;
2431     }
2432 
2433     return vring_need_event(off, new, old);
2434 }
2435 
2436 /* Called within rcu_read_lock(). */
2437 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2438 {
2439     VRingPackedDescEvent e;
2440     uint16_t old, new;
2441     bool v;
2442     VRingMemoryRegionCaches *caches;
2443 
2444     caches = vring_get_region_caches(vq);
2445     if (!caches) {
2446         return false;
2447     }
2448 
2449     vring_packed_event_read(vdev, &caches->avail, &e);
2450 
2451     old = vq->signalled_used;
2452     new = vq->signalled_used = vq->used_idx;
2453     v = vq->signalled_used_valid;
2454     vq->signalled_used_valid = true;
2455 
2456     if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2457         return false;
2458     } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2459         return true;
2460     }
2461 
2462     return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2463                                          e.off_wrap, new, old);
2464 }
2465 
2466 /* Called within rcu_read_lock().  */
2467 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2468 {
2469     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2470         return virtio_packed_should_notify(vdev, vq);
2471     } else {
2472         return virtio_split_should_notify(vdev, vq);
2473     }
2474 }
2475 
2476 /* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
2477 static void virtio_notify_irqfd_deferred_fn(void *opaque)
2478 {
2479     EventNotifier *notifier = opaque;
2480     VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
2481 
2482     trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2483     event_notifier_set(notifier);
2484 }
2485 
2486 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2487 {
2488     WITH_RCU_READ_LOCK_GUARD() {
2489         if (!virtio_should_notify(vdev, vq)) {
2490             return;
2491         }
2492     }
2493 
2494     trace_virtio_notify_irqfd(vdev, vq);
2495 
2496     /*
2497      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2498      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2499      * incorrectly polling this bit during crashdump and hibernation
2500      * in MSI mode, causing a hang if this bit is never updated.
2501      * Recent releases of Windows do not really shut down, but rather
2502      * log out and hibernate to make the next startup faster.  Hence,
2503      * this manifested as a more serious hang during shutdown with
2504      * those drivers.
2505      * The next driver release, from 2016, fixed this problem, so working
2506      * around it is not a must, but it's easy to do, so let's do it here.
2507      *
2508      * Note: it's safe to update ISR from any thread as it was switched
2509      * to an atomic operation.
2510      */
2511     virtio_set_isr(vq->vdev, 0x1);
2512     defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
2513 }
2514 
2515 static void virtio_irq(VirtQueue *vq)
2516 {
2517     virtio_set_isr(vq->vdev, 0x1);
2518     virtio_notify_vector(vq->vdev, vq->vector);
2519 }
2520 
2521 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2522 {
2523     WITH_RCU_READ_LOCK_GUARD() {
2524         if (!virtio_should_notify(vdev, vq)) {
2525             return;
2526         }
2527     }
2528 
2529     trace_virtio_notify(vdev, vq);
2530     virtio_irq(vq);
2531 }
2532 
2533 void virtio_notify_config(VirtIODevice *vdev)
2534 {
2535     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2536         return;
2537 
2538     virtio_set_isr(vdev, 0x3);
2539     vdev->generation++;
2540     virtio_notify_vector(vdev, vdev->config_vector);
2541 }
2542 
2543 static bool virtio_device_endian_needed(void *opaque)
2544 {
2545     VirtIODevice *vdev = opaque;
2546 
2547     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2548     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2549         return vdev->device_endian != virtio_default_endian();
2550     }
2551     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2552     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2553 }
2554 
2555 static bool virtio_64bit_features_needed(void *opaque)
2556 {
2557     VirtIODevice *vdev = opaque;
2558 
2559     return (vdev->host_features >> 32) != 0;
2560 }
2561 
2562 static bool virtio_virtqueue_needed(void *opaque)
2563 {
2564     VirtIODevice *vdev = opaque;
2565 
2566     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2567 }
2568 
2569 static bool virtio_packed_virtqueue_needed(void *opaque)
2570 {
2571     VirtIODevice *vdev = opaque;
2572 
2573     return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2574 }
2575 
2576 static bool virtio_ringsize_needed(void *opaque)
2577 {
2578     VirtIODevice *vdev = opaque;
2579     int i;
2580 
2581     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2582         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2583             return true;
2584         }
2585     }
2586     return false;
2587 }
2588 
2589 static bool virtio_extra_state_needed(void *opaque)
2590 {
2591     VirtIODevice *vdev = opaque;
2592     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2593     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2594 
2595     return k->has_extra_state &&
2596         k->has_extra_state(qbus->parent);
2597 }
2598 
2599 static bool virtio_broken_needed(void *opaque)
2600 {
2601     VirtIODevice *vdev = opaque;
2602 
2603     return vdev->broken;
2604 }
2605 
2606 static bool virtio_started_needed(void *opaque)
2607 {
2608     VirtIODevice *vdev = opaque;
2609 
2610     return vdev->started;
2611 }
2612 
2613 static bool virtio_disabled_needed(void *opaque)
2614 {
2615     VirtIODevice *vdev = opaque;
2616 
2617     return vdev->disabled;
2618 }
2619 
2620 static const VMStateDescription vmstate_virtqueue = {
2621     .name = "virtqueue_state",
2622     .version_id = 1,
2623     .minimum_version_id = 1,
2624     .fields = (const VMStateField[]) {
2625         VMSTATE_UINT64(vring.avail, struct VirtQueue),
2626         VMSTATE_UINT64(vring.used, struct VirtQueue),
2627         VMSTATE_END_OF_LIST()
2628     }
2629 };
2630 
2631 static const VMStateDescription vmstate_packed_virtqueue = {
2632     .name = "packed_virtqueue_state",
2633     .version_id = 1,
2634     .minimum_version_id = 1,
2635     .fields = (const VMStateField[]) {
2636         VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2637         VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2638         VMSTATE_UINT16(used_idx, struct VirtQueue),
2639         VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2640         VMSTATE_UINT32(inuse, struct VirtQueue),
2641         VMSTATE_END_OF_LIST()
2642     }
2643 };
2644 
2645 static const VMStateDescription vmstate_virtio_virtqueues = {
2646     .name = "virtio/virtqueues",
2647     .version_id = 1,
2648     .minimum_version_id = 1,
2649     .needed = &virtio_virtqueue_needed,
2650     .fields = (const VMStateField[]) {
2651         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2652                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2653         VMSTATE_END_OF_LIST()
2654     }
2655 };
2656 
2657 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2658     .name = "virtio/packed_virtqueues",
2659     .version_id = 1,
2660     .minimum_version_id = 1,
2661     .needed = &virtio_packed_virtqueue_needed,
2662     .fields = (const VMStateField[]) {
2663         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2664                       VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2665         VMSTATE_END_OF_LIST()
2666     }
2667 };
2668 
2669 static const VMStateDescription vmstate_ringsize = {
2670     .name = "ringsize_state",
2671     .version_id = 1,
2672     .minimum_version_id = 1,
2673     .fields = (const VMStateField[]) {
2674         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2675         VMSTATE_END_OF_LIST()
2676     }
2677 };
2678 
2679 static const VMStateDescription vmstate_virtio_ringsize = {
2680     .name = "virtio/ringsize",
2681     .version_id = 1,
2682     .minimum_version_id = 1,
2683     .needed = &virtio_ringsize_needed,
2684     .fields = (const VMStateField[]) {
2685         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2686                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2687         VMSTATE_END_OF_LIST()
2688     }
2689 };
2690 
2691 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2692                            const VMStateField *field)
2693 {
2694     VirtIODevice *vdev = pv;
2695     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2696     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2697 
2698     if (!k->load_extra_state) {
2699         return -1;
2700     } else {
2701         return k->load_extra_state(qbus->parent, f);
2702     }
2703 }
2704 
2705 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2706                            const VMStateField *field, JSONWriter *vmdesc)
2707 {
2708     VirtIODevice *vdev = pv;
2709     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2710     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2711 
2712     k->save_extra_state(qbus->parent, f);
2713     return 0;
2714 }
2715 
2716 static const VMStateInfo vmstate_info_extra_state = {
2717     .name = "virtqueue_extra_state",
2718     .get = get_extra_state,
2719     .put = put_extra_state,
2720 };
2721 
2722 static const VMStateDescription vmstate_virtio_extra_state = {
2723     .name = "virtio/extra_state",
2724     .version_id = 1,
2725     .minimum_version_id = 1,
2726     .needed = &virtio_extra_state_needed,
2727     .fields = (const VMStateField[]) {
2728         {
2729             .name         = "extra_state",
2730             .version_id   = 0,
2731             .field_exists = NULL,
2732             .size         = 0,
2733             .info         = &vmstate_info_extra_state,
2734             .flags        = VMS_SINGLE,
2735             .offset       = 0,
2736         },
2737         VMSTATE_END_OF_LIST()
2738     }
2739 };
2740 
2741 static const VMStateDescription vmstate_virtio_device_endian = {
2742     .name = "virtio/device_endian",
2743     .version_id = 1,
2744     .minimum_version_id = 1,
2745     .needed = &virtio_device_endian_needed,
2746     .fields = (const VMStateField[]) {
2747         VMSTATE_UINT8(device_endian, VirtIODevice),
2748         VMSTATE_END_OF_LIST()
2749     }
2750 };
2751 
2752 static const VMStateDescription vmstate_virtio_64bit_features = {
2753     .name = "virtio/64bit_features",
2754     .version_id = 1,
2755     .minimum_version_id = 1,
2756     .needed = &virtio_64bit_features_needed,
2757     .fields = (const VMStateField[]) {
2758         VMSTATE_UINT64(guest_features, VirtIODevice),
2759         VMSTATE_END_OF_LIST()
2760     }
2761 };
2762 
2763 static const VMStateDescription vmstate_virtio_broken = {
2764     .name = "virtio/broken",
2765     .version_id = 1,
2766     .minimum_version_id = 1,
2767     .needed = &virtio_broken_needed,
2768     .fields = (const VMStateField[]) {
2769         VMSTATE_BOOL(broken, VirtIODevice),
2770         VMSTATE_END_OF_LIST()
2771     }
2772 };
2773 
2774 static const VMStateDescription vmstate_virtio_started = {
2775     .name = "virtio/started",
2776     .version_id = 1,
2777     .minimum_version_id = 1,
2778     .needed = &virtio_started_needed,
2779     .fields = (const VMStateField[]) {
2780         VMSTATE_BOOL(started, VirtIODevice),
2781         VMSTATE_END_OF_LIST()
2782     }
2783 };
2784 
2785 static const VMStateDescription vmstate_virtio_disabled = {
2786     .name = "virtio/disabled",
2787     .version_id = 1,
2788     .minimum_version_id = 1,
2789     .needed = &virtio_disabled_needed,
2790     .fields = (const VMStateField[]) {
2791         VMSTATE_BOOL(disabled, VirtIODevice),
2792         VMSTATE_END_OF_LIST()
2793     }
2794 };
2795 
2796 static const VMStateDescription vmstate_virtio = {
2797     .name = "virtio",
2798     .version_id = 1,
2799     .minimum_version_id = 1,
2800     .fields = (const VMStateField[]) {
2801         VMSTATE_END_OF_LIST()
2802     },
2803     .subsections = (const VMStateDescription * const []) {
2804         &vmstate_virtio_device_endian,
2805         &vmstate_virtio_64bit_features,
2806         &vmstate_virtio_virtqueues,
2807         &vmstate_virtio_ringsize,
2808         &vmstate_virtio_broken,
2809         &vmstate_virtio_extra_state,
2810         &vmstate_virtio_started,
2811         &vmstate_virtio_packed_virtqueues,
2812         &vmstate_virtio_disabled,
2813         NULL
2814     }
2815 };
2816 
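/*
 * Save the common virtio state in the historical wire order: transport
 * config, status/ISR/queue_sel, the low 32 feature bits, the config
 * space blob, then per-queue num/align/desc/last_avail_idx.  Everything
 * introduced later (64-bit features, VIRTIO-1 ring addresses,
 * packed-ring state, ...) travels in vmstate_virtio subsections so that
 * old streams remain loadable.
 */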
2817 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2818 {
2819     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2820     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2821     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2822     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2823     int i;
2824 
2825     if (k->save_config) {
2826         k->save_config(qbus->parent, f);
2827     }
2828 
2829     qemu_put_8s(f, &vdev->status);
2830     qemu_put_8s(f, &vdev->isr);
2831     qemu_put_be16s(f, &vdev->queue_sel);
2832     qemu_put_be32s(f, &guest_features_lo);
2833     qemu_put_be32(f, vdev->config_len);
2834     qemu_put_buffer(f, vdev->config, vdev->config_len);
2835 
2836     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2837         if (vdev->vq[i].vring.num == 0)
2838             break;
2839     }
2840 
2841     qemu_put_be32(f, i);
2842 
2843     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2844         if (vdev->vq[i].vring.num == 0)
2845             break;
2846 
2847         qemu_put_be32(f, vdev->vq[i].vring.num);
2848         if (k->has_variable_vring_alignment) {
2849             qemu_put_be32(f, vdev->vq[i].vring.align);
2850         }
2851         /*
2852          * Save desc now, the rest of the ring addresses are saved in
2853          * subsections for VIRTIO-1 devices.
2854          */
2855         qemu_put_be64(f, vdev->vq[i].vring.desc);
2856         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2857         if (k->save_queue) {
2858             k->save_queue(qbus->parent, i, f);
2859         }
2860     }
2861 
2862     if (vdc->save != NULL) {
2863         vdc->save(vdev, f);
2864     }
2865 
2866     if (vdc->vmsd) {
2867         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2868         if (ret) {
2869             return ret;
2870         }
2871     }
2872 
2873     /* Subsections */
2874     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2875 }
2876 
2877 /* A wrapper for use as a VMState .put function */
2878 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2879                               const VMStateField *field, JSONWriter *vmdesc)
2880 {
2881     return virtio_save(VIRTIO_DEVICE(opaque), f);
2882 }
2883 
2884 /* A wrapper for use as a VMState .get function */
2885 static int coroutine_mixed_fn
2886 virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2887                   const VMStateField *field)
2888 {
2889     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2890     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2891 
2892     return virtio_load(vdev, f, dc->vmsd->version_id);
2893 }
2894 
2895 const VMStateInfo virtio_vmstate_info = {
2896     .name = "virtio",
2897     .get = virtio_device_get,
2898     .put = virtio_device_put,
2899 };
2900 
2901 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2902 {
2903     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2904     bool bad = (val & ~(vdev->host_features)) != 0;
2905 
2906     val &= vdev->host_features;
2907     if (k->set_features) {
2908         k->set_features(vdev, val);
2909     }
2910     vdev->guest_features = val;
2911     return bad ? -1 : 0;
2912 }
2913 
2914 typedef struct VirtioSetFeaturesNocheckData {
2915     Coroutine *co;
2916     VirtIODevice *vdev;
2917     uint64_t val;
2918     int ret;
2919 } VirtioSetFeaturesNocheckData;
2920 
2921 static void virtio_set_features_nocheck_bh(void *opaque)
2922 {
2923     VirtioSetFeaturesNocheckData *data = opaque;
2924 
2925     data->ret = virtio_set_features_nocheck(data->vdev, data->val);
2926     aio_co_wake(data->co);
2927 }
2928 
2929 static int coroutine_mixed_fn
2930 virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
2931 {
2932     if (qemu_in_coroutine()) {
2933         VirtioSetFeaturesNocheckData data = {
2934             .co = qemu_coroutine_self(),
2935             .vdev = vdev,
2936             .val = val,
2937         };
2938         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
2939                                 virtio_set_features_nocheck_bh, &data);
2940         qemu_coroutine_yield();
2941         return data.ret;
2942     } else {
2943         return virtio_set_features_nocheck(vdev, val);
2944     }
2945 }
2946 
2947 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2948 {
2949     int ret;
2950     /*
2951      * The driver must not attempt to set features after feature negotiation
2952      * has finished.
2953      */
2954     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2955         return -EINVAL;
2956     }
2957 
2958     if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
2959         qemu_log_mask(LOG_GUEST_ERROR,
2960                       "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
2961                       __func__, vdev->name);
2962     }
2963 
2964     ret = virtio_set_features_nocheck(vdev, val);
2965     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2966         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2967         int i;
2968         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2969             if (vdev->vq[i].vring.num != 0) {
2970                 virtio_init_region_cache(vdev, i);
2971             }
2972         }
2973     }
2974     if (!ret) {
2975         if (!virtio_device_started(vdev, vdev->status) &&
2976             !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2977             vdev->start_on_kick = true;
2978         }
2979     }
2980     return ret;
2981 }
2982 
2983 static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
2984                                                            Error **errp)
2985 {
2986     VirtioBusState *bus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
2987     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
2988     DeviceState *proxy = DEVICE(BUS(bus)->parent);
2989 
2990     if (virtio_host_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA) &&
2991         k->ioeventfd_enabled(proxy)) {
2992         error_setg(errp,
2993                    "notification_data=on without ioeventfd=off is not supported");
2994     }
2995 }
2996 
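/*
 * Compute the visible config space size from the offered features: each
 * VirtIOFeature entry grows the config space to `end` bytes when its
 * flag is present in host_features.  A minimal sketch of how a
 * (hypothetical) "foo" device would use this:
 *
 *   static const VirtIOFeature foo_feature_sizes[] = {
 *       {.flags = 1ULL << VIRTIO_FOO_F_BAR,
 *        .end = endof(struct virtio_foo_config, bar)},
 *       {}
 *   };
 *   static const VirtIOConfigSizeParams foo_cfg_size_params = {
 *       .min_size = FOO_MIN_CONFIG_SIZE,
 *       .max_size = sizeof(struct virtio_foo_config),
 *       .feature_sizes = foo_feature_sizes,
 *   };
 *   ...
 *   config_size = virtio_get_config_size(&foo_cfg_size_params,
 *                                        host_features);
 */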
2997 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
2998                               uint64_t host_features)
2999 {
3000     size_t config_size = params->min_size;
3001     const VirtIOFeature *feature_sizes = params->feature_sizes;
3002     size_t i;
3003 
3004     for (i = 0; feature_sizes[i].flags != 0; i++) {
3005         if (host_features & feature_sizes[i].flags) {
3006             config_size = MAX(feature_sizes[i].end, config_size);
3007         }
3008     }
3009 
3010     assert(config_size <= params->max_size);
3011     return config_size;
3012 }
3013 
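/*
 * Counterpart of virtio_save().  The fixed-order fields are loaded
 * first, then the device's own state, then the subsections; only after
 * that is the endianness known, so the region caches and the derived
 * used_idx/shadow_avail_idx/inuse values are reconstructed last.
 */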
3014 int coroutine_mixed_fn
3015 virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
3016 {
3017     int i, ret;
3018     int32_t config_len;
3019     uint32_t num;
3020     uint32_t features;
3021     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3022     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3023     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3024 
3025     /*
3026      * We poison the endianness to ensure it does not get used before
3027      * subsections have been loaded.
3028      */
3029     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3030 
3031     if (k->load_config) {
3032         ret = k->load_config(qbus->parent, f);
3033         if (ret)
3034             return ret;
3035     }
3036 
3037     qemu_get_8s(f, &vdev->status);
3038     qemu_get_8s(f, &vdev->isr);
3039     qemu_get_be16s(f, &vdev->queue_sel);
3040     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3041         return -1;
3042     }
3043     qemu_get_be32s(f, &features);
3044 
3045     /*
3046      * Temporarily set guest_features low bits - needed by
3047      * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
3048      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3049      *
3050      * Note: devices should always test host features in future - don't create
3051      * new dependencies like this.
3052      */
3053     vdev->guest_features = features;
3054 
3055     config_len = qemu_get_be32(f);
3056 
3057     /*
3058      * There are cases where the incoming config can be bigger or smaller
3059      * than what we have; so load what we have space for, and skip
3060      * any excess that's in the stream.
3061      */
3062     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3063 
3064     while (config_len > vdev->config_len) {
3065         qemu_get_byte(f);
3066         config_len--;
3067     }
3068 
3069     num = qemu_get_be32(f);
3070 
3071     if (num > VIRTIO_QUEUE_MAX) {
3072         error_report("Invalid number of virtqueues: 0x%x", num);
3073         return -1;
3074     }
3075 
3076     for (i = 0; i < num; i++) {
3077         vdev->vq[i].vring.num = qemu_get_be32(f);
3078         if (k->has_variable_vring_alignment) {
3079             vdev->vq[i].vring.align = qemu_get_be32(f);
3080         }
3081         vdev->vq[i].vring.desc = qemu_get_be64(f);
3082         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3083         vdev->vq[i].signalled_used_valid = false;
3084         vdev->vq[i].notification = true;
3085 
3086         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3087             error_report("VQ %d address 0x0 "
3088                          "inconsistent with Host index 0x%x",
3089                          i, vdev->vq[i].last_avail_idx);
3090             return -1;
3091         }
3092         if (k->load_queue) {
3093             ret = k->load_queue(qbus->parent, i, f);
3094             if (ret)
3095                 return ret;
3096         }
3097     }
3098 
3099     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3100 
3101     if (vdc->load != NULL) {
3102         ret = vdc->load(vdev, f, version_id);
3103         if (ret) {
3104             return ret;
3105         }
3106     }
3107 
3108     if (vdc->vmsd) {
3109         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3110         if (ret) {
3111             return ret;
3112         }
3113     }
3114 
3115     /* Subsections */
3116     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3117     if (ret) {
3118         return ret;
3119     }
3120 
3121     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3122         vdev->device_endian = virtio_default_endian();
3123     }
3124 
3125     if (virtio_64bit_features_needed(vdev)) {
3126         /*
3127          * Subsection load filled vdev->guest_features.  Run them
3128          * through virtio_set_features to sanity-check them against
3129          * host_features.
3130          */
3131         uint64_t features64 = vdev->guest_features;
3132         if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
3133             error_report("Features 0x%" PRIx64 " unsupported. "
3134                          "Allowed features: 0x%" PRIx64,
3135                          features64, vdev->host_features);
3136             return -1;
3137         }
3138     } else {
3139         if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
3140             error_report("Features 0x%x unsupported. "
3141                          "Allowed features: 0x%" PRIx64,
3142                          features, vdev->host_features);
3143             return -1;
3144         }
3145     }
3146 
3147     if (!virtio_device_started(vdev, vdev->status) &&
3148         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3149         vdev->start_on_kick = true;
3150     }
3151 
3152     RCU_READ_LOCK_GUARD();
3153     for (i = 0; i < num; i++) {
3154         if (vdev->vq[i].vring.desc) {
3155             uint16_t nheads;
3156 
3157             /*
3158              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3159              * only the region cache needs to be set up.  Legacy devices need
3160              * to calculate used and avail ring addresses based on the desc
3161              * address.
3162              */
3163             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3164                 virtio_init_region_cache(vdev, i);
3165             } else {
3166                 virtio_queue_update_rings(vdev, i);
3167             }
3168 
3169             if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3170                 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3171                 vdev->vq[i].shadow_avail_wrap_counter =
3172                                         vdev->vq[i].last_avail_wrap_counter;
3173                 continue;
3174             }
3175 
3176             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3177             /* Check it isn't doing strange things with descriptor numbers. */
3178             if (nheads > vdev->vq[i].vring.num) {
3179                 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3180                              "inconsistent with Host index 0x%x: delta 0x%x",
3181                              i, vdev->vq[i].vring.num,
3182                              vring_avail_idx(&vdev->vq[i]),
3183                              vdev->vq[i].last_avail_idx, nheads);
3184                 vdev->vq[i].used_idx = 0;
3185                 vdev->vq[i].shadow_avail_idx = 0;
3186                 vdev->vq[i].inuse = 0;
3187                 continue;
3188             }
3189             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3190             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3191 
3192             /*
3193              * Some devices migrate VirtQueueElements that have been popped
3194              * from the avail ring but not yet returned to the used ring.
3195              * Since the maximum ring size is < UINT16_MAX, the subtraction
3196              * modulo UINT16_MAX + 1 below is safe.
3197              */
3198             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3199                                 vdev->vq[i].used_idx);
3200             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3201                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3202                              "used_idx 0x%x",
3203                              i, vdev->vq[i].vring.num,
3204                              vdev->vq[i].last_avail_idx,
3205                              vdev->vq[i].used_idx);
3206                 return -1;
3207             }
3208         }
3209     }
3210 
3211     if (vdc->post_load) {
3212         ret = vdc->post_load(vdev);
3213         if (ret) {
3214             return ret;
3215         }
3216     }
3217 
3218     return 0;
3219 }
3220 
3221 void virtio_cleanup(VirtIODevice *vdev)
3222 {
3223     qemu_del_vm_change_state_handler(vdev->vmstate);
3224 }
3225 
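/*
 * VM runstate hook: push the current status into the backend when the
 * VM (re)starts and tear it down when it stops.  The transport's
 * vmstate_change callback runs after the backend is started but before
 * it is stopped.
 */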
3226 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3227 {
3228     VirtIODevice *vdev = opaque;
3229     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3230     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3231     bool backend_run = running && virtio_device_started(vdev, vdev->status);
3232     vdev->vm_running = running;
3233 
3234     if (backend_run) {
3235         virtio_set_status(vdev, vdev->status);
3236     }
3237 
3238     if (k->vmstate_change) {
3239         k->vmstate_change(qbus->parent, backend_run);
3240     }
3241 
3242     if (!backend_run) {
3243         virtio_set_status(vdev, vdev->status);
3244     }
3245 }
3246 
3247 void virtio_instance_init_common(Object *proxy_obj, void *data,
3248                                  size_t vdev_size, const char *vdev_name)
3249 {
3250     DeviceState *vdev = data;
3251 
3252     object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3253                                        vdev_size, vdev_name, &error_abort,
3254                                        NULL);
3255     qdev_alias_all_properties(vdev, proxy_obj);
3256 }
3257 
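/*
 * Common initialisation for every virtio device: allocate the fixed
 * VIRTIO_QUEUE_MAX queue array (plus per-vector queue lists when the
 * transport supports MSI-X vectors), reset status and ISR, allocate the
 * config space blob, and register the runstate handler above.  A
 * config_size of 0 means the device exposes no config space.
 */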
3258 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3259 {
3260     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3261     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3262     int i;
3263     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3264 
3265     if (nvectors) {
3266         vdev->vector_queues =
3267             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3268     }
3269 
3270     vdev->start_on_kick = false;
3271     vdev->started = false;
3272     vdev->vhost_started = false;
3273     vdev->device_id = device_id;
3274     vdev->status = 0;
3275     qatomic_set(&vdev->isr, 0);
3276     vdev->queue_sel = 0;
3277     vdev->config_vector = VIRTIO_NO_VECTOR;
3278     vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3279     vdev->vm_running = runstate_is_running();
3280     vdev->broken = false;
3281     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3282         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3283         vdev->vq[i].vdev = vdev;
3284         vdev->vq[i].queue_index = i;
3285         vdev->vq[i].host_notifier_enabled = false;
3286     }
3287 
3288     vdev->name = virtio_id_to_name(device_id);
3289     vdev->config_len = config_size;
3290     if (vdev->config_len) {
3291         vdev->config = g_malloc0(config_size);
3292     } else {
3293         vdev->config = NULL;
3294     }
3295     vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3296             virtio_vmstate_change, vdev);
3297     vdev->device_endian = virtio_default_endian();
3298     vdev->use_guest_notifier_mask = true;
3299 }
3300 
3301 /*
3302  * Only devices that have already been around prior to defining the virtio
3303  * standard support legacy mode; this includes devices not specified in the
3304  * standard. All newer devices conform to the virtio standard only.
3305  */
3306 bool virtio_legacy_allowed(VirtIODevice *vdev)
3307 {
3308     switch (vdev->device_id) {
3309     case VIRTIO_ID_NET:
3310     case VIRTIO_ID_BLOCK:
3311     case VIRTIO_ID_CONSOLE:
3312     case VIRTIO_ID_RNG:
3313     case VIRTIO_ID_BALLOON:
3314     case VIRTIO_ID_RPMSG:
3315     case VIRTIO_ID_SCSI:
3316     case VIRTIO_ID_9P:
3317     case VIRTIO_ID_RPROC_SERIAL:
3318     case VIRTIO_ID_CAIF:
3319         return true;
3320     default:
3321         return false;
3322     }
3323 }
3324 
3325 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3326 {
3327     return vdev->disable_legacy_check;
3328 }
3329 
3330 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3331 {
3332     return vdev->vq[n].vring.desc;
3333 }
3334 
3335 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3336 {
3337     return virtio_queue_get_desc_addr(vdev, n) != 0;
3338 }
3339 
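/*
 * Modern transports (e.g. virtio-pci with its queue_enable register) can
 * report the queue state directly; otherwise fall back to the legacy
 * convention that a queue is enabled iff its descriptor ring address has
 * been set.
 */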
3340 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3341 {
3342     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3343     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3344 
3345     if (k->queue_enabled) {
3346         return k->queue_enabled(qbus->parent, n);
3347     }
3348     return virtio_queue_enabled_legacy(vdev, n);
3349 }
3350 
3351 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3352 {
3353     return vdev->vq[n].vring.avail;
3354 }
3355 
3356 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3357 {
3358     return vdev->vq[n].vring.used;
3359 }
3360 
3361 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3362 {
3363     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3364 }
3365 
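/*
 * With VIRTIO_RING_F_EVENT_IDX, one extra uint16_t (used_event/avail_event)
 * trails each split ring, hence the "+ s" below.  Packed rings only expose
 * a VRingPackedDescEvent in the driver and device areas.
 */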
3366 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3367 {
3368     int s;
3369 
3370     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3371         return sizeof(struct VRingPackedDescEvent);
3372     }
3373 
3374     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3375     return offsetof(VRingAvail, ring) +
3376         sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3377 }
3378 
3379 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3380 {
3381     int s;
3382 
3383     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3384         return sizeof(struct VRingPackedDescEvent);
3385     }
3386 
3387     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3388     return offsetof(VRingUsed, ring) +
3389         sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3390 }
3391 
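/*
 * For packed virtqueues, the position is reported as one 32-bit value
 * combining both ring indices and their wrap counters:
 *   bits  0..14  last_avail_idx      bit 15  last_avail_wrap_counter
 *   bits 16..30  used_idx            bit 31  used_wrap_counter
 */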
3392 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3393                                                            int n)
3394 {
3395     unsigned int avail, used;
3396 
3397     avail = vdev->vq[n].last_avail_idx;
3398     avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3399 
3400     used = vdev->vq[n].used_idx;
3401     used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3402 
    return avail | (used << 16);
3404 }
3405 
3406 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3407                                                       int n)
3408 {
3409     return vdev->vq[n].last_avail_idx;
3410 }
3411 
3412 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3413 {
3414     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3415         return virtio_queue_packed_get_last_avail_idx(vdev, n);
3416     } else {
3417         return virtio_queue_split_get_last_avail_idx(vdev, n);
3418     }
3419 }
3420 
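/* Decode the combined value produced by virtio_queue_packed_get_last_avail_idx(). */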
3421 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3422                                                    int n, unsigned int idx)
3423 {
3424     struct VirtQueue *vq = &vdev->vq[n];
3425 
3426     vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3427     vq->last_avail_wrap_counter =
3428         vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3429     idx >>= 16;
3430     vq->used_idx = idx & 0x7fff;
3431     vq->used_wrap_counter = !!(idx & 0x8000);
3432 }
3433 
3434 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3435                                                   int n, unsigned int idx)
3436 {
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
3439 }
3440 
3441 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3442                                      unsigned int idx)
3443 {
3444     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3445         virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3446     } else {
3447         virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3448     }
3449 }
3450 
3451 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3452                                                        int n)
3453 {
    /*
     * Nothing to do: unlike split rings, packed rings have no index in
     * guest memory from which the position could be restored.
     */
3456 }
3457 
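/*
 * Resynchronize from guest memory: everything up to the used ring index has
 * been completed, so avail ring processing can resume from there.  This
 * assumes no elements are left in flight.
 */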
3458 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3459                                                       int n)
3460 {
3461     RCU_READ_LOCK_GUARD();
3462     if (vdev->vq[n].vring.desc) {
3463         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3464         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3465     }
3466 }
3467 
3468 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3469 {
3470     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3471         virtio_queue_packed_restore_last_avail_idx(vdev, n);
3472     } else {
3473         virtio_queue_split_restore_last_avail_idx(vdev, n);
3474     }
3475 }
3476 
3477 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3478 {
    /* used_idx was already updated through set_last_avail_idx() */
3481 }
3482 
static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
3484 {
3485     RCU_READ_LOCK_GUARD();
3486     if (vdev->vq[n].vring.desc) {
3487         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3488     }
3489 }
3490 
3491 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3492 {
3493     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3494         return virtio_queue_packed_update_used_idx(vdev, n);
3495     } else {
3496         return virtio_split_packed_update_used_idx(vdev, n);
3497     }
3498 }
3499 
3500 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3501 {
3502     vdev->vq[n].signalled_used_valid = false;
3503 }
3504 
3505 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3506 {
3507     return vdev->vq + n;
3508 }
3509 
3510 uint16_t virtio_get_queue_index(VirtQueue *vq)
3511 {
3512     return vq->queue_index;
3513 }
3514 
3515 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3516 {
3517     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3518     if (event_notifier_test_and_clear(n)) {
3519         virtio_irq(vq);
3520     }
}

static void virtio_config_guest_notifier_read(EventNotifier *n)
3523 {
3524     VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
3525 
3526     if (event_notifier_test_and_clear(n)) {
3527         virtio_notify_config(vdev);
3528     }
3529 }
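
/*
 * When the guest notifier is backed by an irqfd (with_irqfd), interrupts are
 * injected by the kernel directly, so no userspace read handler is installed
 * on the eventfd.
 */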
3530 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3531                                                 bool with_irqfd)
3532 {
3533     if (assign && !with_irqfd) {
3534         event_notifier_set_handler(&vq->guest_notifier,
3535                                    virtio_queue_guest_notifier_read);
3536     } else {
3537         event_notifier_set_handler(&vq->guest_notifier, NULL);
3538     }
3539     if (!assign) {
        /*
         * Test and clear notifier before closing it,
         * in case poll callback didn't have time to run.
         */
3542         virtio_queue_guest_notifier_read(&vq->guest_notifier);
3543     }
3544 }
3545 
3546 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
3547                                                  bool assign, bool with_irqfd)
3548 {
3549     EventNotifier *n;
3550     n = &vdev->config_notifier;
3551     if (assign && !with_irqfd) {
3552         event_notifier_set_handler(n, virtio_config_guest_notifier_read);
3553     } else {
3554         event_notifier_set_handler(n, NULL);
3555     }
3556     if (!assign) {
        /*
         * Test and clear notifier before closing it,
         * in case poll callback didn't have time to run.
         */
3559         virtio_config_guest_notifier_read(n);
3560     }
3561 }
3562 
3563 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3564 {
3565     return &vq->guest_notifier;
3566 }
3567 
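/*
 * While the AioContext polls the vring memory directly, guest notifications
 * are suppressed so that the guest is spared pointless eventfd writes;
 * poll_end() re-enables them once polling stops.
 */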
3568 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3569 {
3570     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3571 
3572     virtio_queue_set_notification(vq, 0);
3573 }
3574 
3575 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3576 {
3577     EventNotifier *n = opaque;
3578     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3579 
3580     return vq->vring.desc && !virtio_queue_empty(vq);
3581 }
3582 
3583 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3584 {
3585     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3586 
3587     virtio_queue_notify_vq(vq);
3588 }
3589 
3590 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3591 {
3592     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3593 
3594     /* Caller polls once more after this to catch requests that race with us */
3595     virtio_queue_set_notification(vq, 1);
3596 }
3597 
3598 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3599 {
3600     /*
3601      * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
3602      * Re-enable them.  (And if detach has not been used before, notifications
3603      * being enabled is still the default state while a notifier is attached;
3604      * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
3605      * notifications enabled once the polling section is left.)
3606      */
3607     if (!virtio_queue_get_notification(vq)) {
3608         virtio_queue_set_notification(vq, 1);
3609     }
3610 
3611     aio_set_event_notifier(ctx, &vq->host_notifier,
3612                            virtio_queue_host_notifier_read,
3613                            virtio_queue_host_notifier_aio_poll,
3614                            virtio_queue_host_notifier_aio_poll_ready);
3615     aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3616                                 virtio_queue_host_notifier_aio_poll_begin,
3617                                 virtio_queue_host_notifier_aio_poll_end);
3618 
3619     /*
3620      * We will have ignored notifications about new requests from the guest
3621      * while no notifiers were attached, so "kick" the virt queue to process
3622      * those requests now.
3623      */
3624     event_notifier_set(&vq->host_notifier);
3625 }
3626 
3627 /*
3628  * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3629  * this for rx virtqueues and similar cases where the virtqueue handler
 * function does not pop all elements.  When the virtqueue is left
 * non-empty, polling consumes CPU cycles and should not be used.
3632  */
3633 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3634 {
3635     /* See virtio_queue_aio_attach_host_notifier() */
3636     if (!virtio_queue_get_notification(vq)) {
3637         virtio_queue_set_notification(vq, 1);
3638     }
3639 
3640     aio_set_event_notifier(ctx, &vq->host_notifier,
3641                            virtio_queue_host_notifier_read,
3642                            NULL, NULL);
3643 
3644     /*
3645      * See virtio_queue_aio_attach_host_notifier().
3646      * Note that this may be unnecessary for the type of virtqueues this
3647      * function is used for.  Still, it will not hurt to have a quick look into
3648      * whether we can/should process any of the virtqueue elements.
3649      */
3650     event_notifier_set(&vq->host_notifier);
3651 }
3652 
3653 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3654 {
3655     aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3656 
3657     /*
3658      * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
3659      * will run after io_poll_begin(), so by removing the notifier, we do not
3660      * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
3661      * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
3662      * notifications are enabled or disabled.  It does not really matter anyway;
3663      * we just removed the notifier, so we do not care about notifications until
3664      * we potentially re-attach it.  The attach_host_notifier functions will
3665      * ensure that notifications are enabled again when they are needed.
3666      */
3667 }
3668 
3669 void virtio_queue_host_notifier_read(EventNotifier *n)
3670 {
3671     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3672     if (event_notifier_test_and_clear(n)) {
3673         virtio_queue_notify_vq(vq);
3674     }
3675 }
3676 
3677 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3678 {
3679     return &vq->host_notifier;
3680 }
3681 
3682 EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
3683 {
3684     return &vdev->config_notifier;
3685 }
3686 
3687 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3688 {
3689     vq->host_notifier_enabled = enabled;
3690 }
3691 
3692 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3693                                       MemoryRegion *mr, bool assign)
3694 {
3695     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3696     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3697 
3698     if (k->set_host_notifier_mr) {
3699         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3700     }
3701 
3702     return -1;
3703 }
3704 
3705 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3706 {
3707     g_free(vdev->bus_name);
3708     vdev->bus_name = g_strdup(bus_name);
3709 }
3710 
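/*
 * Report an error and mark the device as broken.  For VIRTIO 1.0+ devices,
 * also set the NEEDS_RESET status bit and notify the guest so that it can
 * recover by resetting the device.
 */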
3711 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3712 {
3713     va_list ap;
3714 
3715     va_start(ap, fmt);
3716     error_vreport(fmt, ap);
3717     va_end(ap);
3718 
3719     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status |= VIRTIO_CONFIG_S_NEEDS_RESET;
3721         virtio_notify_config(vdev);
3722     }
3723 
3724     vdev->broken = true;
3725 }
3726 
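/*
 * The memory map has changed: rebuild the vring region caches of every
 * initialized queue.  Queues are assumed to be used contiguously, so the
 * first queue with vring.num == 0 terminates the scan.
 */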
3727 static void virtio_memory_listener_commit(MemoryListener *listener)
3728 {
3729     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3730     int i;
3731 
3732     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3733         if (vdev->vq[i].vring.num == 0) {
3734             break;
3735         }
3736         virtio_init_region_cache(vdev, i);
3737     }
3738 }
3739 
3740 static void virtio_device_realize(DeviceState *dev, Error **errp)
3741 {
3742     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3743     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3744     Error *err = NULL;
3745 
3746     /* Devices should either use vmsd or the load/save methods */
3747     assert(!vdc->vmsd || !vdc->load);
3748 
3749     if (vdc->realize != NULL) {
3750         vdc->realize(dev, &err);
3751         if (err != NULL) {
3752             error_propagate(errp, err);
3753             return;
3754         }
3755     }
3756 
    /* Devices should not use both ioeventfd and the notification data feature */
3758     virtio_device_check_notification_compatibility(vdev, &err);
3759     if (err != NULL) {
3760         error_propagate(errp, err);
3761         vdc->unrealize(dev);
3762         return;
3763     }
3764 
3765     virtio_bus_device_plugged(vdev, &err);
3766     if (err != NULL) {
3767         error_propagate(errp, err);
3768         vdc->unrealize(dev);
3769         return;
3770     }
3771 
3772     vdev->listener.commit = virtio_memory_listener_commit;
3773     vdev->listener.name = "virtio";
3774     memory_listener_register(&vdev->listener, vdev->dma_as);
3775 }
3776 
3777 static void virtio_device_unrealize(DeviceState *dev)
3778 {
3779     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3780     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3781 
3782     memory_listener_unregister(&vdev->listener);
3783     virtio_bus_device_unplugged(vdev);
3784 
3785     if (vdc->unrealize != NULL) {
3786         vdc->unrealize(dev);
3787     }
3788 
3789     g_free(vdev->bus_name);
3790     vdev->bus_name = NULL;
3791 }
3792 
3793 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3794 {
3795     int i;
3796     if (!vdev->vq) {
3797         return;
3798     }
3799 
3800     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3801         if (vdev->vq[i].vring.num == 0) {
3802             break;
3803         }
3804         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3805     }
3806     g_free(vdev->vq);
3807 }
3808 
3809 static void virtio_device_instance_finalize(Object *obj)
3810 {
3811     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3812 
3813     virtio_device_free_virtqueues(vdev);
3814 
3815     g_free(vdev->config);
3816     g_free(vdev->vector_queues);
3817 }
3818 
3819 static Property virtio_properties[] = {
3820     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3821     DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3822     DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3823     DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3824                      disable_legacy_check, false),
3825     DEFINE_PROP_END_OF_LIST(),
3826 };
3827 
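/*
 * Default start_ioeventfd implementation: assign a host notifier
 * (ioeventfd) to every populated queue, then kick each queue so that
 * requests already in the vring are processed.  On failure, unwind the
 * assignments made so far.
 */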
3828 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3829 {
3830     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3831     int i, n, r, err;
3832 
3833     /*
3834      * Batch all the host notifiers in a single transaction to avoid
3835      * quadratic time complexity in address_space_update_ioeventfds().
3836      */
3837     memory_region_transaction_begin();
3838     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3839         VirtQueue *vq = &vdev->vq[n];
3840         if (!virtio_queue_get_num(vdev, n)) {
3841             continue;
3842         }
3843         r = virtio_bus_set_host_notifier(qbus, n, true);
3844         if (r < 0) {
3845             err = r;
3846             goto assign_error;
3847         }
3848         event_notifier_set_handler(&vq->host_notifier,
3849                                    virtio_queue_host_notifier_read);
3850     }
3851 
3852     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3853         /* Kick right away to begin processing requests already in vring */
3854         VirtQueue *vq = &vdev->vq[n];
3855         if (!vq->vring.num) {
3856             continue;
3857         }
3858         event_notifier_set(&vq->host_notifier);
3859     }
3860     memory_region_transaction_commit();
3861     return 0;
3862 
3863 assign_error:
3864     i = n; /* save n for a second iteration after transaction is committed. */
3865     while (--n >= 0) {
3866         VirtQueue *vq = &vdev->vq[n];
3867         if (!virtio_queue_get_num(vdev, n)) {
3868             continue;
3869         }
3870 
3871         event_notifier_set_handler(&vq->host_notifier, NULL);
3872         r = virtio_bus_set_host_notifier(qbus, n, false);
3873         assert(r >= 0);
3874     }
3875     /*
3876      * The transaction expects the ioeventfds to be open when it
3877      * commits. Do it now, before the cleanup loop.
3878      */
3879     memory_region_transaction_commit();
3880 
3881     while (--i >= 0) {
3882         if (!virtio_queue_get_num(vdev, i)) {
3883             continue;
3884         }
3885         virtio_bus_cleanup_host_notifier(qbus, i);
3886     }
3887     return err;
3888 }
3889 
3890 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3891 {
3892     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3893     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3894 
3895     return virtio_bus_start_ioeventfd(vbus);
3896 }
3897 
3898 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3899 {
3900     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3901     int n, r;
3902 
3903     /*
3904      * Batch all the host notifiers in a single transaction to avoid
3905      * quadratic time complexity in address_space_update_ioeventfds().
3906      */
3907     memory_region_transaction_begin();
3908     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3909         VirtQueue *vq = &vdev->vq[n];
3910 
3911         if (!virtio_queue_get_num(vdev, n)) {
3912             continue;
3913         }
3914         event_notifier_set_handler(&vq->host_notifier, NULL);
3915         r = virtio_bus_set_host_notifier(qbus, n, false);
3916         assert(r >= 0);
3917     }
3918     /*
3919      * The transaction expects the ioeventfds to be open when it
3920      * commits. Do it now, before the cleanup loop.
3921      */
3922     memory_region_transaction_commit();
3923 
3924     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3925         if (!virtio_queue_get_num(vdev, n)) {
3926             continue;
3927         }
3928         virtio_bus_cleanup_host_notifier(qbus, n);
3929     }
3930 }
3931 
3932 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3933 {
3934     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3935     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3936 
3937     return virtio_bus_grab_ioeventfd(vbus);
3938 }
3939 
3940 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3941 {
3942     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3943     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3944 
3945     virtio_bus_release_ioeventfd(vbus);
3946 }
3947 
3948 static void virtio_device_class_init(ObjectClass *klass, void *data)
3949 {
3950     /* Set the default value here. */
3951     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3952     DeviceClass *dc = DEVICE_CLASS(klass);
3953 
3954     dc->realize = virtio_device_realize;
3955     dc->unrealize = virtio_device_unrealize;
3956     dc->bus_type = TYPE_VIRTIO_BUS;
3957     device_class_set_props(dc, virtio_properties);
3958     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3959     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3960 
3961     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3962 }
3963 
3964 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3965 {
3966     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3967     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3968 
3969     return virtio_bus_ioeventfd_enabled(vbus);
3970 }
3971 
3972 VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
3973                                                  uint16_t queue,
3974                                                  Error **errp)
3975 {
3976     VirtIODevice *vdev;
3977     VirtQueueStatus *status;
3978 
3979     vdev = qmp_find_virtio_device(path);
3980     if (vdev == NULL) {
3981         error_setg(errp, "Path %s is not a VirtIODevice", path);
3982         return NULL;
3983     }
3984 
3985     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
3986         error_setg(errp, "Invalid virtqueue number %d", queue);
3987         return NULL;
3988     }
3989 
3990     status = g_new0(VirtQueueStatus, 1);
3991     status->name = g_strdup(vdev->name);
3992     status->queue_index = vdev->vq[queue].queue_index;
3993     status->inuse = vdev->vq[queue].inuse;
3994     status->vring_num = vdev->vq[queue].vring.num;
3995     status->vring_num_default = vdev->vq[queue].vring.num_default;
3996     status->vring_align = vdev->vq[queue].vring.align;
3997     status->vring_desc = vdev->vq[queue].vring.desc;
3998     status->vring_avail = vdev->vq[queue].vring.avail;
3999     status->vring_used = vdev->vq[queue].vring.used;
4000     status->used_idx = vdev->vq[queue].used_idx;
4001     status->signalled_used = vdev->vq[queue].signalled_used;
4002     status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
4003 
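    /*
     * When vhost is active, the backend rather than QEMU owns the ring
     * position, so last_avail_idx is fetched from the backend via
     * vhost_get_vring_base(); shadow_avail_idx is QEMU-internal state and
     * is not meaningful in that case.
     */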
4004     if (vdev->vhost_started) {
4005         VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
4006         struct vhost_dev *hdev = vdc->get_vhost(vdev);
4007 
        /* Check if the vq index exists for vhost as well */
4009         if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
4010             status->has_last_avail_idx = true;
4011 
4012             int vhost_vq_index =
4013                 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
4014             struct vhost_vring_state state = {
4015                 .index = vhost_vq_index,
4016             };
4017 
4018             status->last_avail_idx =
4019                 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
4020         }
4021     } else {
4022         status->has_shadow_avail_idx = true;
4023         status->has_last_avail_idx = true;
4024         status->last_avail_idx = vdev->vq[queue].last_avail_idx;
4025         status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
4026     }
4027 
4028     return status;
4029 }
4030 
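/*
 * Translate VRING_DESC_F_* and packed-descriptor flag bits into a list of
 * human-readable flag names for QMP output.
 */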
4031 static strList *qmp_decode_vring_desc_flags(uint16_t flags)
4032 {
4033     strList *list = NULL;
4034     strList *node;
4035     int i;
4036 
4037     struct {
4038         uint16_t flag;
4039         const char *value;
4040     } map[] = {
4041         { VRING_DESC_F_NEXT, "next" },
4042         { VRING_DESC_F_WRITE, "write" },
4043         { VRING_DESC_F_INDIRECT, "indirect" },
4044         { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
4045         { 1 << VRING_PACKED_DESC_F_USED, "used" },
4046         { 0, "" }
4047     };
4048 
4049     for (i = 0; map[i].flag; i++) {
4050         if ((map[i].flag & flags) == 0) {
4051             continue;
4052         }
4053         node = g_malloc0(sizeof(strList));
4054         node->value = g_strdup(map[i].value);
4055         node->next = list;
4056         list = node;
4057     }
4058 
4059     return list;
4060 }
4061 
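/*
 * Describe, without popping it, the element at the queue's current head
 * (or at @index, if given), including its descriptor chain.
 */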
4062 VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
4063                                                      uint16_t queue,
4064                                                      bool has_index,
4065                                                      uint16_t index,
4066                                                      Error **errp)
4067 {
4068     VirtIODevice *vdev;
4069     VirtQueue *vq;
4070     VirtioQueueElement *element = NULL;
4071 
4072     vdev = qmp_find_virtio_device(path);
4073     if (vdev == NULL) {
4074         error_setg(errp, "Path %s is not a VirtIO device", path);
4075         return NULL;
4076     }
4077 
4078     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4079         error_setg(errp, "Invalid virtqueue number %d", queue);
4080         return NULL;
4081     }
4082     vq = &vdev->vq[queue];
4083 
4084     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
4085         error_setg(errp, "Packed ring not supported");
4086         return NULL;
4087     } else {
4088         unsigned int head, i, max;
4089         VRingMemoryRegionCaches *caches;
4090         MemoryRegionCache indirect_desc_cache;
4091         MemoryRegionCache *desc_cache;
4092         VRingDesc desc;
4093         VirtioRingDescList *list = NULL;
4094         VirtioRingDescList *node;
        int rc, ndescs;
4096 
4097         address_space_cache_init_empty(&indirect_desc_cache);
4098 
4099         RCU_READ_LOCK_GUARD();
4100 
4101         max = vq->vring.num;
4102 
4103         if (!has_index) {
4104             head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4105         } else {
4106             head = vring_avail_ring(vq, index % vq->vring.num);
4107         }
4108         i = head;
4109 
4110         caches = vring_get_region_caches(vq);
4111         if (!caches) {
4112             error_setg(errp, "Region caches not initialized");
4113             return NULL;
4114         }
4115         if (caches->desc.len < max * sizeof(VRingDesc)) {
4116             error_setg(errp, "Cannot map descriptor ring");
4117             return NULL;
4118         }
4119 
4120         desc_cache = &caches->desc;
4121         vring_split_desc_read(vdev, &desc, desc_cache, i);
4122         if (desc.flags & VRING_DESC_F_INDIRECT) {
4123             int64_t len;
4124             len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4125                                            desc.addr, desc.len, false);
4126             desc_cache = &indirect_desc_cache;
4127             if (len < desc.len) {
4128                 error_setg(errp, "Cannot map indirect buffer");
4129                 goto done;
4130             }
4131 
4132             max = desc.len / sizeof(VRingDesc);
4133             i = 0;
4134             vring_split_desc_read(vdev, &desc, desc_cache, i);
4135         }
4136 
4137         element = g_new0(VirtioQueueElement, 1);
4138         element->avail = g_new0(VirtioRingAvail, 1);
4139         element->used = g_new0(VirtioRingUsed, 1);
4140         element->name = g_strdup(vdev->name);
4141         element->index = head;
4142         element->avail->flags = vring_avail_flags(vq);
4143         element->avail->idx = vring_avail_idx(vq);
4144         element->avail->ring = head;
4145         element->used->flags = vring_used_flags(vq);
4146         element->used->idx = vring_used_idx(vq);
4147         ndescs = 0;
4148 
4149         do {
4150             /* A buggy driver may produce an infinite loop */
4151             if (ndescs >= max) {
4152                 break;
4153             }
4154             node = g_new0(VirtioRingDescList, 1);
4155             node->value = g_new0(VirtioRingDesc, 1);
4156             node->value->addr = desc.addr;
4157             node->value->len = desc.len;
4158             node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4159             node->next = list;
4160             list = node;
4161 
4162             ndescs++;
4163             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
4164         } while (rc == VIRTQUEUE_READ_DESC_MORE);
4165         element->descs = list;
4166 done:
4167         address_space_cache_destroy(&indirect_desc_cache);
4168     }
4169 
4170     return element;
4171 }
4172 
4173 static const TypeInfo virtio_device_info = {
4174     .name = TYPE_VIRTIO_DEVICE,
4175     .parent = TYPE_DEVICE,
4176     .instance_size = sizeof(VirtIODevice),
4177     .class_init = virtio_device_class_init,
4178     .instance_finalize = virtio_device_instance_finalize,
4179     .abstract = true,
4180     .class_size = sizeof(VirtioDeviceClass),
4181 };
4182 
4183 static void virtio_register_types(void)
4184 {
4185     type_register_static(&virtio_device_info);
4186 }
4187 
4188 type_init(virtio_register_types)
4189 
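/*
 * Create a bottom half that is tied to the transport device's memory
 * reentrancy guard, so that re-entering the device's MMIO handlers through
 * the BH (DMA-to-MMIO reentrancy) is caught by the guard.
 */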
4190 QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
4191                                    QEMUBHFunc *cb, void *opaque,
4192                                    const char *name)
4193 {
4194     DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4195 
4196     return qemu_bh_new_full(cb, opaque, name,
4197                             &transport->mem_reentrancy_guard);
4198 }
4199