xref: /openbmc/qemu/hw/virtio/virtio.c (revision 40558266b1bcea744427118fd6f848a9e31364e2)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-virtio.h"
17 #include "trace.h"
18 #include "qemu/defer-call.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/main-loop.h"
22 #include "qemu/module.h"
23 #include "exec/tswap.h"
24 #include "qom/object_interfaces.h"
25 #include "hw/core/cpu.h"
26 #include "hw/virtio/virtio.h"
27 #include "hw/virtio/vhost.h"
28 #include "migration/qemu-file-types.h"
29 #include "qemu/atomic.h"
30 #include "hw/virtio/virtio-bus.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/virtio/virtio-access.h"
33 #include "system/dma.h"
34 #include "system/runstate.h"
35 #include "virtio-qmp.h"
36 
37 #include "standard-headers/linux/virtio_ids.h"
38 #include "standard-headers/linux/vhost_types.h"
39 #include "standard-headers/linux/virtio_blk.h"
40 #include "standard-headers/linux/virtio_console.h"
41 #include "standard-headers/linux/virtio_gpu.h"
42 #include "standard-headers/linux/virtio_net.h"
43 #include "standard-headers/linux/virtio_scsi.h"
44 #include "standard-headers/linux/virtio_i2c.h"
45 #include "standard-headers/linux/virtio_balloon.h"
46 #include "standard-headers/linux/virtio_iommu.h"
47 #include "standard-headers/linux/virtio_mem.h"
48 #include "standard-headers/linux/virtio_vsock.h"
49 
50 /*
51  * Maximum size of virtio device config space
52  */
53 #define VHOST_USER_MAX_CONFIG_SIZE 256
54 
55 /*
56  * The alignment to use between consumer and producer parts of vring.
57  * x86 pagesize again. This is the default, used by transports like PCI
58  * which don't provide a means for the guest to tell the host the alignment.
59  */
60 #define VIRTIO_PCI_VRING_ALIGN         4096
61 
62 typedef struct VRingDesc
63 {
64     uint64_t addr;
65     uint32_t len;
66     uint16_t flags;
67     uint16_t next;
68 } VRingDesc;
69 
70 typedef struct VRingPackedDesc {
71     uint64_t addr;
72     uint32_t len;
73     uint16_t id;
74     uint16_t flags;
75 } VRingPackedDesc;
76 
77 typedef struct VRingAvail
78 {
79     uint16_t flags;
80     uint16_t idx;
81     uint16_t ring[];
82 } VRingAvail;
83 
84 typedef struct VRingUsedElem
85 {
86     uint32_t id;
87     uint32_t len;
88 } VRingUsedElem;
89 
90 typedef struct VRingUsed
91 {
92     uint16_t flags;
93     uint16_t idx;
94     VRingUsedElem ring[];
95 } VRingUsed;
96 
97 typedef struct VRingMemoryRegionCaches {
98     struct rcu_head rcu;
99     MemoryRegionCache desc;
100     MemoryRegionCache avail;
101     MemoryRegionCache used;
102 } VRingMemoryRegionCaches;
103 
104 typedef struct VRing
105 {
106     unsigned int num;
107     unsigned int num_default;
108     unsigned int align;
109     hwaddr desc;
110     hwaddr avail;
111     hwaddr used;
112     VRingMemoryRegionCaches *caches;
113 } VRing;
114 
115 typedef struct VRingPackedDescEvent {
116     uint16_t off_wrap;
117     uint16_t flags;
118 } VRingPackedDescEvent;
119 
120 struct VirtQueue
121 {
122     VRing vring;
123     VirtQueueElement *used_elems;
124 
125     /* Next head to pop */
126     uint16_t last_avail_idx;
127     bool last_avail_wrap_counter;
128 
129     /* Last avail_idx read from VQ. */
130     uint16_t shadow_avail_idx;
131     bool shadow_avail_wrap_counter;
132 
133     uint16_t used_idx;
134     bool used_wrap_counter;
135 
136     /* Last used index value we have signalled on */
137     uint16_t signalled_used;
138 
139     /* Is signalled_used valid? */
140     bool signalled_used_valid;
141 
142     /* Notification enabled? */
143     bool notification;
144 
145     uint16_t queue_index;
146 
147     unsigned int inuse;
148 
149     uint16_t vector;
150     VirtIOHandleOutput handle_output;
151     VirtIODevice *vdev;
152     EventNotifier guest_notifier;
153     EventNotifier host_notifier;
154     bool host_notifier_enabled;
155     QLIST_ENTRY(VirtQueue) node;
156 };
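/*
 * Index bookkeeping note: for split rings, last_avail_idx, shadow_avail_idx
 * and used_idx are free-running 16-bit counters that wrap at 2^16.  For
 * packed rings (VIRTIO_F_RING_PACKED) they wrap at vring.num instead, and
 * the *_wrap_counter flags record each wrap-around so that descriptor
 * availability can be derived from the AVAIL/USED flag bits (see
 * is_desc_avail() below).
 */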
157 
158 const char *virtio_device_names[] = {
159     [VIRTIO_ID_NET] = "virtio-net",
160     [VIRTIO_ID_BLOCK] = "virtio-blk",
161     [VIRTIO_ID_CONSOLE] = "virtio-serial",
162     [VIRTIO_ID_RNG] = "virtio-rng",
163     [VIRTIO_ID_BALLOON] = "virtio-balloon",
164     [VIRTIO_ID_IOMEM] = "virtio-iomem",
165     [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
166     [VIRTIO_ID_SCSI] = "virtio-scsi",
167     [VIRTIO_ID_9P] = "virtio-9p",
168     [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
169     [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
170     [VIRTIO_ID_CAIF] = "virtio-caif",
171     [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
172     [VIRTIO_ID_GPU] = "virtio-gpu",
173     [VIRTIO_ID_CLOCK] = "virtio-clk",
174     [VIRTIO_ID_INPUT] = "virtio-input",
175     [VIRTIO_ID_VSOCK] = "vhost-vsock",
176     [VIRTIO_ID_CRYPTO] = "virtio-crypto",
177     [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
178     [VIRTIO_ID_PSTORE] = "virtio-pstore",
179     [VIRTIO_ID_IOMMU] = "virtio-iommu",
180     [VIRTIO_ID_MEM] = "virtio-mem",
181     [VIRTIO_ID_SOUND] = "virtio-sound",
182     [VIRTIO_ID_FS] = "virtio-user-fs",
183     [VIRTIO_ID_PMEM] = "virtio-pmem",
184     [VIRTIO_ID_RPMB] = "virtio-rpmb",
185     [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
186     [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
187     [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
188     [VIRTIO_ID_SCMI] = "virtio-scmi",
189     [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
190     [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
191     [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
192     [VIRTIO_ID_CAN] = "virtio-can",
193     [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
194     [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
195     [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
196     [VIRTIO_ID_BT] = "virtio-bluetooth",
197     [VIRTIO_ID_GPIO] = "virtio-gpio"
198 };
199 
200 static const char *virtio_id_to_name(uint16_t device_id)
201 {
202     assert(device_id < G_N_ELEMENTS(virtio_device_names));
203     const char *name = virtio_device_names[device_id];
204     assert(name != NULL);
205     return name;
206 }
207 
208 /* Called within call_rcu().  */
209 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
210 {
211     assert(caches != NULL);
212     address_space_cache_destroy(&caches->desc);
213     address_space_cache_destroy(&caches->avail);
214     address_space_cache_destroy(&caches->used);
215     g_free(caches);
216 }
217 
218 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
219 {
220     VRingMemoryRegionCaches *caches;
221 
222     caches = qatomic_read(&vq->vring.caches);
223     qatomic_rcu_set(&vq->vring.caches, NULL);
224     if (caches) {
225         call_rcu(caches, virtio_free_region_cache, rcu);
226     }
227 }
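/*
 * The vring.caches pointer follows the usual RCU publish/retire pattern:
 * readers fetch it with qatomic_rcu_read() inside an RCU critical section,
 * while updaters swap in the new pointer first and hand the old mapping to
 * call_rcu() for destruction, so in-flight readers never see a freed
 * VRingMemoryRegionCaches.
 */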
228 
229 void virtio_init_region_cache(VirtIODevice *vdev, int n)
230 {
231     VirtQueue *vq = &vdev->vq[n];
232     VRingMemoryRegionCaches *old = vq->vring.caches;
233     VRingMemoryRegionCaches *new = NULL;
234     hwaddr addr, size;
235     int64_t len;
236     bool packed;
237 
238 
239     addr = vq->vring.desc;
240     if (!addr) {
241         goto out_no_cache;
242     }
243     new = g_new0(VRingMemoryRegionCaches, 1);
244     size = virtio_queue_get_desc_size(vdev, n);
245     packed = virtio_vdev_has_feature(vq->vdev,
246                                      VIRTIO_F_RING_PACKED);
247     len = address_space_cache_init(&new->desc, vdev->dma_as,
248                                    addr, size, packed);
249     if (len < size) {
250         virtio_error(vdev, "Cannot map desc");
251         goto err_desc;
252     }
253 
254     size = virtio_queue_get_used_size(vdev, n);
255     len = address_space_cache_init(&new->used, vdev->dma_as,
256                                    vq->vring.used, size, true);
257     if (len < size) {
258         virtio_error(vdev, "Cannot map used");
259         goto err_used;
260     }
261 
262     size = virtio_queue_get_avail_size(vdev, n);
263     len = address_space_cache_init(&new->avail, vdev->dma_as,
264                                    vq->vring.avail, size, false);
265     if (len < size) {
266         virtio_error(vdev, "Cannot map avail");
267         goto err_avail;
268     }
269 
270     qatomic_rcu_set(&vq->vring.caches, new);
271     if (old) {
272         call_rcu(old, virtio_free_region_cache, rcu);
273     }
274     return;
275 
276 err_avail:
277     address_space_cache_destroy(&new->avail);
278 err_used:
279     address_space_cache_destroy(&new->used);
280 err_desc:
281     address_space_cache_destroy(&new->desc);
282 out_no_cache:
283     g_free(new);
284     virtio_virtqueue_reset_region_cache(vq);
285 }
286 
287 /* virt queue functions */
288 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
289 {
290     VRing *vring = &vdev->vq[n].vring;
291 
292     if (!vring->num || !vring->desc || !vring->align) {
293         /* not yet set up -> nothing to do */
294         return;
295     }
296     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
297     vring->used = vring_align(vring->avail +
298                               offsetof(VRingAvail, ring[vring->num]),
299                               vring->align);
300     virtio_init_region_cache(vdev, n);
301 }
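/*
 * Worked example of the split-ring layout computed above, assuming
 * num = 256 entries and the default 4096-byte alignment: the descriptor
 * table takes 256 * 16 = 4096 bytes, so avail = desc + 4096; the avail
 * ring header plus entries take 4 + 256 * 2 = 516 bytes, and vring_align()
 * then rounds the used ring up to the next boundary, i.e. used = desc + 8192.
 */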
302 
303 /* Called within rcu_read_lock().  */
304 static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
305                                   MemoryRegionCache *cache, int i)
306 {
307     address_space_read_cached(cache, i * sizeof(VRingDesc),
308                               desc, sizeof(VRingDesc));
309     virtio_tswap64s(vdev, &desc->addr);
310     virtio_tswap32s(vdev, &desc->len);
311     virtio_tswap16s(vdev, &desc->flags);
312     virtio_tswap16s(vdev, &desc->next);
313 }
314 
315 static void vring_packed_event_read(VirtIODevice *vdev,
316                                     MemoryRegionCache *cache,
317                                     VRingPackedDescEvent *e)
318 {
319     hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
320     hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
321 
322     e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
323     /* Make sure flags is seen before off_wrap */
324     smp_rmb();
325     e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
326 }
327 
328 static void vring_packed_off_wrap_write(VirtIODevice *vdev,
329                                         MemoryRegionCache *cache,
330                                         uint16_t off_wrap)
331 {
332     hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
333 
334     virtio_stw_phys_cached(vdev, cache, off, off_wrap);
335     address_space_cache_invalidate(cache, off, sizeof(off_wrap));
336 }
337 
338 static void vring_packed_flags_write(VirtIODevice *vdev,
339                                      MemoryRegionCache *cache, uint16_t flags)
340 {
341     hwaddr off = offsetof(VRingPackedDescEvent, flags);
342 
343     virtio_stw_phys_cached(vdev, cache, off, flags);
344     address_space_cache_invalidate(cache, off, sizeof(flags));
345 }
346 
347 /* Called within rcu_read_lock().  */
348 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
349 {
350     return qatomic_rcu_read(&vq->vring.caches);
351 }
352 
353 /* Called within rcu_read_lock().  */
354 static inline uint16_t vring_avail_flags(VirtQueue *vq)
355 {
356     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
357     hwaddr pa = offsetof(VRingAvail, flags);
358 
359     if (!caches) {
360         return 0;
361     }
362 
363     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
364 }
365 
366 /* Called within rcu_read_lock().  */
367 static inline uint16_t vring_avail_idx(VirtQueue *vq)
368 {
369     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
370     hwaddr pa = offsetof(VRingAvail, idx);
371 
372     if (!caches) {
373         return 0;
374     }
375 
376     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
377     return vq->shadow_avail_idx;
378 }
379 
380 /* Called within rcu_read_lock().  */
381 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
382 {
383     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
384     hwaddr pa = offsetof(VRingAvail, ring[i]);
385 
386     if (!caches) {
387         return 0;
388     }
389 
390     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
391 }
392 
393 /* Called within rcu_read_lock().  */
394 static inline uint16_t vring_get_used_event(VirtQueue *vq)
395 {
396     return vring_avail_ring(vq, vq->vring.num);
397 }
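/*
 * With VIRTIO_RING_F_EVENT_IDX negotiated, the driver stores used_event in
 * the slot just past the end of the avail ring, which is why this reads
 * entry vring.num, one past the last ring index.
 */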
398 
399 /* Called within rcu_read_lock().  */
400 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
401                                     int i)
402 {
403     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
404     hwaddr pa = offsetof(VRingUsed, ring[i]);
405 
406     if (!caches) {
407         return;
408     }
409 
410     virtio_tswap32s(vq->vdev, &uelem->id);
411     virtio_tswap32s(vq->vdev, &uelem->len);
412     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
413     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
414 }
415 
416 /* Called within rcu_read_lock(). */
417 static inline uint16_t vring_used_flags(VirtQueue *vq)
418 {
419     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
420     hwaddr pa = offsetof(VRingUsed, flags);
421 
422     if (!caches) {
423         return 0;
424     }
425 
426     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
427 }
428 
429 /* Called within rcu_read_lock().  */
430 static uint16_t vring_used_idx(VirtQueue *vq)
431 {
432     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
433     hwaddr pa = offsetof(VRingUsed, idx);
434 
435     if (!caches) {
436         return 0;
437     }
438 
439     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
440 }
441 
442 /* Called within rcu_read_lock().  */
443 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
444 {
445     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
446     hwaddr pa = offsetof(VRingUsed, idx);
447 
448     if (caches) {
449         virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
450         address_space_cache_invalidate(&caches->used, pa, sizeof(val));
451     }
452 
453     vq->used_idx = val;
454 }
455 
456 /* Called within rcu_read_lock().  */
457 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
458 {
459     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
460     VirtIODevice *vdev = vq->vdev;
461     hwaddr pa = offsetof(VRingUsed, flags);
462     uint16_t flags;
463 
464     if (!caches) {
465         return;
466     }
467 
468     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
469     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
470     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
471 }
472 
473 /* Called within rcu_read_lock().  */
474 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
475 {
476     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
477     VirtIODevice *vdev = vq->vdev;
478     hwaddr pa = offsetof(VRingUsed, flags);
479     uint16_t flags;
480 
481     if (!caches) {
482         return;
483     }
484 
485     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
486     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
487     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
488 }
489 
490 /* Called within rcu_read_lock().  */
491 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
492 {
493     VRingMemoryRegionCaches *caches;
494     hwaddr pa;
495     if (!vq->notification) {
496         return;
497     }
498 
499     caches = vring_get_region_caches(vq);
500     if (!caches) {
501         return;
502     }
503 
504     pa = offsetof(VRingUsed, ring[vq->vring.num]);
505     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
506     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
507 }
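/*
 * Symmetrically to used_event above, avail_event lives just past the end
 * of the used ring (used->ring[num]); writing it tells the driver at which
 * avail index the device wants to be kicked again.
 */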
508 
509 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
510 {
511     RCU_READ_LOCK_GUARD();
512 
513     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
514         vring_set_avail_event(vq, vring_avail_idx(vq));
515     } else if (enable) {
516         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
517     } else {
518         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
519     }
520     if (enable) {
521         /* Expose avail event/used flags before caller checks the avail idx. */
522         smp_mb();
523     }
524 }
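/*
 * The smp_mb() on enable pairs with the driver's barrier between publishing
 * a new avail entry and checking our used flags/avail event.  Without it,
 * the device could re-enable notifications and still miss a buffer added
 * concurrently; callers are expected to re-check the avail index right
 * after re-enabling (see virtio_queue_enable_notification_and_check()).
 */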
525 
526 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
527 {
528     uint16_t off_wrap;
529     VRingPackedDescEvent e;
530     VRingMemoryRegionCaches *caches;
531 
532     RCU_READ_LOCK_GUARD();
533     caches = vring_get_region_caches(vq);
534     if (!caches) {
535         return;
536     }
537 
538     vring_packed_event_read(vq->vdev, &caches->used, &e);
539 
540     if (!enable) {
541         e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
542     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
543         off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
544         vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
545         /* Make sure off_wrap is written before flags */
546         smp_wmb();
547         e.flags = VRING_PACKED_EVENT_FLAG_DESC;
548     } else {
549         e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
550     }
551 
552     vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
553     if (enable) {
554         /* Expose avail event/used flags before caller checks the avail idx. */
555         smp_mb();
556     }
557 }
558 
559 bool virtio_queue_get_notification(VirtQueue *vq)
560 {
561     return vq->notification;
562 }
563 
564 void virtio_queue_set_notification(VirtQueue *vq, int enable)
565 {
566     vq->notification = enable;
567 
568     if (!vq->vring.desc) {
569         return;
570     }
571 
572     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
573         virtio_queue_packed_set_notification(vq, enable);
574     } else {
575         virtio_queue_split_set_notification(vq, enable);
576     }
577 }
578 
579 int virtio_queue_ready(VirtQueue *vq)
580 {
581     return vq->vring.avail != 0;
582 }
583 
584 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
585                                          uint16_t *flags,
586                                          MemoryRegionCache *cache,
587                                          int i)
588 {
589     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
590 
591     *flags = virtio_lduw_phys_cached(vdev, cache, off);
592 }
593 
594 static void vring_packed_desc_read(VirtIODevice *vdev,
595                                    VRingPackedDesc *desc,
596                                    MemoryRegionCache *cache,
597                                    int i, bool strict_order)
598 {
599     hwaddr off = i * sizeof(VRingPackedDesc);
600 
601     vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
602 
603     if (strict_order) {
604         /* Make sure flags is read before the rest of the fields. */
605         smp_rmb();
606     }
607 
608     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
609                               &desc->addr, sizeof(desc->addr));
610     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
611                               &desc->id, sizeof(desc->id));
612     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
613                               &desc->len, sizeof(desc->len));
614     virtio_tswap64s(vdev, &desc->addr);
615     virtio_tswap16s(vdev, &desc->id);
616     virtio_tswap32s(vdev, &desc->len);
617 }
618 
619 static void vring_packed_desc_write_data(VirtIODevice *vdev,
620                                          VRingPackedDesc *desc,
621                                          MemoryRegionCache *cache,
622                                          int i)
623 {
624     hwaddr off_id = i * sizeof(VRingPackedDesc) +
625                     offsetof(VRingPackedDesc, id);
626     hwaddr off_len = i * sizeof(VRingPackedDesc) +
627                     offsetof(VRingPackedDesc, len);
628 
629     virtio_tswap32s(vdev, &desc->len);
630     virtio_tswap16s(vdev, &desc->id);
631     address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
632     address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
633     address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
634     address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
635 }
636 
637 static void vring_packed_desc_write_flags(VirtIODevice *vdev,
638                                           VRingPackedDesc *desc,
639                                           MemoryRegionCache *cache,
640                                           int i)
641 {
642     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
643 
644     virtio_stw_phys_cached(vdev, cache, off, desc->flags);
645     address_space_cache_invalidate(cache, off, sizeof(desc->flags));
646 }
647 
648 static void vring_packed_desc_write(VirtIODevice *vdev,
649                                     VRingPackedDesc *desc,
650                                     MemoryRegionCache *cache,
651                                     int i, bool strict_order)
652 {
653     vring_packed_desc_write_data(vdev, desc, cache, i);
654     if (strict_order) {
655         /* Make sure data is written before flags. */
656         smp_wmb();
657     }
658     vring_packed_desc_write_flags(vdev, desc, cache, i);
659 }
660 
661 static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
662 {
663     bool avail, used;
664 
665     avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
666     used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
667     return (avail != used) && (avail == wrap_counter);
668 }
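/*
 * Packed-ring availability per the VIRTIO 1.1 spec: a descriptor is
 * available when its AVAIL flag differs from its USED flag and AVAIL
 * matches the ring's current wrap counter.  E.g. on the first pass
 * (wrap_counter = 1) the driver writes AVAIL=1/USED=0; once the device
 * marks it used, AVAIL == USED again and the entry only becomes available
 * after the next wrap flips the expected value.
 */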
669 
670 /* Fetch avail_idx from VQ memory only when we really need to know if
671  * the guest has added some buffers.
672  * Called within rcu_read_lock().  */
673 static int virtio_queue_empty_rcu(VirtQueue *vq)
674 {
675     if (virtio_device_disabled(vq->vdev)) {
676         return 1;
677     }
678 
679     if (unlikely(!vq->vring.avail)) {
680         return 1;
681     }
682 
683     if (vq->shadow_avail_idx != vq->last_avail_idx) {
684         return 0;
685     }
686 
687     return vring_avail_idx(vq) == vq->last_avail_idx;
688 }
689 
690 static int virtio_queue_split_empty(VirtQueue *vq)
691 {
692     bool empty;
693 
694     if (virtio_device_disabled(vq->vdev)) {
695         return 1;
696     }
697 
698     if (unlikely(!vq->vring.avail)) {
699         return 1;
700     }
701 
702     if (vq->shadow_avail_idx != vq->last_avail_idx) {
703         return 0;
704     }
705 
706     RCU_READ_LOCK_GUARD();
707     empty = vring_avail_idx(vq) == vq->last_avail_idx;
708     return empty;
709 }
710 
711 /* Called within rcu_read_lock().  */
712 static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
713 {
714     struct VRingPackedDesc desc;
715     VRingMemoryRegionCaches *cache;
716 
717     if (unlikely(!vq->vring.desc)) {
718         return 1;
719     }
720 
721     cache = vring_get_region_caches(vq);
722     if (!cache) {
723         return 1;
724     }
725 
726     vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
727                                  vq->last_avail_idx);
728 
729     return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
730 }
731 
732 static int virtio_queue_packed_empty(VirtQueue *vq)
733 {
734     RCU_READ_LOCK_GUARD();
735     return virtio_queue_packed_empty_rcu(vq);
736 }
737 
738 int virtio_queue_empty(VirtQueue *vq)
739 {
740     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
741         return virtio_queue_packed_empty(vq);
742     } else {
743         return virtio_queue_split_empty(vq);
744     }
745 }
746 
747 static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx)
748 {
749     if (unlikely(!vq->vring.avail)) {
750         return false;
751     }
752 
753     return (uint16_t)shadow_idx != vring_avail_idx(vq);
754 }
755 
756 static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx)
757 {
758     VRingPackedDesc desc;
759     VRingMemoryRegionCaches *caches;
760 
761     if (unlikely(!vq->vring.desc)) {
762         return false;
763     }
764 
765     caches = vring_get_region_caches(vq);
766     if (!caches) {
767         return false;
768     }
769 
770     vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
771                            shadow_idx, true);
772 
773     return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
774 }
775 
776 static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx)
777 {
778     if (virtio_device_disabled(vq->vdev)) {
779         return false;
780     }
781 
782     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
783         return virtio_queue_packed_poll(vq, shadow_idx);
784     } else {
785         return virtio_queue_split_poll(vq, shadow_idx);
786     }
787 }
788 
789 bool virtio_queue_enable_notification_and_check(VirtQueue *vq,
790                                                 int opaque)
791 {
792     virtio_queue_set_notification(vq, 1);
793 
794     if (opaque >= 0) {
795         return virtio_queue_poll(vq, (unsigned)opaque);
796     } else {
797         return false;
798     }
799 }
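/*
 * Intended usage, as a sketch ('opaque' being the shadow index a prior
 * virtqueue_get_avail_bytes() call returned):
 *
 *     if (virtio_queue_enable_notification_and_check(vq, opaque)) {
 *         // Buffers arrived while notifications were off: mask them
 *         // again and keep processing instead of waiting for a kick.
 *         virtio_queue_set_notification(vq, 0);
 *         ...continue popping the queue...
 *     }
 */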
800 
801 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
802                                unsigned int len)
803 {
804     AddressSpace *dma_as = vq->vdev->dma_as;
805     unsigned int offset;
806     int i;
807 
808     offset = 0;
809     for (i = 0; i < elem->in_num; i++) {
810         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
811 
812         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
813                          elem->in_sg[i].iov_len,
814                          DMA_DIRECTION_FROM_DEVICE, size);
815 
816         offset += size;
817     }
818 
819     for (i = 0; i < elem->out_num; i++)
820         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
821                          elem->out_sg[i].iov_len,
822                          DMA_DIRECTION_TO_DEVICE,
823                          elem->out_sg[i].iov_len);
824 }
825 
826 /* virtqueue_detach_element:
827  * @vq: The #VirtQueue
828  * @elem: The #VirtQueueElement
829  * @len: number of bytes written
830  *
831  * Detach the element from the virtqueue.  This function is suitable for device
832  * reset or other situations where a #VirtQueueElement is simply freed and will
833  * not be pushed or discarded.
834  */
835 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
836                               unsigned int len)
837 {
838     vq->inuse -= elem->ndescs;
839     virtqueue_unmap_sg(vq, elem, len);
840 }
841 
842 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
843 {
844     vq->last_avail_idx -= num;
845 }
846 
847 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
848 {
849     if (vq->last_avail_idx < num) {
850         vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
851         vq->last_avail_wrap_counter ^= 1;
852     } else {
853         vq->last_avail_idx -= num;
854     }
855 }
856 
857 /* virtqueue_unpop:
858  * @vq: The #VirtQueue
859  * @elem: The #VirtQueueElement
860  * @len: number of bytes written
861  *
862  * Pretend the most recent element wasn't popped from the virtqueue.  The next
863  * call to virtqueue_pop() will refetch the element.
864  */
865 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
866                      unsigned int len)
867 {
868 
869     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
870         virtqueue_packed_rewind(vq, 1);
871     } else {
872         virtqueue_split_rewind(vq, 1);
873     }
874 
875     virtqueue_detach_element(vq, elem, len);
876 }
877 
878 /* virtqueue_rewind:
879  * @vq: The #VirtQueue
880  * @num: Number of elements to push back
881  *
882  * Pretend that elements weren't popped from the virtqueue.  The next
883  * virtqueue_pop() will refetch the oldest element.
884  *
885  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
886  *
887  * Returns: true on success, false if @num is greater than the number of in use
888  * elements.
889  */
890 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
891 {
892     if (num > vq->inuse) {
893         return false;
894     }
895 
896     vq->inuse -= num;
897     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
898         virtqueue_packed_rewind(vq, num);
899     } else {
900         virtqueue_split_rewind(vq, num);
901     }
902     return true;
903 }
904 
905 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
906                     unsigned int len, unsigned int idx)
907 {
908     VRingUsedElem uelem;
909 
910     if (unlikely(!vq->vring.used)) {
911         return;
912     }
913 
914     idx = (idx + vq->used_idx) % vq->vring.num;
915 
916     uelem.id = elem->index;
917     uelem.len = len;
918     vring_used_write(vq, &uelem, idx);
919 }
920 
921 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
922                                   unsigned int len, unsigned int idx)
923 {
924     vq->used_elems[idx].index = elem->index;
925     vq->used_elems[idx].len = len;
926     vq->used_elems[idx].ndescs = elem->ndescs;
927 }
928 
929 static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
930                                    unsigned int len)
931 {
932     unsigned int i, steps, max_steps;
933 
934     i = vq->used_idx % vq->vring.num;
935     steps = 0;
936     /*
937      * We shouldn't need to increase 'i' by more than the distance
938      * between used_idx and last_avail_idx.
939      */
940     max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
941 
942     /* Search for element in vq->used_elems */
943     while (steps <= max_steps) {
944         /* Found element, set length and mark as filled */
945         if (vq->used_elems[i].index == elem->index) {
946             vq->used_elems[i].len = len;
947             vq->used_elems[i].in_order_filled = true;
948             break;
949         }
950 
951         steps += vq->used_elems[i].ndescs;
952         i += vq->used_elems[i].ndescs;
953 
954         if (i >= vq->vring.num) {
955             i -= vq->vring.num;
956         }
957     }
958 
959     /*
960      * We should be able to find a matching VirtQueueElement in
961      * used_elems. If we don't, this is an error.
962      */
963     if (steps >= max_steps) {
964         qemu_log_mask(LOG_GUEST_ERROR, "%s: %s cannot fill buffer id %u\n",
965                       __func__, vq->vdev->name, elem->index);
966     }
967 }
968 
969 static void virtqueue_packed_fill_desc(VirtQueue *vq,
970                                        const VirtQueueElement *elem,
971                                        unsigned int idx,
972                                        bool strict_order)
973 {
974     uint16_t head;
975     VRingMemoryRegionCaches *caches;
976     VRingPackedDesc desc = {
977         .id = elem->index,
978         .len = elem->len,
979     };
980     bool wrap_counter = vq->used_wrap_counter;
981 
982     if (unlikely(!vq->vring.desc)) {
983         return;
984     }
985 
986     head = vq->used_idx + idx;
987     if (head >= vq->vring.num) {
988         head -= vq->vring.num;
989         wrap_counter ^= 1;
990     }
991     if (wrap_counter) {
992         desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
993         desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
994     } else {
995         desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
996         desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
997     }
998 
999     caches = vring_get_region_caches(vq);
1000     if (!caches) {
1001         return;
1002     }
1003 
1004     vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
1005 }
1006 
1007 /* Called within rcu_read_lock().  */
1008 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
1009                     unsigned int len, unsigned int idx)
1010 {
1011     trace_virtqueue_fill(vq, elem, len, idx);
1012 
1013     virtqueue_unmap_sg(vq, elem, len);
1014 
1015     if (virtio_device_disabled(vq->vdev)) {
1016         return;
1017     }
1018 
1019     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1020         virtqueue_ordered_fill(vq, elem, len);
1021     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1022         virtqueue_packed_fill(vq, elem, len, idx);
1023     } else {
1024         virtqueue_split_fill(vq, elem, len, idx);
1025     }
1026 }
1027 
1028 /* Called within rcu_read_lock().  */
1029 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
1030 {
1031     uint16_t old, new;
1032 
1033     if (unlikely(!vq->vring.used)) {
1034         return;
1035     }
1036 
1037     /* Make sure buffer is written before we update index. */
1038     smp_wmb();
1039     trace_virtqueue_flush(vq, count);
1040     old = vq->used_idx;
1041     new = old + count;
1042     vring_used_idx_set(vq, new);
1043     vq->inuse -= count;
1044     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
1045         vq->signalled_used_valid = false;
1046 }
1047 
1048 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
1049 {
1050     unsigned int i, ndescs = 0;
1051 
1052     if (unlikely(!vq->vring.desc)) {
1053         return;
1054     }
1055 
1056     /*
1057      * For an indirect element, 'ndescs' is 1.
1058      * For all other elements, 'ndescs' is the number of
1059      * descriptors chained by NEXT (as set in virtqueue_packed_pop).
1060      * So when an 'elem' is filled into the descriptor ring,
1061      * the 'idx' of this 'elem' is 'vq->used_idx' plus the
1062      * accumulated 'ndescs' of the elements filled before it.
1063      */
1064     ndescs += vq->used_elems[0].ndescs;
1065     for (i = 1; i < count; i++) {
1066         virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1067         ndescs += vq->used_elems[i].ndescs;
1068     }
1069     virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
1070 
1071     vq->inuse -= ndescs;
1072     vq->used_idx += ndescs;
1073     if (vq->used_idx >= vq->vring.num) {
1074         vq->used_idx -= vq->vring.num;
1075         vq->used_wrap_counter ^= 1;
1076         vq->signalled_used_valid = false;
1077     }
1078 }
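/*
 * Note the ordering above: elements 1..count-1 are written first without
 * strict ordering, and only then is element 0 written with strict_order
 * set, so the smp_wmb() inside vring_packed_desc_write() publishes the
 * whole batch before the driver can observe the first used descriptor.
 */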
1079 
1080 static void virtqueue_ordered_flush(VirtQueue *vq)
1081 {
1082     unsigned int i = vq->used_idx % vq->vring.num;
1083     unsigned int ndescs = 0;
1084     uint16_t old = vq->used_idx;
1085     uint16_t new;
1086     bool packed;
1087     VRingUsedElem uelem;
1088 
1089     packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
1090 
1091     if (packed) {
1092         if (unlikely(!vq->vring.desc)) {
1093             return;
1094         }
1095     } else if (unlikely(!vq->vring.used)) {
1096         return;
1097     }
1098 
1099     /* First expected in-order element isn't ready, nothing to do */
1100     if (!vq->used_elems[i].in_order_filled) {
1101         return;
1102     }
1103 
1104     /* Search for filled elements in-order */
1105     while (vq->used_elems[i].in_order_filled) {
1106         /*
1107          * First entry for packed VQs is written last so the guest
1108          * doesn't see invalid descriptors.
1109          */
1110         if (packed && i != vq->used_idx) {
1111             virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1112         } else if (!packed) {
1113             uelem.id = vq->used_elems[i].index;
1114             uelem.len = vq->used_elems[i].len;
1115             vring_used_write(vq, &uelem, i);
1116         }
1117 
1118         vq->used_elems[i].in_order_filled = false;
1119         ndescs += vq->used_elems[i].ndescs;
1120         i += vq->used_elems[i].ndescs;
1121         if (i >= vq->vring.num) {
1122             i -= vq->vring.num;
1123         }
1124     }
1125 
1126     if (packed) {
1127         virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
1128         vq->used_idx += ndescs;
1129         if (vq->used_idx >= vq->vring.num) {
1130             vq->used_idx -= vq->vring.num;
1131             vq->used_wrap_counter ^= 1;
1132             vq->signalled_used_valid = false;
1133         }
1134     } else {
1135         /* Make sure buffer is written before we update index. */
1136         smp_wmb();
1137         new = old + ndescs;
1138         vring_used_idx_set(vq, new);
1139         if (unlikely((int16_t)(new - vq->signalled_used) <
1140                      (uint16_t)(new - old))) {
1141             vq->signalled_used_valid = false;
1142         }
1143     }
1144     vq->inuse -= ndescs;
1145 }
1146 
1147 void virtqueue_flush(VirtQueue *vq, unsigned int count)
1148 {
1149     if (virtio_device_disabled(vq->vdev)) {
1150         vq->inuse -= count;
1151         return;
1152     }
1153 
1154     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1155         virtqueue_ordered_flush(vq);
1156     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1157         virtqueue_packed_flush(vq, count);
1158     } else {
1159         virtqueue_split_flush(vq, count);
1160     }
1161 }
1162 
1163 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
1164                     unsigned int len)
1165 {
1166     RCU_READ_LOCK_GUARD();
1167     virtqueue_fill(vq, elem, len, 0);
1168     virtqueue_flush(vq, 1);
1169 }
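/*
 * Typical device-side completion path, as a sketch (error handling and
 * payload processing omitted):
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem) {
 *         ...read elem->out_sg, write up to 'len' bytes into elem->in_sg...
 *         virtqueue_push(vq, elem, len);
 *         virtio_notify(vdev, vq);
 *         g_free(elem);
 *     }
 */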
1170 
1171 /* Called within rcu_read_lock().  */
1172 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
1173 {
1174     uint16_t avail_idx, num_heads;
1175 
1176     /* Use shadow index whenever possible. */
1177     avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
1178                                               : vring_avail_idx(vq);
1179     num_heads = avail_idx - idx;
1180 
1181     /* Check it isn't doing very strange things with descriptor numbers. */
1182     if (num_heads > vq->vring.num) {
1183         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
1184                      idx, vq->shadow_avail_idx);
1185         return -EINVAL;
1186     }
1187     /*
1188      * On success, callers read a descriptor at vq->last_avail_idx.
1189      * Make sure descriptor read does not bypass avail index read.
1190      *
1191      * This is necessary even if we are using a shadow index, since
1192      * the shadow index could have been initialized by calling
1193      * vring_avail_idx() outside of this function, i.e., by a guest
1194      * memory read not accompanied by a barrier.
1195      */
1196     if (num_heads) {
1197         smp_rmb();
1198     }
1199 
1200     return num_heads;
1201 }
1202 
1203 /* Called within rcu_read_lock().  */
1204 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
1205                                unsigned int *head)
1206 {
1207     /* Grab the next descriptor number they're advertising, and increment
1208      * the index we've seen. */
1209     *head = vring_avail_ring(vq, idx % vq->vring.num);
1210 
1211     /* If their number is silly, that's a fatal mistake. */
1212     if (*head >= vq->vring.num) {
1213         virtio_error(vq->vdev, "Guest says index %u is available", *head);
1214         return false;
1215     }
1216 
1217     return true;
1218 }
1219 
1220 enum {
1221     VIRTQUEUE_READ_DESC_ERROR = -1,
1222     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1223     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1224 };
1225 
1226 /* Reads the 'desc->next' descriptor into '*desc'. */
1227 static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
1228                                           MemoryRegionCache *desc_cache,
1229                                           unsigned int max)
1230 {
1231     /* If this descriptor says it doesn't chain, we're done. */
1232     if (!(desc->flags & VRING_DESC_F_NEXT)) {
1233         return VIRTQUEUE_READ_DESC_DONE;
1234     }
1235 
1236     /* Check they're not leading us off end of descriptors. */
1237     if (desc->next >= max) {
1238         virtio_error(vdev, "Desc next is %u", desc->next);
1239         return VIRTQUEUE_READ_DESC_ERROR;
1240     }
1241 
1242     vring_split_desc_read(vdev, desc, desc_cache, desc->next);
1243     return VIRTQUEUE_READ_DESC_MORE;
1244 }
1245 
1246 /* Called within rcu_read_lock().  */
1247 static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
1248                             unsigned int *in_bytes, unsigned int *out_bytes,
1249                             unsigned max_in_bytes, unsigned max_out_bytes,
1250                             VRingMemoryRegionCaches *caches)
1251 {
1252     VirtIODevice *vdev = vq->vdev;
1253     unsigned int idx;
1254     unsigned int total_bufs, in_total, out_total;
1255     MemoryRegionCache indirect_desc_cache;
1256     int64_t len = 0;
1257     int rc;
1258 
1259     address_space_cache_init_empty(&indirect_desc_cache);
1260 
1261     idx = vq->last_avail_idx;
1262     total_bufs = in_total = out_total = 0;
1263 
1264     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
1265         MemoryRegionCache *desc_cache = &caches->desc;
1266         unsigned int num_bufs;
1267         VRingDesc desc;
1268         unsigned int i;
1269         unsigned int max = vq->vring.num;
1270 
1271         num_bufs = total_bufs;
1272 
1273         if (!virtqueue_get_head(vq, idx++, &i)) {
1274             goto err;
1275         }
1276 
1277         vring_split_desc_read(vdev, &desc, desc_cache, i);
1278 
1279         if (desc.flags & VRING_DESC_F_INDIRECT) {
1280             if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1281                 virtio_error(vdev, "Invalid size for indirect buffer table");
1282                 goto err;
1283             }
1284 
1285             /* If we've got too many, that implies a descriptor loop. */
1286             if (num_bufs >= max) {
1287                 virtio_error(vdev, "Looped descriptor");
1288                 goto err;
1289             }
1290 
1291             /* loop over the indirect descriptor table */
1292             len = address_space_cache_init(&indirect_desc_cache,
1293                                            vdev->dma_as,
1294                                            desc.addr, desc.len, false);
1295             desc_cache = &indirect_desc_cache;
1296             if (len < desc.len) {
1297                 virtio_error(vdev, "Cannot map indirect buffer");
1298                 goto err;
1299             }
1300 
1301             max = desc.len / sizeof(VRingDesc);
1302             num_bufs = i = 0;
1303             vring_split_desc_read(vdev, &desc, desc_cache, i);
1304         }
1305 
1306         do {
1307             /* If we've got too many, that implies a descriptor loop. */
1308             if (++num_bufs > max) {
1309                 virtio_error(vdev, "Looped descriptor");
1310                 goto err;
1311             }
1312 
1313             if (desc.flags & VRING_DESC_F_WRITE) {
1314                 in_total += desc.len;
1315             } else {
1316                 out_total += desc.len;
1317             }
1318             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1319                 goto done;
1320             }
1321 
1322             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1323         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1324 
1325         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1326             goto err;
1327         }
1328 
1329         if (desc_cache == &indirect_desc_cache) {
1330             address_space_cache_destroy(&indirect_desc_cache);
1331             total_bufs++;
1332         } else {
1333             total_bufs = num_bufs;
1334         }
1335     }
1336 
1337     if (rc < 0) {
1338         goto err;
1339     }
1340 
1341 done:
1342     address_space_cache_destroy(&indirect_desc_cache);
1343     if (in_bytes) {
1344         *in_bytes = in_total;
1345     }
1346     if (out_bytes) {
1347         *out_bytes = out_total;
1348     }
1349     return;
1350 
1351 err:
1352     in_total = out_total = 0;
1353     goto done;
1354 }
1355 
1356 static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1357                                            VRingPackedDesc *desc,
1358                                            MemoryRegionCache
1359                                            *desc_cache,
1360                                            unsigned int max,
1361                                            unsigned int *next,
1362                                            bool indirect)
1363 {
1364     /* If this descriptor says it doesn't chain, we're done. */
1365     if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1366         return VIRTQUEUE_READ_DESC_DONE;
1367     }
1368 
1369     ++*next;
1370     if (*next == max) {
1371         if (indirect) {
1372             return VIRTQUEUE_READ_DESC_DONE;
1373         } else {
1374             (*next) -= vq->vring.num;
1375         }
1376     }
1377 
1378     vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1379     return VIRTQUEUE_READ_DESC_MORE;
1380 }
1381 
1382 /* Called within rcu_read_lock().  */
1383 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1384                                              unsigned int *in_bytes,
1385                                              unsigned int *out_bytes,
1386                                              unsigned max_in_bytes,
1387                                              unsigned max_out_bytes,
1388                                              VRingMemoryRegionCaches *caches)
1389 {
1390     VirtIODevice *vdev = vq->vdev;
1391     unsigned int idx;
1392     unsigned int total_bufs, in_total, out_total;
1393     MemoryRegionCache indirect_desc_cache;
1394     MemoryRegionCache *desc_cache;
1395     int64_t len = 0;
1396     VRingPackedDesc desc;
1397     bool wrap_counter;
1398 
1399     address_space_cache_init_empty(&indirect_desc_cache);
1400 
1401     idx = vq->last_avail_idx;
1402     wrap_counter = vq->last_avail_wrap_counter;
1403     total_bufs = in_total = out_total = 0;
1404 
1405     for (;;) {
1406         unsigned int num_bufs = total_bufs;
1407         unsigned int i = idx;
1408         int rc;
1409         unsigned int max = vq->vring.num;
1410 
1411         desc_cache = &caches->desc;
1412 
1413         vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1414         if (!is_desc_avail(desc.flags, wrap_counter)) {
1415             break;
1416         }
1417 
1418         if (desc.flags & VRING_DESC_F_INDIRECT) {
1419             if (desc.len % sizeof(VRingPackedDesc)) {
1420                 virtio_error(vdev, "Invalid size for indirect buffer table");
1421                 goto err;
1422             }
1423 
1424             /* If we've got too many, that implies a descriptor loop. */
1425             if (num_bufs >= max) {
1426                 virtio_error(vdev, "Looped descriptor");
1427                 goto err;
1428             }
1429 
1430             /* loop over the indirect descriptor table */
1431             len = address_space_cache_init(&indirect_desc_cache,
1432                                            vdev->dma_as,
1433                                            desc.addr, desc.len, false);
1434             desc_cache = &indirect_desc_cache;
1435             if (len < desc.len) {
1436                 virtio_error(vdev, "Cannot map indirect buffer");
1437                 goto err;
1438             }
1439 
1440             max = desc.len / sizeof(VRingPackedDesc);
1441             num_bufs = i = 0;
1442             vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1443         }
1444 
1445         do {
1446             /* If we've got too many, that implies a descriptor loop. */
1447             if (++num_bufs > max) {
1448                 virtio_error(vdev, "Looped descriptor");
1449                 goto err;
1450             }
1451 
1452             if (desc.flags & VRING_DESC_F_WRITE) {
1453                 in_total += desc.len;
1454             } else {
1455                 out_total += desc.len;
1456             }
1457             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1458                 goto done;
1459             }
1460 
1461             rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1462                                                  &i, desc_cache ==
1463                                                  &indirect_desc_cache);
1464         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1465 
1466         if (desc_cache == &indirect_desc_cache) {
1467             address_space_cache_destroy(&indirect_desc_cache);
1468             total_bufs++;
1469             idx++;
1470         } else {
1471             idx += num_bufs - total_bufs;
1472             total_bufs = num_bufs;
1473         }
1474 
1475         if (idx >= vq->vring.num) {
1476             idx -= vq->vring.num;
1477             wrap_counter ^= 1;
1478         }
1479     }
1480 
1481     /* Record the index and wrap counter for a kick we want */
1482     vq->shadow_avail_idx = idx;
1483     vq->shadow_avail_wrap_counter = wrap_counter;
1484 done:
1485     address_space_cache_destroy(&indirect_desc_cache);
1486     if (in_bytes) {
1487         *in_bytes = in_total;
1488     }
1489     if (out_bytes) {
1490         *out_bytes = out_total;
1491     }
1492     return;
1493 
1494 err:
1495     in_total = out_total = 0;
1496     goto done;
1497 }
1498 
1499 int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1500                               unsigned int *out_bytes, unsigned max_in_bytes,
1501                               unsigned max_out_bytes)
1502 {
1503     uint16_t desc_size;
1504     VRingMemoryRegionCaches *caches;
1505 
1506     RCU_READ_LOCK_GUARD();
1507 
1508     if (unlikely(!vq->vring.desc)) {
1509         goto err;
1510     }
1511 
1512     caches = vring_get_region_caches(vq);
1513     if (!caches) {
1514         goto err;
1515     }
1516 
1517     desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1518                                 sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1519     if (caches->desc.len < vq->vring.num * desc_size) {
1520         virtio_error(vq->vdev, "Cannot map descriptor ring");
1521         goto err;
1522     }
1523 
1524     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1525         virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1526                                          max_in_bytes, max_out_bytes,
1527                                          caches);
1528     } else {
1529         virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1530                                         max_in_bytes, max_out_bytes,
1531                                         caches);
1532     }
1533 
1534     return (int)vq->shadow_avail_idx;
1535 err:
1536     if (in_bytes) {
1537         *in_bytes = 0;
1538     }
1539     if (out_bytes) {
1540         *out_bytes = 0;
1541     }
1542 
1543     return -1;
1544 }
1545 
1546 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1547                           unsigned int out_bytes)
1548 {
1549     unsigned int in_total, out_total;
1550 
1551     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1552     return in_bytes <= in_total && out_bytes <= out_total;
1553 }
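
/*
 * Usage sketch (editorial example, not part of the original file): a
 * device can ask whether the guest has queued a buffer of the required
 * shape before committing to a pop. The request/response struct names
 * are hypothetical.
 *
 *     if (!virtqueue_avail_bytes(vq, sizeof(struct my_resp),
 *                                sizeof(struct my_req))) {
 *         return; // wait until the guest queues a large enough buffer
 *     }
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 */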
1554 
1555 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1556                                hwaddr *addr, struct iovec *iov,
1557                                unsigned int max_num_sg, bool is_write,
1558                                hwaddr pa, size_t sz)
1559 {
1560     bool ok = false;
1561     unsigned num_sg = *p_num_sg;
1562     assert(num_sg <= max_num_sg);
1563 
1564     if (!sz) {
1565         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1566         goto out;
1567     }
1568 
1569     while (sz) {
1570         hwaddr len = sz;
1571 
1572         if (num_sg == max_num_sg) {
1573             virtio_error(vdev, "virtio: too many write descriptors in "
1574                                "indirect table");
1575             goto out;
1576         }
1577 
1578         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1579                                               is_write ?
1580                                               DMA_DIRECTION_FROM_DEVICE :
1581                                               DMA_DIRECTION_TO_DEVICE,
1582                                               MEMTXATTRS_UNSPECIFIED);
1583         if (!iov[num_sg].iov_base) {
1584             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1585             goto out;
1586         }
1587 
1588         iov[num_sg].iov_len = len;
1589         addr[num_sg] = pa;
1590 
1591         sz -= len;
1592         pa += len;
1593         num_sg++;
1594     }
1595     ok = true;
1596 
1597 out:
1598     *p_num_sg = num_sg;
1599     return ok;
1600 }
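
/*
 * Editorial sketch of what a successful call produces. Assuming a
 * hypothetical 8 KiB descriptor that straddles two host mappings,
 * dma_memory_map() returns a shortened len, the loop above keeps going,
 * and two iovec entries are emitted rather than failing:
 *
 *     unsigned num_sg = 0;
 *     if (virtqueue_map_desc(vdev, &num_sg, addr, iov,
 *                            VIRTQUEUE_MAX_SIZE, true,
 *                            desc.addr, desc.len)) {
 *         // iov[0..num_sg-1] now covers all desc.len bytes, possibly
 *         // split across several host mappings
 *     }
 */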
1601 
1602 /* Only used by error code paths before we have a VirtQueueElement (therefore
1603  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
1604  * yet.
1605  */
1606 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1607                                     struct iovec *iov)
1608 {
1609     unsigned int i;
1610 
1611     for (i = 0; i < out_num + in_num; i++) {
1612         int is_write = i >= out_num;
1613 
1614         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1615         iov++;
1616     }
1617 }
1618 
1619 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1620                                 hwaddr *addr, unsigned int num_sg,
1621                                 bool is_write)
1622 {
1623     unsigned int i;
1624     hwaddr len;
1625 
1626     for (i = 0; i < num_sg; i++) {
1627         len = sg[i].iov_len;
1628         sg[i].iov_base = dma_memory_map(vdev->dma_as,
1629                                         addr[i], &len, is_write ?
1630                                         DMA_DIRECTION_FROM_DEVICE :
1631                                         DMA_DIRECTION_TO_DEVICE,
1632                                         MEMTXATTRS_UNSPECIFIED);
1633         if (!sg[i].iov_base) {
1634             error_report("virtio: error trying to map MMIO memory");
1635             exit(1);
1636         }
1637         if (len != sg[i].iov_len) {
1638             error_report("virtio: unexpected memory split");
1639             exit(1);
1640         }
1641     }
1642 }
1643 
1644 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1645 {
1646     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1647     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1648                                                                         false);
1649 }
1650 
1651 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1652 {
1653     VirtQueueElement *elem;
1654     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1655     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1656     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1657     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1658     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1659     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1660 
1661     assert(sz >= sizeof(VirtQueueElement));
1662     elem = g_malloc(out_sg_end);
1663     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1664     elem->out_num = out_num;
1665     elem->in_num = in_num;
1666     elem->in_addr = (void *)elem + in_addr_ofs;
1667     elem->out_addr = (void *)elem + out_addr_ofs;
1668     elem->in_sg = (void *)elem + in_sg_ofs;
1669     elem->out_sg = (void *)elem + out_sg_ofs;
1670     return elem;
1671 }
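
/*
 * Editorial note on the layout built above: a single g_malloc() holds
 * the caller's structure (sz bytes, which must begin with an embedded
 * VirtQueueElement) followed by the four variable-length arrays, each
 * placed at an offset aligned for its element type:
 *
 *     [caller struct (sz)] [in_addr[]] [out_addr[]] [in_sg[]] [out_sg[]]
 *
 * This is why a device can pass sz = sizeof(MyRequest) to
 * virtqueue_pop(), with MyRequest starting with a VirtQueueElement
 * member, and later release everything with one g_free(). MyRequest is
 * a hypothetical device type, not something defined in this file.
 */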
1672 
1673 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1674 {
1675     unsigned int i, head, max, idx;
1676     VRingMemoryRegionCaches *caches;
1677     MemoryRegionCache indirect_desc_cache;
1678     MemoryRegionCache *desc_cache;
1679     int64_t len;
1680     VirtIODevice *vdev = vq->vdev;
1681     VirtQueueElement *elem = NULL;
1682     unsigned out_num, in_num, elem_entries;
1683     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1684     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1685     VRingDesc desc;
1686     int rc;
1687 
1688     address_space_cache_init_empty(&indirect_desc_cache);
1689 
1690     RCU_READ_LOCK_GUARD();
1691     if (virtio_queue_empty_rcu(vq)) {
1692         goto done;
1693     }
1694     /* Needed after virtio_queue_empty(), see comment in
1695      * virtqueue_num_heads(). */
1696     smp_rmb();
1697 
1698     /* When we start there are no input or output descriptors. */
1699     out_num = in_num = elem_entries = 0;
1700 
1701     max = vq->vring.num;
1702 
1703     if (vq->inuse >= vq->vring.num) {
1704         virtio_error(vdev, "Virtqueue size exceeded");
1705         goto done;
1706     }
1707 
1708     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1709         goto done;
1710     }
1711 
1712     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1713         vring_set_avail_event(vq, vq->last_avail_idx);
1714     }
1715 
1716     i = head;
1717 
1718     caches = vring_get_region_caches(vq);
1719     if (!caches) {
1720         virtio_error(vdev, "Region caches not initialized");
1721         goto done;
1722     }
1723 
1724     if (caches->desc.len < max * sizeof(VRingDesc)) {
1725         virtio_error(vdev, "Cannot map descriptor ring");
1726         goto done;
1727     }
1728 
1729     desc_cache = &caches->desc;
1730     vring_split_desc_read(vdev, &desc, desc_cache, i);
1731     if (desc.flags & VRING_DESC_F_INDIRECT) {
1732         if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1733             virtio_error(vdev, "Invalid size for indirect buffer table");
1734             goto done;
1735         }
1736 
1737         /* loop over the indirect descriptor table */
1738         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1739                                        desc.addr, desc.len, false);
1740         desc_cache = &indirect_desc_cache;
1741         if (len < desc.len) {
1742             virtio_error(vdev, "Cannot map indirect buffer");
1743             goto done;
1744         }
1745 
1746         max = desc.len / sizeof(VRingDesc);
1747         i = 0;
1748         vring_split_desc_read(vdev, &desc, desc_cache, i);
1749     }
1750 
1751     /* Collect all the descriptors */
1752     do {
1753         bool map_ok;
1754 
1755         if (desc.flags & VRING_DESC_F_WRITE) {
1756             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1757                                         iov + out_num,
1758                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1759                                         desc.addr, desc.len);
1760         } else {
1761             if (in_num) {
1762                 virtio_error(vdev, "Incorrect order for descriptors");
1763                 goto err_undo_map;
1764             }
1765             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1766                                         VIRTQUEUE_MAX_SIZE, false,
1767                                         desc.addr, desc.len);
1768         }
1769         if (!map_ok) {
1770             goto err_undo_map;
1771         }
1772 
1773         /* If we've got too many, that implies a descriptor loop. */
1774         if (++elem_entries > max) {
1775             virtio_error(vdev, "Looped descriptor");
1776             goto err_undo_map;
1777         }
1778 
1779         rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1780     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1781 
1782     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1783         goto err_undo_map;
1784     }
1785 
1786     /* Now copy what we have collected and mapped */
1787     elem = virtqueue_alloc_element(sz, out_num, in_num);
1788     elem->index = head;
1789     elem->ndescs = 1;
1790     for (i = 0; i < out_num; i++) {
1791         elem->out_addr[i] = addr[i];
1792         elem->out_sg[i] = iov[i];
1793     }
1794     for (i = 0; i < in_num; i++) {
1795         elem->in_addr[i] = addr[out_num + i];
1796         elem->in_sg[i] = iov[out_num + i];
1797     }
1798 
1799     if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
1800         idx = (vq->last_avail_idx - 1) % vq->vring.num;
1801         vq->used_elems[idx].index = elem->index;
1802         vq->used_elems[idx].len = elem->len;
1803         vq->used_elems[idx].ndescs = elem->ndescs;
1804     }
1805 
1806     vq->inuse++;
1807 
1808     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1809 done:
1810     address_space_cache_destroy(&indirect_desc_cache);
1811 
1812     return elem;
1813 
1814 err_undo_map:
1815     virtqueue_undo_map_desc(out_num, in_num, iov);
1816     goto done;
1817 }
1818 
1819 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1820 {
1821     unsigned int i, max;
1822     VRingMemoryRegionCaches *caches;
1823     MemoryRegionCache indirect_desc_cache;
1824     MemoryRegionCache *desc_cache;
1825     int64_t len;
1826     VirtIODevice *vdev = vq->vdev;
1827     VirtQueueElement *elem = NULL;
1828     unsigned out_num, in_num, elem_entries;
1829     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1830     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1831     VRingPackedDesc desc;
1832     uint16_t id;
1833     int rc;
1834 
1835     address_space_cache_init_empty(&indirect_desc_cache);
1836 
1837     RCU_READ_LOCK_GUARD();
1838     if (virtio_queue_packed_empty_rcu(vq)) {
1839         goto done;
1840     }
1841 
1842     /* When we start there are no input or output descriptors. */
1843     out_num = in_num = elem_entries = 0;
1844 
1845     max = vq->vring.num;
1846 
1847     if (vq->inuse >= vq->vring.num) {
1848         virtio_error(vdev, "Virtqueue size exceeded");
1849         goto done;
1850     }
1851 
1852     i = vq->last_avail_idx;
1853 
1854     caches = vring_get_region_caches(vq);
1855     if (!caches) {
1856         virtio_error(vdev, "Region caches not initialized");
1857         goto done;
1858     }
1859 
1860     if (caches->desc.len < max * sizeof(VRingDesc)) {
1861         virtio_error(vdev, "Cannot map descriptor ring");
1862         goto done;
1863     }
1864 
1865     desc_cache = &caches->desc;
1866     vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1867     id = desc.id;
1868     if (desc.flags & VRING_DESC_F_INDIRECT) {
1869         if (desc.len % sizeof(VRingPackedDesc)) {
1870             virtio_error(vdev, "Invalid size for indirect buffer table");
1871             goto done;
1872         }
1873 
1874         /* loop over the indirect descriptor table */
1875         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1876                                        desc.addr, desc.len, false);
1877         desc_cache = &indirect_desc_cache;
1878         if (len < desc.len) {
1879             virtio_error(vdev, "Cannot map indirect buffer");
1880             goto done;
1881         }
1882 
1883         max = desc.len / sizeof(VRingPackedDesc);
1884         i = 0;
1885         vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1886     }
1887 
1888     /* Collect all the descriptors */
1889     do {
1890         bool map_ok;
1891 
1892         if (desc.flags & VRING_DESC_F_WRITE) {
1893             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1894                                         iov + out_num,
1895                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1896                                         desc.addr, desc.len);
1897         } else {
1898             if (in_num) {
1899                 virtio_error(vdev, "Incorrect order for descriptors");
1900                 goto err_undo_map;
1901             }
1902             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1903                                         VIRTQUEUE_MAX_SIZE, false,
1904                                         desc.addr, desc.len);
1905         }
1906         if (!map_ok) {
1907             goto err_undo_map;
1908         }
1909 
1910         /* If we've got too many, that implies a descriptor loop. */
1911         if (++elem_entries > max) {
1912             virtio_error(vdev, "Looped descriptor");
1913             goto err_undo_map;
1914         }
1915 
1916         rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1917                                              desc_cache ==
1918                                              &indirect_desc_cache);
1919     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1920 
1921     if (desc_cache != &indirect_desc_cache) {
1922         /* Buffer ID is included in the last descriptor in the list. */
1923         id = desc.id;
1924     }
1925 
1926     /* Now copy what we have collected and mapped */
1927     elem = virtqueue_alloc_element(sz, out_num, in_num);
1928     for (i = 0; i < out_num; i++) {
1929         elem->out_addr[i] = addr[i];
1930         elem->out_sg[i] = iov[i];
1931     }
1932     for (i = 0; i < in_num; i++) {
1933         elem->in_addr[i] = addr[out_num + i];
1934         elem->in_sg[i] = iov[out_num + i];
1935     }
1936 
1937     elem->index = id;
1938     elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1939 
1940     if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
1941         vq->used_elems[vq->last_avail_idx].index = elem->index;
1942         vq->used_elems[vq->last_avail_idx].len = elem->len;
1943         vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
1944     }
1945 
1946     vq->last_avail_idx += elem->ndescs;
1947     vq->inuse += elem->ndescs;
1948 
1949     if (vq->last_avail_idx >= vq->vring.num) {
1950         vq->last_avail_idx -= vq->vring.num;
1951         vq->last_avail_wrap_counter ^= 1;
1952     }
1953 
1954     vq->shadow_avail_idx = vq->last_avail_idx;
1955     vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1956 
1957     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1958 done:
1959     address_space_cache_destroy(&indirect_desc_cache);
1960 
1961     return elem;
1962 
1963 err_undo_map:
1964     virtqueue_undo_map_desc(out_num, in_num, iov);
1965     goto done;
1966 }
1967 
1968 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1969 {
1970     if (virtio_device_disabled(vq->vdev)) {
1971         return NULL;
1972     }
1973 
1974     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1975         return virtqueue_packed_pop(vq, sz);
1976     } else {
1977         return virtqueue_split_pop(vq, sz);
1978     }
1979 }
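
/*
 * Typical device-side consume loop (editorial sketch; my_process_req is
 * hypothetical). out_sg carries driver-to-device data, in_sg is space
 * the device fills; the last argument of virtqueue_push() is the number
 * of bytes written into in_sg.
 *
 *     VirtQueueElement *elem;
 *
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         size_t written = my_process_req(elem->out_sg, elem->out_num,
 *                                         elem->in_sg, elem->in_num);
 *         virtqueue_push(vq, elem, written);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 */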
1980 
1981 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1982 {
1983     VRingMemoryRegionCaches *caches;
1984     MemoryRegionCache *desc_cache;
1985     unsigned int dropped = 0;
1986     VirtQueueElement elem = {};
1987     VirtIODevice *vdev = vq->vdev;
1988     VRingPackedDesc desc;
1989 
1990     RCU_READ_LOCK_GUARD();
1991 
1992     caches = vring_get_region_caches(vq);
1993     if (!caches) {
1994         return 0;
1995     }
1996 
1997     desc_cache = &caches->desc;
1998 
1999     virtio_queue_set_notification(vq, 0);
2000 
2001     while (vq->inuse < vq->vring.num) {
2002         unsigned int idx = vq->last_avail_idx;
2003         /*
2004          * Works similarly to virtqueue_pop but does not map buffers
2005          * and does not allocate any memory.
2006          */
2007         vring_packed_desc_read(vdev, &desc, desc_cache,
2008                                vq->last_avail_idx, true);
2009         if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
2010             break;
2011         }
2012         elem.index = desc.id;
2013         elem.ndescs = 1;
2014         while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
2015                                                vq->vring.num, &idx, false)) {
2016             ++elem.ndescs;
2017         }
2018         /*
2019          * immediately push the element, nothing to unmap
2020          * as both in_num and out_num are set to 0.
2021          */
2022         virtqueue_push(vq, &elem, 0);
2023         dropped++;
2024         vq->last_avail_idx += elem.ndescs;
2025         if (vq->last_avail_idx >= vq->vring.num) {
2026             vq->last_avail_idx -= vq->vring.num;
2027             vq->last_avail_wrap_counter ^= 1;
2028         }
2029     }
2030 
2031     return dropped;
2032 }
2033 
2034 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
2035 {
2036     unsigned int dropped = 0;
2037     VirtQueueElement elem = {};
2038     VirtIODevice *vdev = vq->vdev;
2039     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2040 
2041     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
2042         /* Works similarly to virtqueue_pop but does not map buffers
2043          * and does not allocate any memory. */
2044         smp_rmb();
2045         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
2046             break;
2047         }
2048         vq->inuse++;
2049         vq->last_avail_idx++;
2050         if (fEventIdx) {
2051             vring_set_avail_event(vq, vq->last_avail_idx);
2052         }
2053         /* immediately push the element, nothing to unmap
2054          * as both in_num and out_num are set to 0 */
2055         virtqueue_push(vq, &elem, 0);
2056         dropped++;
2057     }
2058 
2059     return dropped;
2060 }
2061 
2062 /* virtqueue_drop_all:
2063  * @vq: The #VirtQueue
2064  * Drops all queued buffers and completes them for the guest
2065  * as if they had been processed. Useful when buffers cannot be
2066  * processed but must be returned to the guest.
2067  */
2068 unsigned int virtqueue_drop_all(VirtQueue *vq)
2069 {
2070     struct VirtIODevice *vdev = vq->vdev;
2071 
2072     if (virtio_device_disabled(vq->vdev)) {
2073         return 0;
2074     }
2075 
2076     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2077         return virtqueue_packed_drop_all(vq);
2078     } else {
2079         return virtqueue_split_drop_all(vq);
2080     }
2081 }
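
/*
 * Editorial sketch: a device whose backend can no longer consume
 * requests may complete everything the guest has queued instead of
 * stalling it (backend_usable is a hypothetical condition):
 *
 *     if (!backend_usable) {
 *         if (virtqueue_drop_all(vq)) {
 *             virtio_notify(vdev, vq);
 *         }
 *     }
 */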
2082 
2083 /* Reading and writing a structure directly to QEMUFile is *awful*, but
2084  * it is what QEMU has always done by mistake.  We can change it sooner
2085  * or later by bumping the version number of the affected vm states.
2086  * In the meantime, since the in-memory layout of VirtQueueElement
2087  * has changed, we need to marshal to and from the layout that was
2088  * used before the change.
2089  */
2090 typedef struct VirtQueueElementOld {
2091     unsigned int index;
2092     unsigned int out_num;
2093     unsigned int in_num;
2094     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
2095     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
2096     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
2097     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
2098 } VirtQueueElementOld;
2099 
2100 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
2101 {
2102     VirtQueueElement *elem;
2103     VirtQueueElementOld data;
2104     int i;
2105 
2106     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2107 
2108     /* TODO: teach all callers that this can fail, and return failure instead
2109      * of asserting here.
2110      * This is just one thing (there are probably more) that must be
2111      * fixed before we can allow NDEBUG compilation.
2112      */
2113     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
2114     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
2115 
2116     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
2117     elem->index = data.index;
2118 
2119     for (i = 0; i < elem->in_num; i++) {
2120         elem->in_addr[i] = data.in_addr[i];
2121     }
2122 
2123     for (i = 0; i < elem->out_num; i++) {
2124         elem->out_addr[i] = data.out_addr[i];
2125     }
2126 
2127     for (i = 0; i < elem->in_num; i++) {
2128         /* Base is overwritten by virtqueue_map.  */
2129         elem->in_sg[i].iov_base = 0;
2130         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
2131     }
2132 
2133     for (i = 0; i < elem->out_num; i++) {
2134         /* Base is overwritten by virtqueue_map.  */
2135         elem->out_sg[i].iov_base = 0;
2136         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
2137     }
2138 
2139     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2140         qemu_get_be32s(f, &elem->ndescs);
2141     }
2142 
2143     virtqueue_map(vdev, elem);
2144     return elem;
2145 }
2146 
2147 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
2148                                 VirtQueueElement *elem)
2149 {
2150     VirtQueueElementOld data;
2151     int i;
2152 
2153     memset(&data, 0, sizeof(data));
2154     data.index = elem->index;
2155     data.in_num = elem->in_num;
2156     data.out_num = elem->out_num;
2157 
2158     for (i = 0; i < elem->in_num; i++) {
2159         data.in_addr[i] = elem->in_addr[i];
2160     }
2161 
2162     for (i = 0; i < elem->out_num; i++) {
2163         data.out_addr[i] = elem->out_addr[i];
2164     }
2165 
2166     for (i = 0; i < elem->in_num; i++) {
2167         /* Base is overwritten by virtqueue_map when loading.  Do not
2168          * save it, as it would leak the QEMU address space layout.  */
2169         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
2170     }
2171 
2172     for (i = 0; i < elem->out_num; i++) {
2173         /* Do not save iov_base as above.  */
2174         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
2175     }
2176 
2177     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2178         qemu_put_be32s(f, &elem->ndescs);
2179     }
2180 
2181     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2182 }
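
/*
 * Editorial sketch of the save/load pairing for an in-flight element
 * (the s->inflight field is hypothetical device state). On load,
 * qemu_get_virtqueue_element() re-allocates the element and
 * virtqueue_map() rebuilds the iov_base pointers from the saved guest
 * addresses:
 *
 *     // device save hook
 *     qemu_put_virtqueue_element(vdev, f, s->inflight);
 *
 *     // device load hook
 *     s->inflight = qemu_get_virtqueue_element(vdev, f,
 *                                              sizeof(VirtQueueElement));
 */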
2183 
2184 /* virtio device */
2185 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2186 {
2187     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2188     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2189 
2190     if (virtio_device_disabled(vdev)) {
2191         return;
2192     }
2193 
2194     if (k->notify) {
2195         k->notify(qbus->parent, vector);
2196     }
2197 }
2198 
2199 void virtio_update_irq(VirtIODevice *vdev)
2200 {
2201     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2202 }
2203 
2204 static int virtio_validate_features(VirtIODevice *vdev)
2205 {
2206     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2207 
2208     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2209         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2210         return -EFAULT;
2211     }
2212 
2213     if (k->validate_features) {
2214         return k->validate_features(vdev);
2215     } else {
2216         return 0;
2217     }
2218 }
2219 
2220 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
2221 {
2222     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2223     trace_virtio_set_status(vdev, val);
2224 
2225     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2226         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2227             val & VIRTIO_CONFIG_S_FEATURES_OK) {
2228             int ret = virtio_validate_features(vdev);
2229 
2230             if (ret) {
2231                 return ret;
2232             }
2233         }
2234     }
2235 
2236     if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2237         (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2238         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2239     }
2240 
2241     if (k->set_status) {
2242         k->set_status(vdev, val);
2243     }
2244     vdev->status = val;
2245 
2246     return 0;
2247 }
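
/*
 * Editorial note: under VIRTIO 1.0 the transport funnels the driver's
 * status handshake through this function, roughly in this order (a
 * sketch of the sequence, not literal code from any transport):
 *
 *     virtio_set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 *     virtio_set_status(vdev, status | VIRTIO_CONFIG_S_DRIVER);
 *     virtio_set_features(vdev, negotiated_features);
 *     virtio_set_status(vdev, status | VIRTIO_CONFIG_S_FEATURES_OK);
 *     virtio_set_status(vdev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 *
 * Feature validation happens on the FEATURES_OK transition above; the
 * device is started on the DRIVER_OK transition via virtio_set_started().
 */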
2248 
2249 static enum virtio_device_endian virtio_default_endian(void)
2250 {
2251     if (target_words_bigendian()) {
2252         return VIRTIO_DEVICE_ENDIAN_BIG;
2253     } else {
2254         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2255     }
2256 }
2257 
2258 static enum virtio_device_endian virtio_current_cpu_endian(void)
2259 {
2260     if (cpu_virtio_is_big_endian(current_cpu)) {
2261         return VIRTIO_DEVICE_ENDIAN_BIG;
2262     } else {
2263         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2264     }
2265 }
2266 
2267 static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
2268 {
2269     vdev->vq[i].vring.desc = 0;
2270     vdev->vq[i].vring.avail = 0;
2271     vdev->vq[i].vring.used = 0;
2272     vdev->vq[i].last_avail_idx = 0;
2273     vdev->vq[i].shadow_avail_idx = 0;
2274     vdev->vq[i].used_idx = 0;
2275     vdev->vq[i].last_avail_wrap_counter = true;
2276     vdev->vq[i].shadow_avail_wrap_counter = true;
2277     vdev->vq[i].used_wrap_counter = true;
2278     virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2279     vdev->vq[i].signalled_used = 0;
2280     vdev->vq[i].signalled_used_valid = false;
2281     vdev->vq[i].notification = true;
2282     vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2283     vdev->vq[i].inuse = 0;
2284     virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2285 }
2286 
2287 void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
2288 {
2289     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2290 
2291     if (k->queue_reset) {
2292         k->queue_reset(vdev, queue_index);
2293     }
2294 
2295     __virtio_queue_reset(vdev, queue_index);
2296 }
2297 
2298 void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
2299 {
2300     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2301 
2302     /*
2303      * TODO: Seabios is currently out of spec and triggering this error.
2304      * So this needs to be fixed in Seabios, then this can
2305      * be re-enabled for new machine types only, and also after
2306      * being converted to LOG_GUEST_ERROR.
2307      *
2308     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2309         error_report("queue_enable is only supported in devices of virtio "
2310                      "1.0 or later.");
2311     }
2312     */
2313 
2314     if (k->queue_enable) {
2315         k->queue_enable(vdev, queue_index);
2316     }
2317 }
2318 
2319 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val);
2320 
2321 void virtio_reset(void *opaque)
2322 {
2323     VirtIODevice *vdev = opaque;
2324     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2325     int i;
2326 
2327     virtio_set_status(vdev, 0);
2328     if (current_cpu) {
2329         /* Guest initiated reset */
2330         vdev->device_endian = virtio_current_cpu_endian();
2331     } else {
2332         /* System reset */
2333         vdev->device_endian = virtio_default_endian();
2334     }
2335 
2336     if (k->get_vhost) {
2337         struct vhost_dev *hdev = k->get_vhost(vdev);
2338         /* Only reset when vhost back-end is connected */
2339         if (hdev && hdev->vhost_ops) {
2340             vhost_reset_device(hdev);
2341         }
2342     }
2343 
2344     if (k->reset) {
2345         k->reset(vdev);
2346     }
2347 
2348     vdev->start_on_kick = false;
2349     vdev->started = false;
2350     vdev->broken = false;
2351     virtio_set_features_nocheck(vdev, 0);
2352     vdev->queue_sel = 0;
2353     vdev->status = 0;
2354     vdev->disabled = false;
2355     qatomic_set(&vdev->isr, 0);
2356     vdev->config_vector = VIRTIO_NO_VECTOR;
2357     virtio_notify_vector(vdev, vdev->config_vector);
2358 
2359     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2360         __virtio_queue_reset(vdev, i);
2361     }
2362 }
2363 
2364 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2365 {
2366     if (!vdev->vq[n].vring.num) {
2367         return;
2368     }
2369     vdev->vq[n].vring.desc = addr;
2370     virtio_queue_update_rings(vdev, n);
2371 }
2372 
2373 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2374 {
2375     return vdev->vq[n].vring.desc;
2376 }
2377 
2378 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2379                             hwaddr avail, hwaddr used)
2380 {
2381     if (!vdev->vq[n].vring.num) {
2382         return;
2383     }
2384     vdev->vq[n].vring.desc = desc;
2385     vdev->vq[n].vring.avail = avail;
2386     vdev->vq[n].vring.used = used;
2387     virtio_init_region_cache(vdev, n);
2388 }
2389 
2390 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2391 {
2392     /* Don't allow guest to flip queue between existent and
2393      * nonexistent states, or to set it to an invalid size.
2394      */
2395     if (!!num != !!vdev->vq[n].vring.num ||
2396         num > VIRTQUEUE_MAX_SIZE ||
2397         num < 0) {
2398         return;
2399     }
2400     vdev->vq[n].vring.num = num;
2401 }
2402 
2403 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2404 {
2405     return QLIST_FIRST(&vdev->vector_queues[vector]);
2406 }
2407 
2408 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2409 {
2410     return QLIST_NEXT(vq, node);
2411 }
2412 
2413 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2414 {
2415     return vdev->vq[n].vring.num;
2416 }
2417 
2418 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2419 {
2420     return vdev->vq[n].vring.num_default;
2421 }
2422 
2423 int virtio_get_num_queues(VirtIODevice *vdev)
2424 {
2425     int i;
2426 
2427     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2428         if (!virtio_queue_get_num(vdev, i)) {
2429             break;
2430         }
2431     }
2432 
2433     return i;
2434 }
2435 
2436 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2437 {
2438     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2439     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2440 
2441     /* virtio-1 compliant devices cannot change the alignment */
2442     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2443         error_report("tried to modify queue alignment for virtio-1 device");
2444         return;
2445     }
2446     /* Check that the transport told us it was going to do this
2447      * (so a buggy transport will immediately assert rather than
2448      * silently failing to migrate this state)
2449      */
2450     assert(k->has_variable_vring_alignment);
2451 
2452     if (align) {
2453         vdev->vq[n].vring.align = align;
2454         virtio_queue_update_rings(vdev, n);
2455     }
2456 }
2457 
2458 void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx)
2459 {
2460     if (!vq->vring.desc) {
2461         return;
2462     }
2463 
2464     /*
2465      * The 16-bit value for packed VQs includes a 1-bit wrap counter
2466      * and a 15-bit shadow_avail_idx.
2467      */
2468     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
2469         vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
2470         vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
2471     } else {
2472         vq->shadow_avail_idx = shadow_avail_idx;
2473     }
2474 }
2475 
2476 static void virtio_queue_notify_vq(VirtQueue *vq)
2477 {
2478     if (vq->vring.desc && vq->handle_output) {
2479         VirtIODevice *vdev = vq->vdev;
2480 
2481         if (unlikely(vdev->broken)) {
2482             return;
2483         }
2484 
2485         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2486         vq->handle_output(vdev, vq);
2487 
2488         if (unlikely(vdev->start_on_kick)) {
2489             virtio_set_started(vdev, true);
2490         }
2491     }
2492 }
2493 
2494 void virtio_queue_notify(VirtIODevice *vdev, int n)
2495 {
2496     VirtQueue *vq = &vdev->vq[n];
2497 
2498     if (unlikely(!vq->vring.desc || vdev->broken)) {
2499         return;
2500     }
2501 
2502     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2503     if (vq->host_notifier_enabled) {
2504         event_notifier_set(&vq->host_notifier);
2505     } else if (vq->handle_output) {
2506         vq->handle_output(vdev, vq);
2507 
2508         if (unlikely(vdev->start_on_kick)) {
2509             virtio_set_started(vdev, true);
2510         }
2511     }
2512 }
2513 
2514 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2515 {
2516     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2517         VIRTIO_NO_VECTOR;
2518 }
2519 
2520 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2521 {
2522     VirtQueue *vq = &vdev->vq[n];
2523 
2524     if (n < VIRTIO_QUEUE_MAX) {
2525         if (vdev->vector_queues &&
2526             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2527             QLIST_REMOVE(vq, node);
2528         }
2529         vdev->vq[n].vector = vector;
2530         if (vdev->vector_queues &&
2531             vector != VIRTIO_NO_VECTOR) {
2532             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2533         }
2534     }
2535 }
2536 
2537 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2538                             VirtIOHandleOutput handle_output)
2539 {
2540     int i;
2541 
2542     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2543         if (vdev->vq[i].vring.num == 0)
2544             break;
2545     }
2546 
2547     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2548         abort();
2549 
2550     vdev->vq[i].vring.num = queue_size;
2551     vdev->vq[i].vring.num_default = queue_size;
2552     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2553     vdev->vq[i].handle_output = handle_output;
2554     vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2555 
2556     return &vdev->vq[i];
2557 }
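
/*
 * Editorial sketch of realize-time queue setup; my_handle_output and
 * the queue size of 128 are hypothetical:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         // guest kick: drain vq with virtqueue_pop()/virtqueue_push()
 *     }
 *
 *     s->vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * with the matching virtio_delete_queue(s->vq) in the device's
 * unrealize path.
 */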
2558 
2559 void virtio_delete_queue(VirtQueue *vq)
2560 {
2561     vq->vring.num = 0;
2562     vq->vring.num_default = 0;
2563     vq->handle_output = NULL;
2564     g_free(vq->used_elems);
2565     vq->used_elems = NULL;
2566     virtio_virtqueue_reset_region_cache(vq);
2567 }
2568 
2569 void virtio_del_queue(VirtIODevice *vdev, int n)
2570 {
2571     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2572         abort();
2573     }
2574 
2575     virtio_delete_queue(&vdev->vq[n]);
2576 }
2577 
2578 static void virtio_set_isr(VirtIODevice *vdev, int value)
2579 {
2580     uint8_t old = qatomic_read(&vdev->isr);
2581 
2582     /* Do not write ISR if it does not change, so that its cacheline remains
2583      * shared in the common case where the guest does not read it.
2584      */
2585     if ((old & value) != value) {
2586         qatomic_or(&vdev->isr, value);
2587     }
2588 }
2589 
2590 /* Called within rcu_read_lock(). */
2591 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2592 {
2593     uint16_t old, new;
2594     bool v;
2595     /* We need to expose used array entries before checking used event. */
2596     smp_mb();
2597     /* Always notify when the queue is empty (if the feature was acknowledged) */
2598     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2599         !vq->inuse && virtio_queue_empty(vq)) {
2600         return true;
2601     }
2602 
2603     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2604         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2605     }
2606 
2607     v = vq->signalled_used_valid;
2608     vq->signalled_used_valid = true;
2609     old = vq->signalled_used;
2610     new = vq->signalled_used = vq->used_idx;
2611     return !v || vring_need_event(vring_get_used_event(vq), new, old);
2612 }
2613 
2614 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2615                                     uint16_t off_wrap, uint16_t new,
2616                                     uint16_t old)
2617 {
2618     int off = off_wrap & ~(1 << 15);
2619 
2620     if (wrap != off_wrap >> 15) {
2621         off -= vq->vring.num;
2622     }
2623 
2624     return vring_need_event(off, new, old);
2625 }
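
/*
 * Editorial note: bit 15 of off_wrap is the wrap counter the driver
 * expects at its event index. If it differs from the device's current
 * used_wrap_counter, the event index belongs to the previous lap around
 * the ring, so subtracting vring.num re-expresses it in the device's
 * current epoch before the usual vring_need_event() comparison.
 */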
2626 
2627 /* Called within rcu_read_lock(). */
2628 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2629 {
2630     VRingPackedDescEvent e;
2631     uint16_t old, new;
2632     bool v;
2633     VRingMemoryRegionCaches *caches;
2634 
2635     caches = vring_get_region_caches(vq);
2636     if (!caches) {
2637         return false;
2638     }
2639 
2640     vring_packed_event_read(vdev, &caches->avail, &e);
2641 
2642     old = vq->signalled_used;
2643     new = vq->signalled_used = vq->used_idx;
2644     v = vq->signalled_used_valid;
2645     vq->signalled_used_valid = true;
2646 
2647     if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2648         return false;
2649     } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2650         return true;
2651     }
2652 
2653     return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2654                                          e.off_wrap, new, old);
2655 }
2656 
2657 /* Called within rcu_read_lock().  */
2658 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2659 {
2660     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2661         return virtio_packed_should_notify(vdev, vq);
2662     } else {
2663         return virtio_split_should_notify(vdev, vq);
2664     }
2665 }
2666 
2667 /* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
2668 static void virtio_notify_irqfd_deferred_fn(void *opaque)
2669 {
2670     EventNotifier *notifier = opaque;
2671     VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
2672 
2673     trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2674     event_notifier_set(notifier);
2675 }
2676 
2677 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2678 {
2679     WITH_RCU_READ_LOCK_GUARD() {
2680         if (!virtio_should_notify(vdev, vq)) {
2681             return;
2682         }
2683     }
2684 
2685     trace_virtio_notify_irqfd(vdev, vq);
2686 
2687     /*
2688      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2689      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2690      * incorrectly polling this bit during crashdump and hibernation
2691      * in MSI mode, causing a hang if this bit is never updated.
2692      * Recent releases of Windows do not really shut down, but rather
2693      * log out and hibernate to make the next startup faster.  Hence,
2694      * this manifested as a more serious hang during shutdown with
2695      * Windows. The next driver release, from 2016, fixed this problem,
2696      * so working around it is not a must, but it's easy to do, so
2697      * let's do it here.
2698      *
2699      * Note: it's safe to update ISR from any thread as it was switched
2700      * to an atomic operation.
2701      */
2702     virtio_set_isr(vq->vdev, 0x1);
2703     defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
2704 }
2705 
2706 static void virtio_irq(VirtQueue *vq)
2707 {
2708     virtio_set_isr(vq->vdev, 0x1);
2709     virtio_notify_vector(vq->vdev, vq->vector);
2710 }
2711 
2712 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2713 {
2714     WITH_RCU_READ_LOCK_GUARD() {
2715         if (!virtio_should_notify(vdev, vq)) {
2716             return;
2717         }
2718     }
2719 
2720     trace_virtio_notify(vdev, vq);
2721     virtio_irq(vq);
2722 }
2723 
2724 void virtio_notify_config(VirtIODevice *vdev)
2725 {
2726     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2727         return;
2728 
2729     virtio_set_isr(vdev, 0x3);
2730     vdev->generation++;
2731     virtio_notify_vector(vdev, vdev->config_vector);
2732 }
2733 
2734 static bool virtio_device_endian_needed(void *opaque)
2735 {
2736     VirtIODevice *vdev = opaque;
2737 
2738     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2739     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2740         return vdev->device_endian != virtio_default_endian();
2741     }
2742     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2743     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2744 }
2745 
2746 static bool virtio_64bit_features_needed(void *opaque)
2747 {
2748     VirtIODevice *vdev = opaque;
2749 
2750     return (vdev->host_features >> 32) != 0;
2751 }
2752 
2753 static bool virtio_virtqueue_needed(void *opaque)
2754 {
2755     VirtIODevice *vdev = opaque;
2756 
2757     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2758 }
2759 
2760 static bool virtio_packed_virtqueue_needed(void *opaque)
2761 {
2762     VirtIODevice *vdev = opaque;
2763 
2764     return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2765 }
2766 
2767 static bool virtio_ringsize_needed(void *opaque)
2768 {
2769     VirtIODevice *vdev = opaque;
2770     int i;
2771 
2772     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2773         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2774             return true;
2775         }
2776     }
2777     return false;
2778 }
2779 
2780 static bool virtio_extra_state_needed(void *opaque)
2781 {
2782     VirtIODevice *vdev = opaque;
2783     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2784     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2785 
2786     return k->has_extra_state &&
2787         k->has_extra_state(qbus->parent);
2788 }
2789 
2790 static bool virtio_broken_needed(void *opaque)
2791 {
2792     VirtIODevice *vdev = opaque;
2793 
2794     return vdev->broken;
2795 }
2796 
2797 static bool virtio_started_needed(void *opaque)
2798 {
2799     VirtIODevice *vdev = opaque;
2800 
2801     return vdev->started;
2802 }
2803 
2804 static bool virtio_disabled_needed(void *opaque)
2805 {
2806     VirtIODevice *vdev = opaque;
2807 
2808     return vdev->disabled;
2809 }
2810 
2811 static const VMStateDescription vmstate_virtqueue = {
2812     .name = "virtqueue_state",
2813     .version_id = 1,
2814     .minimum_version_id = 1,
2815     .fields = (const VMStateField[]) {
2816         VMSTATE_UINT64(vring.avail, struct VirtQueue),
2817         VMSTATE_UINT64(vring.used, struct VirtQueue),
2818         VMSTATE_END_OF_LIST()
2819     }
2820 };
2821 
2822 static const VMStateDescription vmstate_packed_virtqueue = {
2823     .name = "packed_virtqueue_state",
2824     .version_id = 1,
2825     .minimum_version_id = 1,
2826     .fields = (const VMStateField[]) {
2827         VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2828         VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2829         VMSTATE_UINT16(used_idx, struct VirtQueue),
2830         VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2831         VMSTATE_UINT32(inuse, struct VirtQueue),
2832         VMSTATE_END_OF_LIST()
2833     }
2834 };
2835 
2836 static const VMStateDescription vmstate_virtio_virtqueues = {
2837     .name = "virtio/virtqueues",
2838     .version_id = 1,
2839     .minimum_version_id = 1,
2840     .needed = &virtio_virtqueue_needed,
2841     .fields = (const VMStateField[]) {
2842         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2843                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2844         VMSTATE_END_OF_LIST()
2845     }
2846 };
2847 
2848 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2849     .name = "virtio/packed_virtqueues",
2850     .version_id = 1,
2851     .minimum_version_id = 1,
2852     .needed = &virtio_packed_virtqueue_needed,
2853     .fields = (const VMStateField[]) {
2854         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2855                       VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2856         VMSTATE_END_OF_LIST()
2857     }
2858 };
2859 
2860 static const VMStateDescription vmstate_ringsize = {
2861     .name = "ringsize_state",
2862     .version_id = 1,
2863     .minimum_version_id = 1,
2864     .fields = (const VMStateField[]) {
2865         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2866         VMSTATE_END_OF_LIST()
2867     }
2868 };
2869 
2870 static const VMStateDescription vmstate_virtio_ringsize = {
2871     .name = "virtio/ringsize",
2872     .version_id = 1,
2873     .minimum_version_id = 1,
2874     .needed = &virtio_ringsize_needed,
2875     .fields = (const VMStateField[]) {
2876         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2877                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2878         VMSTATE_END_OF_LIST()
2879     }
2880 };
2881 
2882 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2883                            const VMStateField *field)
2884 {
2885     VirtIODevice *vdev = pv;
2886     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2887     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2888 
2889     if (!k->load_extra_state) {
2890         return -1;
2891     } else {
2892         return k->load_extra_state(qbus->parent, f);
2893     }
2894 }
2895 
2896 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2897                            const VMStateField *field, JSONWriter *vmdesc)
2898 {
2899     VirtIODevice *vdev = pv;
2900     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2901     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2902 
2903     k->save_extra_state(qbus->parent, f);
2904     return 0;
2905 }
2906 
2907 static const VMStateInfo vmstate_info_extra_state = {
2908     .name = "virtqueue_extra_state",
2909     .get = get_extra_state,
2910     .put = put_extra_state,
2911 };
2912 
2913 static const VMStateDescription vmstate_virtio_extra_state = {
2914     .name = "virtio/extra_state",
2915     .version_id = 1,
2916     .minimum_version_id = 1,
2917     .needed = &virtio_extra_state_needed,
2918     .fields = (const VMStateField[]) {
2919         {
2920             .name         = "extra_state",
2921             .version_id   = 0,
2922             .field_exists = NULL,
2923             .size         = 0,
2924             .info         = &vmstate_info_extra_state,
2925             .flags        = VMS_SINGLE,
2926             .offset       = 0,
2927         },
2928         VMSTATE_END_OF_LIST()
2929     }
2930 };
2931 
2932 static const VMStateDescription vmstate_virtio_device_endian = {
2933     .name = "virtio/device_endian",
2934     .version_id = 1,
2935     .minimum_version_id = 1,
2936     .needed = &virtio_device_endian_needed,
2937     .fields = (const VMStateField[]) {
2938         VMSTATE_UINT8(device_endian, VirtIODevice),
2939         VMSTATE_END_OF_LIST()
2940     }
2941 };
2942 
2943 static const VMStateDescription vmstate_virtio_64bit_features = {
2944     .name = "virtio/64bit_features",
2945     .version_id = 1,
2946     .minimum_version_id = 1,
2947     .needed = &virtio_64bit_features_needed,
2948     .fields = (const VMStateField[]) {
2949         VMSTATE_UINT64(guest_features, VirtIODevice),
2950         VMSTATE_END_OF_LIST()
2951     }
2952 };
2953 
2954 static const VMStateDescription vmstate_virtio_broken = {
2955     .name = "virtio/broken",
2956     .version_id = 1,
2957     .minimum_version_id = 1,
2958     .needed = &virtio_broken_needed,
2959     .fields = (const VMStateField[]) {
2960         VMSTATE_BOOL(broken, VirtIODevice),
2961         VMSTATE_END_OF_LIST()
2962     }
2963 };
2964 
2965 static const VMStateDescription vmstate_virtio_started = {
2966     .name = "virtio/started",
2967     .version_id = 1,
2968     .minimum_version_id = 1,
2969     .needed = &virtio_started_needed,
2970     .fields = (const VMStateField[]) {
2971         VMSTATE_BOOL(started, VirtIODevice),
2972         VMSTATE_END_OF_LIST()
2973     }
2974 };
2975 
2976 static const VMStateDescription vmstate_virtio_disabled = {
2977     .name = "virtio/disabled",
2978     .version_id = 1,
2979     .minimum_version_id = 1,
2980     .needed = &virtio_disabled_needed,
2981     .fields = (const VMStateField[]) {
2982         VMSTATE_BOOL(disabled, VirtIODevice),
2983         VMSTATE_END_OF_LIST()
2984     }
2985 };
2986 
2987 static const VMStateDescription vmstate_virtio = {
2988     .name = "virtio",
2989     .version_id = 1,
2990     .minimum_version_id = 1,
2991     .fields = (const VMStateField[]) {
2992         VMSTATE_END_OF_LIST()
2993     },
2994     .subsections = (const VMStateDescription * const []) {
2995         &vmstate_virtio_device_endian,
2996         &vmstate_virtio_64bit_features,
2997         &vmstate_virtio_virtqueues,
2998         &vmstate_virtio_ringsize,
2999         &vmstate_virtio_broken,
3000         &vmstate_virtio_extra_state,
3001         &vmstate_virtio_started,
3002         &vmstate_virtio_packed_virtqueues,
3003         &vmstate_virtio_disabled,
3004         NULL
3005     }
3006 };
3007 
3008 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
3009 {
3010     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3011     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3012     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3013     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
3014     int i;
3015 
3016     if (k->save_config) {
3017         k->save_config(qbus->parent, f);
3018     }
3019 
3020     qemu_put_8s(f, &vdev->status);
3021     qemu_put_8s(f, &vdev->isr);
3022     qemu_put_be16s(f, &vdev->queue_sel);
3023     qemu_put_be32s(f, &guest_features_lo);
3024     qemu_put_be32(f, vdev->config_len);
3025     qemu_put_buffer(f, vdev->config, vdev->config_len);
3026 
3027     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3028         if (vdev->vq[i].vring.num == 0)
3029             break;
3030     }
3031 
3032     qemu_put_be32(f, i);
3033 
3034     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3035         if (vdev->vq[i].vring.num == 0)
3036             break;
3037 
3038         qemu_put_be32(f, vdev->vq[i].vring.num);
3039         if (k->has_variable_vring_alignment) {
3040             qemu_put_be32(f, vdev->vq[i].vring.align);
3041         }
3042         /*
3043          * Save desc now, the rest of the ring addresses are saved in
3044          * subsections for VIRTIO-1 devices.
3045          */
3046         qemu_put_be64(f, vdev->vq[i].vring.desc);
3047         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
3048         if (k->save_queue) {
3049             k->save_queue(qbus->parent, i, f);
3050         }
3051     }
3052 
3053     if (vdc->save != NULL) {
3054         vdc->save(vdev, f);
3055     }
3056 
3057     if (vdc->vmsd) {
3058         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
3059         if (ret) {
3060             return ret;
3061         }
3062     }
3063 
3064     /* Subsections */
3065     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
3066 }
3067 
3068 /* A wrapper for use as a VMState .put function */
3069 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
3070                               const VMStateField *field, JSONWriter *vmdesc)
3071 {
3072     return virtio_save(VIRTIO_DEVICE(opaque), f);
3073 }
3074 
3075 /* A wrapper for use as a VMState .get function */
3076 static int coroutine_mixed_fn
3077 virtio_device_get(QEMUFile *f, void *opaque, size_t size,
3078                   const VMStateField *field)
3079 {
3080     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
3081     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
3082 
3083     return virtio_load(vdev, f, dc->vmsd->version_id);
3084 }
3085 
3086 const VMStateInfo virtio_vmstate_info = {
3087     .name = "virtio",
3088     .get = virtio_device_get,
3089     .put = virtio_device_put,
3090 };
3091 
3092 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
3093 {
3094     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
3095     bool bad = (val & ~(vdev->host_features)) != 0;
3096 
3097     val &= vdev->host_features;
3098     if (k->set_features) {
3099         k->set_features(vdev, val);
3100     }
3101     vdev->guest_features = val;
3102     return bad ? -1 : 0;
3103 }
3104 
3105 typedef struct VirtioSetFeaturesNocheckData {
3106     Coroutine *co;
3107     VirtIODevice *vdev;
3108     uint64_t val;
3109     int ret;
3110 } VirtioSetFeaturesNocheckData;
3111 
3112 static void virtio_set_features_nocheck_bh(void *opaque)
3113 {
3114     VirtioSetFeaturesNocheckData *data = opaque;
3115 
3116     data->ret = virtio_set_features_nocheck(data->vdev, data->val);
3117     aio_co_wake(data->co);
3118 }
3119 
3120 static int coroutine_mixed_fn
3121 virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
3122 {
3123     if (qemu_in_coroutine()) {
3124         VirtioSetFeaturesNocheckData data = {
3125             .co = qemu_coroutine_self(),
3126             .vdev = vdev,
3127             .val = val,
3128         };
3129         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
3130                                 virtio_set_features_nocheck_bh, &data);
3131         qemu_coroutine_yield();
3132         return data.ret;
3133     } else {
3134         return virtio_set_features_nocheck(vdev, val);
3135     }
3136 }
3137 
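/*
 * Editorial sketch of the control flow above when entered from a coroutine
 * (all names are the real ones defined above, nothing hypothetical):
 *
 *   coroutine:    aio_bh_schedule_oneshot(...); qemu_coroutine_yield();
 *   bottom half:  virtio_set_features_nocheck_bh() runs in the same
 *                 AioContext, fills data.ret, then aio_co_wake(data.co);
 *   coroutine:    resumes and returns data.ret.
 */
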
3138 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
3139 {
3140     int ret;
3141     /*
3142      * The driver must not attempt to set features after feature negotiation
3143      * has finished.
3144      */
3145     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
3146         return -EINVAL;
3147     }
3148 
3149     if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
3150         qemu_log_mask(LOG_GUEST_ERROR,
3151                       "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
3152                       __func__, vdev->name);
3153     }
3154 
3155     ret = virtio_set_features_nocheck(vdev, val);
3156     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
3157         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
3158         int i;
3159         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3160             if (vdev->vq[i].vring.num != 0) {
3161                 virtio_init_region_cache(vdev, i);
3162             }
3163         }
3164     }
3165     if (!ret) {
3166         if (!virtio_device_started(vdev, vdev->status) &&
3167             !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3168             vdev->start_on_kick = true;
3169         }
3170     }
3171     return ret;
3172 }
3173 
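/*
 * Usage sketch (editorial, not part of the original file): a transport's
 * feature-write path is expected to funnel into virtio_set_features() and
 * treat a negative return as a guest error.  The helper below is a
 * hypothetical illustration, not an existing QEMU function.
 */
static void G_GNUC_UNUSED
example_transport_write_features(VirtIODevice *vdev, uint64_t val)
{
    if (virtio_set_features(vdev, val) < 0) {
        /* Rejected: negotiation already finished, or unknown bits set. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: feature write 0x%" PRIx64 " rejected\n",
                      vdev->name, val);
    }
}
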
3174 static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
3175                                                            Error **errp)
3176 {
3177     VirtioBusState *bus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3178     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
3179     DeviceState *proxy = DEVICE(BUS(bus)->parent);
3180 
3181     if (virtio_host_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA) &&
3182         k->ioeventfd_enabled(proxy)) {
3183         error_setg(errp,
3184                    "notification_data=on without ioeventfd=off is not supported");
3185     }
3186 }
3187 
3188 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
3189                               uint64_t host_features)
3190 {
3191     size_t config_size = params->min_size;
3192     const VirtIOFeature *feature_sizes = params->feature_sizes;
3193     size_t i;
3194 
3195     for (i = 0; feature_sizes[i].flags != 0; i++) {
3196         if (host_features & feature_sizes[i].flags) {
3197             config_size = MAX(feature_sizes[i].end, config_size);
3198         }
3199     }
3200 
3201     assert(config_size <= params->max_size);
3202     return config_size;
3203 }
3204 
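/*
 * Usage sketch (editorial): how a device might describe a feature-gated
 * config layout for virtio_get_config_size().  The struct and feature bit
 * below are hypothetical stand-ins, not a real virtio device.
 */
struct ExampleConfig {
    uint32_t base;
    uint32_t foo; /* only valid when EXAMPLE_F_FOO is offered */
};
#define EXAMPLE_F_FOO 40

static const VirtIOFeature example_feature_sizes[] = {
    {.flags = 1ULL << EXAMPLE_F_FOO,
     .end = endof(struct ExampleConfig, foo)},
    {} /* end marker: flags == 0 terminates the list */
};

static const VirtIOConfigSizeParams example_cfg_size_params G_GNUC_UNUSED = {
    .min_size = endof(struct ExampleConfig, base), /* always present */
    .max_size = sizeof(struct ExampleConfig),
    .feature_sizes = example_feature_sizes,
};
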
3205 int coroutine_mixed_fn
3206 virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
3207 {
3208     int i, ret;
3209     int32_t config_len;
3210     uint32_t num;
3211     uint32_t features;
3212     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3213     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3214     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3215 
3216     /*
3217      * We poison the endianness to ensure it does not get used before
3218      * subsections have been loaded.
3219      */
3220     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3221 
3222     if (k->load_config) {
3223         ret = k->load_config(qbus->parent, f);
3224         if (ret)
3225             return ret;
3226     }
3227 
3228     qemu_get_8s(f, &vdev->status);
3229     qemu_get_8s(f, &vdev->isr);
3230     qemu_get_be16s(f, &vdev->queue_sel);
3231     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3232         return -1;
3233     }
3234     qemu_get_be32s(f, &features);
3235 
3236     /*
3237      * Temporarily set guest_features low bits - needed by
3238      * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3239      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3240      *
3241      * Note: devices should always test host features in the future - don't
3242      * create new dependencies like this one.
3243      */
3244     vdev->guest_features = features;
3245 
3246     config_len = qemu_get_be32(f);
3247 
3248     /*
3249      * There are cases where the incoming config can be bigger or smaller
3250      * than what we have; so load what we have space for, and skip
3251      * any excess that's in the stream.
3252      */
3253     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3254 
3255     while (config_len > vdev->config_len) {
3256         qemu_get_byte(f);
3257         config_len--;
3258     }
3259 
3260     if (vdc->pre_load_queues) {
3261         ret = vdc->pre_load_queues(vdev);
3262         if (ret) {
3263             return ret;
3264         }
3265     }
3266 
3267     num = qemu_get_be32(f);
3268 
3269     if (num > VIRTIO_QUEUE_MAX) {
3270         error_report("Invalid number of virtqueues: 0x%x", num);
3271         return -1;
3272     }
3273 
3274     for (i = 0; i < num; i++) {
3275         vdev->vq[i].vring.num = qemu_get_be32(f);
3276         if (k->has_variable_vring_alignment) {
3277             vdev->vq[i].vring.align = qemu_get_be32(f);
3278         }
3279         vdev->vq[i].vring.desc = qemu_get_be64(f);
3280         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3281         vdev->vq[i].signalled_used_valid = false;
3282         vdev->vq[i].notification = true;
3283 
3284         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3285             error_report("VQ %d address 0x0 "
3286                          "inconsistent with Host index 0x%x",
3287                          i, vdev->vq[i].last_avail_idx);
3288             return -1;
3289         }
3290         if (k->load_queue) {
3291             ret = k->load_queue(qbus->parent, i, f);
3292             if (ret)
3293                 return ret;
3294         }
3295     }
3296 
3297     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3298 
3299     if (vdc->load != NULL) {
3300         ret = vdc->load(vdev, f, version_id);
3301         if (ret) {
3302             return ret;
3303         }
3304     }
3305 
3306     if (vdc->vmsd) {
3307         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3308         if (ret) {
3309             return ret;
3310         }
3311     }
3312 
3313     /* Subsections */
3314     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3315     if (ret) {
3316         return ret;
3317     }
3318 
3319     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3320         vdev->device_endian = virtio_default_endian();
3321     }
3322 
3323     if (virtio_64bit_features_needed(vdev)) {
3324         /*
3325          * Subsection load filled vdev->guest_features.  Run them
3326          * through virtio_set_features to sanity-check them against
3327          * host_features.
3328          */
3329         uint64_t features64 = vdev->guest_features;
3330         if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
3331             error_report("Features 0x%" PRIx64 " unsupported. "
3332                          "Allowed features: 0x%" PRIx64,
3333                          features64, vdev->host_features);
3334             return -1;
3335         }
3336     } else {
3337         if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
3338             error_report("Features 0x%x unsupported. "
3339                          "Allowed features: 0x%" PRIx64,
3340                          features, vdev->host_features);
3341             return -1;
3342         }
3343     }
3344 
3345     if (!virtio_device_started(vdev, vdev->status) &&
3346         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3347         vdev->start_on_kick = true;
3348     }
3349 
3350     RCU_READ_LOCK_GUARD();
3351     for (i = 0; i < num; i++) {
3352         if (vdev->vq[i].vring.desc) {
3353             uint16_t nheads;
3354 
3355             /*
3356              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3357              * only the region cache needs to be set up.  Legacy devices need
3358              * to calculate used and avail ring addresses based on the desc
3359              * address.
3360              */
3361             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3362                 virtio_init_region_cache(vdev, i);
3363             } else {
3364                 virtio_queue_update_rings(vdev, i);
3365             }
3366 
3367             if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3368                 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3369                 vdev->vq[i].shadow_avail_wrap_counter =
3370                                         vdev->vq[i].last_avail_wrap_counter;
3371                 continue;
3372             }
3373 
3374             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3375             /* Check it isn't doing strange things with descriptor numbers. */
3376             if (nheads > vdev->vq[i].vring.num) {
3377                 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3378                              "inconsistent with Host index 0x%x: delta 0x%x",
3379                              i, vdev->vq[i].vring.num,
3380                              vring_avail_idx(&vdev->vq[i]),
3381                              vdev->vq[i].last_avail_idx, nheads);
3382                 vdev->vq[i].used_idx = 0;
3383                 vdev->vq[i].shadow_avail_idx = 0;
3384                 vdev->vq[i].inuse = 0;
3385                 continue;
3386             }
3387             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3388             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3389 
3390             /*
3391              * Some devices migrate VirtQueueElements that have been popped
3392              * from the avail ring but not yet returned to the used ring.
3393              * Since max ring size < UINT16_MAX it's safe to use modulo
3394              * UINT16_MAX + 1 subtraction.
3395              */
3396             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3397                                 vdev->vq[i].used_idx);
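            /*
             * Worked example (editorial): last_avail_idx = 3 and
             * used_idx = 0xffff give inuse = (uint16_t)(3 - 0xffff) = 4;
             * the uint16_t cast handles the wrap at 0x10000.
             */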
3398             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3399                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3400                              "used_idx 0x%x",
3401                              i, vdev->vq[i].vring.num,
3402                              vdev->vq[i].last_avail_idx,
3403                              vdev->vq[i].used_idx);
3404                 return -1;
3405             }
3406         }
3407     }
3408 
3409     if (vdc->post_load) {
3410         ret = vdc->post_load(vdev);
3411         if (ret) {
3412             return ret;
3413         }
3414     }
3415 
3416     return 0;
3417 }
3418 
3419 void virtio_cleanup(VirtIODevice *vdev)
3420 {
3421     qemu_del_vm_change_state_handler(vdev->vmstate);
3422 }
3423 
3424 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3425 {
3426     VirtIODevice *vdev = opaque;
3427     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3428     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3429     bool backend_run = running && virtio_device_started(vdev, vdev->status);
3430     vdev->vm_running = running;
3431 
3432     if (backend_run) {
3433         virtio_set_status(vdev, vdev->status);
3434     }
3435 
3436     if (k->vmstate_change) {
3437         k->vmstate_change(qbus->parent, backend_run);
3438     }
3439 
3440     if (!backend_run) {
3441         virtio_set_status(vdev, vdev->status);
3442     }
3443 }
3444 
3445 void virtio_instance_init_common(Object *proxy_obj, void *data,
3446                                  size_t vdev_size, const char *vdev_name)
3447 {
3448     DeviceState *vdev = data;
3449 
3450     object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3451                                        vdev_size, vdev_name, &error_abort,
3452                                        NULL);
3453     qdev_alias_all_properties(vdev, proxy_obj);
3454 }
3455 
3456 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3457 {
3458     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3459     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3460     int i;
3461     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3462 
3463     if (nvectors) {
3464         vdev->vector_queues =
3465             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3466     }
3467 
3468     vdev->start_on_kick = false;
3469     vdev->started = false;
3470     vdev->vhost_started = false;
3471     vdev->device_id = device_id;
3472     vdev->status = 0;
3473     qatomic_set(&vdev->isr, 0);
3474     vdev->queue_sel = 0;
3475     vdev->config_vector = VIRTIO_NO_VECTOR;
3476     vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3477     vdev->vm_running = runstate_is_running();
3478     vdev->broken = false;
3479     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3480         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3481         vdev->vq[i].vdev = vdev;
3482         vdev->vq[i].queue_index = i;
3483         vdev->vq[i].host_notifier_enabled = false;
3484     }
3485 
3486     vdev->name = virtio_id_to_name(device_id);
3487     vdev->config_len = config_size;
3488     if (vdev->config_len) {
3489         vdev->config = g_malloc0(config_size);
3490     } else {
3491         vdev->config = NULL;
3492     }
3493     vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3494             virtio_vmstate_change, vdev);
3495     vdev->device_endian = virtio_default_endian();
3496     vdev->use_guest_notifier_mask = true;
3497 }
3498 
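/*
 * Usage sketch (editorial): a device's realize() pairs virtio_init() with
 * virtio_add_queue(), and unrealize() undoes both.  Queue size, handler and
 * config struct are hypothetical; VIRTIO_ID_NET just stands in for the
 * device's real ID.
 *
 *     virtio_init(vdev, VIRTIO_ID_NET, sizeof(struct ExampleConfig));
 *     s->vq = virtio_add_queue(vdev, 128, example_handle_output);
 *     ...
 *     virtio_del_queue(vdev, 0);
 *     virtio_cleanup(vdev);
 */
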
3499 /*
3500  * Only devices that existed before the virtio standard was defined can
3501  * support legacy mode; this includes devices not specified in the
3502  * standard. All newer devices conform only to the virtio standard.
3503  */
3504 bool virtio_legacy_allowed(VirtIODevice *vdev)
3505 {
3506     switch (vdev->device_id) {
3507     case VIRTIO_ID_NET:
3508     case VIRTIO_ID_BLOCK:
3509     case VIRTIO_ID_CONSOLE:
3510     case VIRTIO_ID_RNG:
3511     case VIRTIO_ID_BALLOON:
3512     case VIRTIO_ID_RPMSG:
3513     case VIRTIO_ID_SCSI:
3514     case VIRTIO_ID_9P:
3515     case VIRTIO_ID_RPROC_SERIAL:
3516     case VIRTIO_ID_CAIF:
3517         return true;
3518     default:
3519         return false;
3520     }
3521 }
3522 
3523 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3524 {
3525     return vdev->disable_legacy_check;
3526 }
3527 
3528 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3529 {
3530     return vdev->vq[n].vring.desc;
3531 }
3532 
3533 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3534 {
3535     return virtio_queue_get_desc_addr(vdev, n) != 0;
3536 }
3537 
3538 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3539 {
3540     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3541     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3542 
3543     if (k->queue_enabled) {
3544         return k->queue_enabled(qbus->parent, n);
3545     }
3546     return virtio_queue_enabled_legacy(vdev, n);
3547 }
3548 
3549 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3550 {
3551     return vdev->vq[n].vring.avail;
3552 }
3553 
3554 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3555 {
3556     return vdev->vq[n].vring.used;
3557 }
3558 
3559 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3560 {
3561     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3562 }
3563 
3564 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3565 {
3566     int s;
3567 
3568     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3569         return sizeof(struct VRingPackedDescEvent);
3570     }
3571 
3572     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3573     return offsetof(VRingAvail, ring) +
3574         sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3575 }
3576 
3577 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3578 {
3579     int s;
3580 
3581     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3582         return sizeof(struct VRingPackedDescEvent);
3583     }
3584 
3585     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3586     return offsetof(VRingUsed, ring) +
3587         sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3588 }
3589 
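/*
 * Worked example (editorial note): for a split ring with vring.num = 256
 * and VIRTIO_RING_F_EVENT_IDX negotiated, the three sizes above are:
 *   desc:  256 * sizeof(VRingDesc)             = 256 * 16      = 4096 bytes
 *   avail: 4 + 256 * sizeof(uint16_t) + 2      = 4 + 512 + 2   =  518 bytes
 *   used:  4 + 256 * sizeof(VRingUsedElem) + 2 = 4 + 2048 + 2  = 2054 bytes
 */
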
3590 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3591                                                            int n)
3592 {
3593     unsigned int avail, used;
3594 
3595     avail = vdev->vq[n].last_avail_idx;
3596     avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3597 
3598     used = vdev->vq[n].used_idx;
3599     used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3600 
3601     return avail | used << 16;
3602 }
3603 
3604 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3605                                                       int n)
3606 {
3607     return vdev->vq[n].last_avail_idx;
3608 }
3609 
3610 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3611 {
3612     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3613         return virtio_queue_packed_get_last_avail_idx(vdev, n);
3614     } else {
3615         return virtio_queue_split_get_last_avail_idx(vdev, n);
3616     }
3617 }
3618 
3619 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3620                                                    int n, unsigned int idx)
3621 {
3622     struct VirtQueue *vq = &vdev->vq[n];
3623 
3624     vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3625     vq->last_avail_wrap_counter =
3626         vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3627     idx >>= 16;
3628     vq->used_idx = idx & 0x7fff;
3629     vq->used_wrap_counter = !!(idx & 0x8000);
3630 }
3631 
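/*
 * Worked example (editorial note): the packed-ring state is round-tripped
 * as a single 32-bit value laid out as
 *   [31] used wrap counter, [30:16] used_idx,
 *   [15] avail wrap counter, [14:0] last_avail_idx.
 * E.g. idx = 0x8005000a decodes to used_wrap_counter = 1, used_idx = 5,
 * last_avail_wrap_counter = 0, last_avail_idx = 10.
 */
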
3632 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3633                                                   int n, unsigned int idx)
3634 {
3635     vdev->vq[n].last_avail_idx = idx;
3636     vdev->vq[n].shadow_avail_idx = idx;
3637 }
3638 
3639 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3640                                      unsigned int idx)
3641 {
3642     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3643         virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3644     } else {
3645         virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3646     }
3647 }
3648 
3649 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3650                                                        int n)
3651 {
3652     /* The packed ring has no used index in guest memory to restore from. */
3653     return;
3654 }
3655 
3656 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3657                                                       int n)
3658 {
3659     RCU_READ_LOCK_GUARD();
3660     if (vdev->vq[n].vring.desc) {
3661         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3662         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3663     }
3664 }
3665 
3666 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3667 {
3668     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3669         virtio_queue_packed_restore_last_avail_idx(vdev, n);
3670     } else {
3671         virtio_queue_split_restore_last_avail_idx(vdev, n);
3672     }
3673 }
3674 
3675 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3676 {
3677     /* used idx was updated through set_last_avail_idx() */
3678     return;
3679 }
3680 
3681 static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
3682 {
3683     RCU_READ_LOCK_GUARD();
3684     if (vdev->vq[n].vring.desc) {
3685         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3686     }
3687 }
3688 
3689 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3690 {
3691     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3692         virtio_queue_packed_update_used_idx(vdev, n);
3693     } else {
3694         virtio_queue_split_update_used_idx(vdev, n);
3695     }
3696 }
3697 
3698 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3699 {
3700     vdev->vq[n].signalled_used_valid = false;
3701 }
3702 
3703 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3704 {
3705     return vdev->vq + n;
3706 }
3707 
3708 uint16_t virtio_get_queue_index(VirtQueue *vq)
3709 {
3710     return vq->queue_index;
3711 }
3712 
3713 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3714 {
3715     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3716     if (event_notifier_test_and_clear(n)) {
3717         virtio_irq(vq);
3718     }
3719 }
3720 static void virtio_config_guest_notifier_read(EventNotifier *n)
3721 {
3722     VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
3723 
3724     if (event_notifier_test_and_clear(n)) {
3725         virtio_notify_config(vdev);
3726     }
3727 }
3728 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3729                                                 bool with_irqfd)
3730 {
3731     if (assign && !with_irqfd) {
3732         event_notifier_set_handler(&vq->guest_notifier,
3733                                    virtio_queue_guest_notifier_read);
3734     } else {
3735         event_notifier_set_handler(&vq->guest_notifier, NULL);
3736     }
3737     if (!assign) {
3738         /* Test and clear notifier before closing it,
3739          * in case poll callback didn't have time to run. */
3740         virtio_queue_guest_notifier_read(&vq->guest_notifier);
3741     }
3742 }
3743 
3744 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
3745                                                  bool assign, bool with_irqfd)
3746 {
3747     EventNotifier *n;
3748     n = &vdev->config_notifier;
3749     if (assign && !with_irqfd) {
3750         event_notifier_set_handler(n, virtio_config_guest_notifier_read);
3751     } else {
3752         event_notifier_set_handler(n, NULL);
3753     }
3754     if (!assign) {
3755         /* Test and clear notifier before closing it,
3756          * in case poll callback didn't have time to run. */
3757         virtio_config_guest_notifier_read(n);
3758     }
3759 }
3760 
3761 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3762 {
3763     return &vq->guest_notifier;
3764 }
3765 
3766 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3767 {
3768     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3769 
3770     virtio_queue_set_notification(vq, 0);
3771 }
3772 
3773 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3774 {
3775     EventNotifier *n = opaque;
3776     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3777 
3778     return vq->vring.desc && !virtio_queue_empty(vq);
3779 }
3780 
3781 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3782 {
3783     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3784 
3785     virtio_queue_notify_vq(vq);
3786 }
3787 
3788 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3789 {
3790     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3791 
3792     /* Caller polls once more after this to catch requests that race with us */
3793     virtio_queue_set_notification(vq, 1);
3794 }
3795 
3796 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3797 {
3798     /*
3799      * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
3800      * Re-enable them.  (And if detach has not been used before, notifications
3801      * being enabled is still the default state while a notifier is attached;
3802      * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
3803      * notifications enabled once the polling section is left.)
3804      */
3805     if (!virtio_queue_get_notification(vq)) {
3806         virtio_queue_set_notification(vq, 1);
3807     }
3808 
3809     aio_set_event_notifier(ctx, &vq->host_notifier,
3810                            virtio_queue_host_notifier_read,
3811                            virtio_queue_host_notifier_aio_poll,
3812                            virtio_queue_host_notifier_aio_poll_ready);
3813     aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3814                                 virtio_queue_host_notifier_aio_poll_begin,
3815                                 virtio_queue_host_notifier_aio_poll_end);
3816 
3817     /*
3818      * We will have ignored notifications about new requests from the guest
3819      * while no notifiers were attached, so "kick" the virt queue to process
3820      * those requests now.
3821      */
3822     event_notifier_set(&vq->host_notifier);
3823 }
3824 
3825 /*
3826  * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3827  * this for rx virtqueues and similar cases where the virtqueue handler
3828  * function does not pop all elements. When the virtqueue is left non-empty,
3829  * polling consumes CPU cycles and should not be used.
3830  */
3831 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3832 {
3833     /* See virtio_queue_aio_attach_host_notifier() */
3834     if (!virtio_queue_get_notification(vq)) {
3835         virtio_queue_set_notification(vq, 1);
3836     }
3837 
3838     aio_set_event_notifier(ctx, &vq->host_notifier,
3839                            virtio_queue_host_notifier_read,
3840                            NULL, NULL);
3841 
3842     /*
3843      * See virtio_queue_aio_attach_host_notifier().
3844      * Note that this may be unnecessary for the type of virtqueues this
3845      * function is used for.  Still, it will not hurt to have a quick look into
3846      * whether we can/should process any of the virtqueue elements.
3847      */
3848     event_notifier_set(&vq->host_notifier);
3849 }
3850 
3851 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3852 {
3853     aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3854 
3855     /*
3856      * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
3857      * will run after io_poll_begin(), so by removing the notifier, we do not
3858      * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
3859      * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
3860      * notifications are enabled or disabled.  It does not really matter anyway;
3861      * we just removed the notifier, so we do not care about notifications until
3862      * we potentially re-attach it.  The attach_host_notifier functions will
3863      * ensure that notifications are enabled again when they are needed.
3864      */
3865 }
3866 
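/*
 * Usage sketch (editorial): pairing attach and detach for a dataplane
 * handover.  The IOThread AioContext and the stop-side synchronization
 * (e.g. running the detach in ctx) are the caller's responsibility; the
 * helper below is hypothetical.
 */
static void G_GNUC_UNUSED example_dataplane_toggle(VirtQueue *vq,
                                                   AioContext *ctx,
                                                   bool start)
{
    if (start) {
        /* Kicks are serviced (and polled for) in ctx from here on. */
        virtio_queue_aio_attach_host_notifier(vq, ctx);
    } else {
        /* Leaves notification state unspecified; see the comment above. */
        virtio_queue_aio_detach_host_notifier(vq, ctx);
    }
}
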
3867 void virtio_queue_host_notifier_read(EventNotifier *n)
3868 {
3869     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3870     if (event_notifier_test_and_clear(n)) {
3871         virtio_queue_notify_vq(vq);
3872     }
3873 }
3874 
3875 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3876 {
3877     return &vq->host_notifier;
3878 }
3879 
3880 EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
3881 {
3882     return &vdev->config_notifier;
3883 }
3884 
3885 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3886 {
3887     vq->host_notifier_enabled = enabled;
3888 }
3889 
3890 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3891                                       MemoryRegion *mr, bool assign)
3892 {
3893     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3894     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3895 
3896     if (k->set_host_notifier_mr) {
3897         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3898     }
3899 
3900     return -1;
3901 }
3902 
3903 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3904 {
3905     g_free(vdev->bus_name);
3906     vdev->bus_name = g_strdup(bus_name);
3907 }
3908 
3909 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3910 {
3911     va_list ap;
3912 
3913     va_start(ap, fmt);
3914     error_vreport(fmt, ap);
3915     va_end(ap);
3916 
3917     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3918         vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3919         virtio_notify_config(vdev);
3920     }
3921 
3922     vdev->broken = true;
3923 }
3924 
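/*
 * Usage sketch (editorial, hypothetical device code): virtio_error() is the
 * preferred reaction to a fatal guest-visible inconsistency; it marks the
 * device broken and, for VIRTIO-1, asks the driver to reset it rather than
 * aborting QEMU.
 */
static void G_GNUC_UNUSED example_check_request_len(VirtIODevice *vdev,
                                                    size_t len, size_t max)
{
    if (len > max) {
        virtio_error(vdev, "request length %zu exceeds %zu", len, max);
    }
}
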
3925 static void virtio_memory_listener_commit(MemoryListener *listener)
3926 {
3927     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3928     int i;
3929 
3930     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3931         if (vdev->vq[i].vring.num == 0) {
3932             break;
3933         }
3934         virtio_init_region_cache(vdev, i);
3935     }
3936 }
3937 
3938 static void virtio_device_realize(DeviceState *dev, Error **errp)
3939 {
3940     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3941     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3942     Error *err = NULL;
3943 
3944     /* Devices should either use vmsd or the load/save methods */
3945     assert(!vdc->vmsd || !vdc->load);
3946 
3947     if (vdc->realize != NULL) {
3948         vdc->realize(dev, &err);
3949         if (err != NULL) {
3950             error_propagate(errp, err);
3951             return;
3952         }
3953     }
3954 
3955     /* Devices should not use both ioeventfd and notification data feature */
3956     virtio_device_check_notification_compatibility(vdev, &err);
3957     if (err != NULL) {
3958         error_propagate(errp, err);
3959         vdc->unrealize(dev);
3960         return;
3961     }
3962 
3963     virtio_bus_device_plugged(vdev, &err);
3964     if (err != NULL) {
3965         error_propagate(errp, err);
3966         vdc->unrealize(dev);
3967         return;
3968     }
3969 
3970     vdev->listener.commit = virtio_memory_listener_commit;
3971     vdev->listener.name = "virtio";
3972     memory_listener_register(&vdev->listener, vdev->dma_as);
3973 }
3974 
3975 static void virtio_device_unrealize(DeviceState *dev)
3976 {
3977     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3978     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3979 
3980     memory_listener_unregister(&vdev->listener);
3981     virtio_bus_device_unplugged(vdev);
3982 
3983     if (vdc->unrealize != NULL) {
3984         vdc->unrealize(dev);
3985     }
3986 
3987     g_free(vdev->bus_name);
3988     vdev->bus_name = NULL;
3989 }
3990 
3991 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3992 {
3993     int i;
3994     if (!vdev->vq) {
3995         return;
3996     }
3997 
3998     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3999         if (vdev->vq[i].vring.num == 0) {
4000             break;
4001         }
4002         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
4003     }
4004     g_free(vdev->vq);
4005 }
4006 
4007 static void virtio_device_instance_finalize(Object *obj)
4008 {
4009     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
4010 
4011     virtio_device_free_virtqueues(vdev);
4012 
4013     g_free(vdev->config);
4014     g_free(vdev->vector_queues);
4015 }
4016 
4017 static const Property virtio_properties[] = {
4018     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
4019     DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
4020     DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
4021     DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
4022                      disable_legacy_check, false),
4023 };
4024 
4025 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
4026 {
4027     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
4028     int i, n, r, err;
4029 
4030     /*
4031      * Batch all the host notifiers in a single transaction to avoid
4032      * quadratic time complexity in address_space_update_ioeventfds().
4033      */
4034     memory_region_transaction_begin();
4035     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4036         VirtQueue *vq = &vdev->vq[n];
4037         if (!virtio_queue_get_num(vdev, n)) {
4038             continue;
4039         }
4040         r = virtio_bus_set_host_notifier(qbus, n, true);
4041         if (r < 0) {
4042             err = r;
4043             goto assign_error;
4044         }
4045         event_notifier_set_handler(&vq->host_notifier,
4046                                    virtio_queue_host_notifier_read);
4047     }
4048 
4049     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4050         /* Kick right away to begin processing requests already in vring */
4051         VirtQueue *vq = &vdev->vq[n];
4052         if (!vq->vring.num) {
4053             continue;
4054         }
4055         event_notifier_set(&vq->host_notifier);
4056     }
4057     memory_region_transaction_commit();
4058     return 0;
4059 
4060 assign_error:
4061     i = n; /* save n for a second iteration after transaction is committed. */
4062     while (--n >= 0) {
4063         VirtQueue *vq = &vdev->vq[n];
4064         if (!virtio_queue_get_num(vdev, n)) {
4065             continue;
4066         }
4067 
4068         event_notifier_set_handler(&vq->host_notifier, NULL);
4069         r = virtio_bus_set_host_notifier(qbus, n, false);
4070         assert(r >= 0);
4071     }
4072     /*
4073      * The transaction expects the ioeventfds to be open when it
4074      * commits. Do it now, before the cleanup loop.
4075      */
4076     memory_region_transaction_commit();
4077 
4078     while (--i >= 0) {
4079         if (!virtio_queue_get_num(vdev, i)) {
4080             continue;
4081         }
4082         virtio_bus_cleanup_host_notifier(qbus, i);
4083     }
4084     return err;
4085 }
4086 
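/*
 * Editorial note: the begin/commit bracket above is the usual idiom for
 * batching ioeventfd changes so the address space is updated once:
 *
 *     memory_region_transaction_begin();
 *     ... assign or deassign one host notifier per virtqueue ...
 *     memory_region_transaction_commit();   (single ioeventfd update)
 */
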
4087 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
4088 {
4089     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4090     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4091 
4092     return virtio_bus_start_ioeventfd(vbus);
4093 }
4094 
4095 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
4096 {
4097     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
4098     int n, r;
4099 
4100     /*
4101      * Batch all the host notifiers in a single transaction to avoid
4102      * quadratic time complexity in address_space_update_ioeventfds().
4103      */
4104     memory_region_transaction_begin();
4105     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4106         VirtQueue *vq = &vdev->vq[n];
4107 
4108         if (!virtio_queue_get_num(vdev, n)) {
4109             continue;
4110         }
4111         event_notifier_set_handler(&vq->host_notifier, NULL);
4112         r = virtio_bus_set_host_notifier(qbus, n, false);
4113         assert(r >= 0);
4114     }
4115     /*
4116      * The transaction expects the ioeventfds to be open when it
4117      * commits. Do it now, before the cleanup loop.
4118      */
4119     memory_region_transaction_commit();
4120 
4121     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
4122         if (!virtio_queue_get_num(vdev, n)) {
4123             continue;
4124         }
4125         virtio_bus_cleanup_host_notifier(qbus, n);
4126     }
4127 }
4128 
4129 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
4130 {
4131     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4132     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4133 
4134     return virtio_bus_grab_ioeventfd(vbus);
4135 }
4136 
4137 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
4138 {
4139     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4140     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4141 
4142     virtio_bus_release_ioeventfd(vbus);
4143 }
4144 
4145 static void virtio_device_class_init(ObjectClass *klass, void *data)
4146 {
4147     /* Set the default value here. */
4148     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
4149     DeviceClass *dc = DEVICE_CLASS(klass);
4150 
4151     dc->realize = virtio_device_realize;
4152     dc->unrealize = virtio_device_unrealize;
4153     dc->bus_type = TYPE_VIRTIO_BUS;
4154     device_class_set_props(dc, virtio_properties);
4155     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
4156     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
4157 
4158     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
4159 }
4160 
4161 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
4162 {
4163     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
4164     VirtioBusState *vbus = VIRTIO_BUS(qbus);
4165 
4166     return virtio_bus_ioeventfd_enabled(vbus);
4167 }
4168 
4169 VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
4170                                                  uint16_t queue,
4171                                                  Error **errp)
4172 {
4173     VirtIODevice *vdev;
4174     VirtQueueStatus *status;
4175 
4176     vdev = qmp_find_virtio_device(path);
4177     if (vdev == NULL) {
4178         error_setg(errp, "Path %s is not a VirtIODevice", path);
4179         return NULL;
4180     }
4181 
4182     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4183         error_setg(errp, "Invalid virtqueue number %d", queue);
4184         return NULL;
4185     }
4186 
4187     status = g_new0(VirtQueueStatus, 1);
4188     status->name = g_strdup(vdev->name);
4189     status->queue_index = vdev->vq[queue].queue_index;
4190     status->inuse = vdev->vq[queue].inuse;
4191     status->vring_num = vdev->vq[queue].vring.num;
4192     status->vring_num_default = vdev->vq[queue].vring.num_default;
4193     status->vring_align = vdev->vq[queue].vring.align;
4194     status->vring_desc = vdev->vq[queue].vring.desc;
4195     status->vring_avail = vdev->vq[queue].vring.avail;
4196     status->vring_used = vdev->vq[queue].vring.used;
4197     status->used_idx = vdev->vq[queue].used_idx;
4198     status->signalled_used = vdev->vq[queue].signalled_used;
4199     status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
4200 
4201     if (vdev->vhost_started) {
4202         VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
4203         struct vhost_dev *hdev = vdc->get_vhost(vdev);
4204 
4205         /* Check if the vq index exists for vhost as well. */
4206         if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
4207             status->has_last_avail_idx = true;
4208 
4209             int vhost_vq_index =
4210                 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
4211             struct vhost_vring_state state = {
4212                 .index = vhost_vq_index,
4213             };
4214 
4215             status->last_avail_idx =
4216                 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
4217         }
4218     } else {
4219         status->has_shadow_avail_idx = true;
4220         status->has_last_avail_idx = true;
4221         status->last_avail_idx = vdev->vq[queue].last_avail_idx;
4222         status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
4223     }
4224 
4225     return status;
4226 }
4227 
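/*
 * Example (editorial): the function above backs the QMP command
 * "x-query-virtio-queue-status".  The QOM path below is illustrative:
 *
 *     { "execute": "x-query-virtio-queue-status",
 *       "arguments": {
 *           "path": "/machine/peripheral-anon/device[1]/virtio-backend",
 *           "queue": 0 } }
 */
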
4228 static strList *qmp_decode_vring_desc_flags(uint16_t flags)
4229 {
4230     strList *list = NULL;
4231     strList *node;
4232     int i;
4233 
4234     struct {
4235         uint16_t flag;
4236         const char *value;
4237     } map[] = {
4238         { VRING_DESC_F_NEXT, "next" },
4239         { VRING_DESC_F_WRITE, "write" },
4240         { VRING_DESC_F_INDIRECT, "indirect" },
4241         { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
4242         { 1 << VRING_PACKED_DESC_F_USED, "used" },
4243         { 0, "" }
4244     };
4245 
4246     for (i = 0; map[i].flag; i++) {
4247         if ((map[i].flag & flags) == 0) {
4248             continue;
4249         }
4250         node = g_malloc0(sizeof(strList));
4251         node->value = g_strdup(map[i].value);
4252         node->next = list;
4253         list = node;
4254     }
4255 
4256     return list;
4257 }
4258 
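/*
 * Worked example (editorial note): flags = VRING_DESC_F_NEXT |
 * VRING_DESC_F_WRITE decodes to ["write", "next"]; nodes are prepended, so
 * the resulting list is in reverse map order.
 */
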
4259 VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
4260                                                      uint16_t queue,
4261                                                      bool has_index,
4262                                                      uint16_t index,
4263                                                      Error **errp)
4264 {
4265     VirtIODevice *vdev;
4266     VirtQueue *vq;
4267     VirtioQueueElement *element = NULL;
4268 
4269     vdev = qmp_find_virtio_device(path);
4270     if (vdev == NULL) {
4271         error_setg(errp, "Path %s is not a VirtIO device", path);
4272         return NULL;
4273     }
4274 
4275     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4276         error_setg(errp, "Invalid virtqueue number %d", queue);
4277         return NULL;
4278     }
4279     vq = &vdev->vq[queue];
4280 
4281     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
4282         error_setg(errp, "Packed ring not supported");
4283         return NULL;
4284     } else {
4285         unsigned int head, i, max;
4286         VRingMemoryRegionCaches *caches;
4287         MemoryRegionCache indirect_desc_cache;
4288         MemoryRegionCache *desc_cache;
4289         VRingDesc desc;
4290         VirtioRingDescList *list = NULL;
4291         VirtioRingDescList *node;
4292         int rc; int ndescs;
4293 
4294         address_space_cache_init_empty(&indirect_desc_cache);
4295 
4296         RCU_READ_LOCK_GUARD();
4297 
4298         max = vq->vring.num;
4299 
4300         if (!has_index) {
4301             head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4302         } else {
4303             head = vring_avail_ring(vq, index % vq->vring.num);
4304         }
4305         i = head;
4306 
4307         caches = vring_get_region_caches(vq);
4308         if (!caches) {
4309             error_setg(errp, "Region caches not initialized");
4310             return NULL;
4311         }
4312         if (caches->desc.len < max * sizeof(VRingDesc)) {
4313             error_setg(errp, "Cannot map descriptor ring");
4314             return NULL;
4315         }
4316 
4317         desc_cache = &caches->desc;
4318         vring_split_desc_read(vdev, &desc, desc_cache, i);
4319         if (desc.flags & VRING_DESC_F_INDIRECT) {
4320             int64_t len;
4321             len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4322                                            desc.addr, desc.len, false);
4323             desc_cache = &indirect_desc_cache;
4324             if (len < desc.len) {
4325                 error_setg(errp, "Cannot map indirect buffer");
4326                 goto done;
4327             }
4328 
4329             max = desc.len / sizeof(VRingDesc);
4330             i = 0;
4331             vring_split_desc_read(vdev, &desc, desc_cache, i);
4332         }
4333 
4334         element = g_new0(VirtioQueueElement, 1);
4335         element->avail = g_new0(VirtioRingAvail, 1);
4336         element->used = g_new0(VirtioRingUsed, 1);
4337         element->name = g_strdup(vdev->name);
4338         element->index = head;
4339         element->avail->flags = vring_avail_flags(vq);
4340         element->avail->idx = vring_avail_idx(vq);
4341         element->avail->ring = head;
4342         element->used->flags = vring_used_flags(vq);
4343         element->used->idx = vring_used_idx(vq);
4344         ndescs = 0;
4345 
4346         do {
4347             /* A buggy driver may produce an infinite loop */
4348             if (ndescs >= max) {
4349                 break;
4350             }
4351             node = g_new0(VirtioRingDescList, 1);
4352             node->value = g_new0(VirtioRingDesc, 1);
4353             node->value->addr = desc.addr;
4354             node->value->len = desc.len;
4355             node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4356             node->next = list;
4357             list = node;
4358 
4359             ndescs++;
4360             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
4361         } while (rc == VIRTQUEUE_READ_DESC_MORE);
4362         element->descs = list;
4363 done:
4364         address_space_cache_destroy(&indirect_desc_cache);
4365     }
4366 
4367     return element;
4368 }
4369 
4370 static const TypeInfo virtio_device_info = {
4371     .name = TYPE_VIRTIO_DEVICE,
4372     .parent = TYPE_DEVICE,
4373     .instance_size = sizeof(VirtIODevice),
4374     .class_init = virtio_device_class_init,
4375     .instance_finalize = virtio_device_instance_finalize,
4376     .abstract = true,
4377     .class_size = sizeof(VirtioDeviceClass),
4378 };
4379 
4380 static void virtio_register_types(void)
4381 {
4382     type_register_static(&virtio_device_info);
4383 }
4384 
4385 type_init(virtio_register_types)
4386 
4387 QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
4388                                    QEMUBHFunc *cb, void *opaque,
4389                                    const char *name)
4390 {
4391     DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4392 
4393     return qemu_bh_new_full(cb, opaque, name,
4394                             &transport->mem_reentrancy_guard);
4395 }
4396
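/*
 * Usage sketch (editorial): device code allocates its bottom halves through
 * the wrapper above so they are covered by the transport's memory
 * re-entrancy guard.  "s" and example_bh_cb are hypothetical:
 *
 *     s->bh = virtio_bh_new_guarded_full(DEVICE(vdev), example_bh_cb, s,
 *                                        "example_bh_cb");
 */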