* the COPYING file in the top-level directory.
#include "qapi/qapi-commands-virtio.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/target-info.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "virtio-qmp.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"
[VIRTIO_ID_NET] = "virtio-net",
[VIRTIO_ID_BLOCK] = "virtio-blk",
[VIRTIO_ID_CONSOLE] = "virtio-serial",
[VIRTIO_ID_RNG] = "virtio-rng",
[VIRTIO_ID_BALLOON] = "virtio-balloon",
[VIRTIO_ID_IOMEM] = "virtio-iomem",
[VIRTIO_ID_RPMSG] = "virtio-rpmsg",
[VIRTIO_ID_SCSI] = "virtio-scsi",
[VIRTIO_ID_9P] = "virtio-9p",
[VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
[VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
[VIRTIO_ID_CAIF] = "virtio-caif",
[VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
[VIRTIO_ID_GPU] = "virtio-gpu",
[VIRTIO_ID_CLOCK] = "virtio-clk",
[VIRTIO_ID_INPUT] = "virtio-input",
[VIRTIO_ID_VSOCK] = "vhost-vsock",
[VIRTIO_ID_CRYPTO] = "virtio-crypto",
[VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
[VIRTIO_ID_PSTORE] = "virtio-pstore",
[VIRTIO_ID_IOMMU] = "virtio-iommu",
[VIRTIO_ID_MEM] = "virtio-mem",
[VIRTIO_ID_SOUND] = "virtio-sound",
[VIRTIO_ID_FS] = "virtio-user-fs",
[VIRTIO_ID_PMEM] = "virtio-pmem",
[VIRTIO_ID_RPMB] = "virtio-rpmb",
[VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
[VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
[VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
[VIRTIO_ID_SCMI] = "virtio-scmi",
[VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
[VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
[VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
[VIRTIO_ID_CAN] = "virtio-can",
[VIRTIO_ID_DMABUF] = "virtio-dmabuf",
[VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
[VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
[VIRTIO_ID_BT] = "virtio-bluetooth",
[VIRTIO_ID_GPIO] = "virtio-gpio"
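/*
 * Illustrative sketch (not part of the file above): a lookup over a
 * designated-initializer table like the one ending here must tolerate
 * the NULL holes the initializer leaves for unassigned IDs. The helper
 * name and fallback string below are assumptions for illustration only.
 */
static const char *example_virtio_id_to_name(uint16_t device_id,
                                             const char *const names[],
                                             size_t names_len)
{
    /* Designated initializers zero-fill the gaps, so check for NULL. */
    if (device_id >= names_len || names[device_id] == NULL) {
        return "unknown-virtio-device"; /* hypothetical fallback */
    }
    return names[device_id];
}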
vdev->name);
address_space_cache_destroy(&caches->desc);
address_space_cache_destroy(&caches->avail);
address_space_cache_destroy(&caches->used);
caches = qatomic_read(&vq->vring.caches);
qatomic_rcu_set(&vq->vring.caches, NULL);
VirtQueue *vq = &vdev->vq[n];
VRingMemoryRegionCaches *old = vq->vring.caches;
addr = vq->vring.desc;
packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
len = address_space_cache_init(&new->desc, vdev->dma_as,
len = address_space_cache_init(&new->used, vdev->dma_as,
vq->vring.used, size, true);
len = address_space_cache_init(&new->avail, vdev->dma_as,
vq->vring.avail, size, false);
qatomic_rcu_set(&vq->vring.caches, new);
address_space_cache_destroy(&new->avail);
address_space_cache_destroy(&new->used);
address_space_cache_destroy(&new->desc);
VRing *vring = &vdev->vq[n].vring;
if (!vring->num || !vring->desc || !vring->align) {
/* not yet setup -> nothing to do */
vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
vring->used = vring_align(vring->avail +
offsetof(VRingAvail, ring[vring->num]),
vring->align);
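/*
 * Worked example (added for clarity) of the split-ring layout computed
 * above, assuming num = 256 and align = 4096, with sizeof(VRingDesc) == 16:
 *
 *   avail = desc + 256 * 16                          = desc + 4096
 *   used  = vring_align(avail + offsetof(VRingAvail, ring[256]), 4096)
 *         = vring_align(desc + 4096 + 516, 4096)     = desc + 8192
 */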
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->flags);
virtio_tswap16s(vdev, &desc->next);
e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
return qatomic_rcu_read(&vq->vring.caches);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vring_avail_ring(vq, vq->vring.num);
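/*
 * Note (added for clarity): with VIRTIO_RING_F_EVENT_IDX the driver's
 * used_event field lives in the slot immediately past the avail ring,
 * i.e. avail->ring[num]; that is why the used_event read above indexes
 * the avail ring with vq->vring.num itself.
 */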
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
vq->used_idx = val;
VirtIODevice *vdev = vq->vdev;
flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
VirtIODevice *vdev = vq->vdev;
flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
if (!vq->notification) {
pa = offsetof(VRingUsed, ring[vq->vring.num]);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_packed_event_read(vq->vdev, &caches->used, &e);
} else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
return vq->notification;
vq->notification = enable;
if (!vq->vring.desc) {
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
return vq->vring.avail != 0;
vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
&desc->addr, sizeof(desc->addr));
&desc->id, sizeof(desc->id));
&desc->len, sizeof(desc->len));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap16s(vdev, &desc->id);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->id);
address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
virtio_stw_phys_cached(vdev, cache, off, desc->flags);
address_space_cache_invalidate(cache, off, sizeof(desc->flags));
if (virtio_device_disabled(vq->vdev)) {
if (unlikely(!vq->vring.avail)) {
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return vring_avail_idx(vq) == vq->last_avail_idx;
if (virtio_device_disabled(vq->vdev)) {
if (unlikely(!vq->vring.avail)) {
if (vq->shadow_avail_idx != vq->last_avail_idx) {
empty = vring_avail_idx(vq) == vq->last_avail_idx;
if (unlikely(!vq->vring.desc)) {
vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
vq->last_avail_idx);
return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
if (unlikely(!vq->vring.avail)) {
if (unlikely(!vq->vring.desc)) {
vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
if (virtio_device_disabled(vq->vdev)) {
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
AddressSpace *dma_as = vq->vdev->dma_as;
for (i = 0; i < elem->in_num; i++) {
size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
elem->in_sg[i].iov_len,
for (i = 0; i < elem->out_num; i++)
dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
elem->out_sg[i].iov_len,
elem->out_sg[i].iov_len);
vq->inuse -= elem->ndescs;
vq->last_avail_idx -= num;
if (vq->last_avail_idx < num) {
vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
vq->last_avail_wrap_counter ^= 1;
vq->last_avail_idx -= num;
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
if (num > vq->inuse) {
vq->inuse -= num;
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
if (unlikely(!vq->vring.used)) {
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = elem->index;
vq->used_elems[idx].index = elem->index;
vq->used_elems[idx].len = len;
vq->used_elems[idx].ndescs = elem->ndescs;
i = vq->used_idx % vq->vring.num;
max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
/* Search for element in vq->used_elems */
if (vq->used_elems[i].index == elem->index) {
vq->used_elems[i].len = len;
vq->used_elems[i].in_order_filled = true;
ndescs = vq->used_elems[i].ndescs;
if (unlikely(ndescs == 0 || ndescs > vq->vring.num)) {
__func__, vq->vdev->name, ndescs, i);
if (i >= vq->vring.num) {
i -= vq->vring.num;
__func__, vq->vdev->name, elem->index);
.id = elem->index,
.len = elem->len,
bool wrap_counter = vq->used_wrap_counter;
if (unlikely(!vq->vring.desc)) {
head = vq->used_idx + idx;
if (head >= vq->vring.num) {
head -= vq->vring.num;
vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
if (virtio_device_disabled(vq->vdev)) {
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
} else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
if (unlikely(!vq->vring.used)) {
old = vq->used_idx;
vq->inuse -= count;
if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
vq->signalled_used_valid = false;
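/*
 * For reference, the signalled_used check above mirrors the event-index
 * rule the virtio spec (and linux/virtio_ring.h) expresses with
 * vring_need_event(); a minimal sketch, with the arithmetic deliberately
 * done in 16 bits so ring-index wraparound modulo 2^16 comes out right:
 */
static inline int example_vring_need_event(uint16_t event_idx,
                                           uint16_t new_idx, uint16_t old)
{
    /* True iff event_idx lies in the half-open window (old, new_idx]. */
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}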
if (unlikely(!vq->vring.desc)) {
* the value of 'vq->used_idx' plus the 'ndescs'.
ndescs += vq->used_elems[0].ndescs;
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
ndescs += vq->used_elems[i].ndescs;
virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
vq->inuse -= ndescs;
vq->used_idx += ndescs;
if (vq->used_idx >= vq->vring.num) {
vq->used_idx -= vq->vring.num;
vq->used_wrap_counter ^= 1;
vq->signalled_used_valid = false;
unsigned int i = vq->used_idx % vq->vring.num;
uint16_t old = vq->used_idx;
packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
if (unlikely(!vq->vring.desc)) {
} else if (unlikely(!vq->vring.used)) {
/* First expected in-order element isn't ready, nothing to do */
if (!vq->used_elems[i].in_order_filled) {
/* Search for filled elements in-order */
while (vq->used_elems[i].in_order_filled) {
if (packed && i != vq->used_idx) {
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
uelem.id = vq->used_elems[i].index;
uelem.len = vq->used_elems[i].len;
vq->used_elems[i].in_order_filled = false;
ndescs += vq->used_elems[i].ndescs;
i += vq->used_elems[i].ndescs;
if (i >= vq->vring.num) {
i -= vq->vring.num;
virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
vq->used_idx += ndescs;
if (vq->used_idx >= vq->vring.num) {
vq->used_idx -= vq->vring.num;
vq->used_wrap_counter ^= 1;
vq->signalled_used_valid = false;
if (unlikely((int16_t)(new - vq->signalled_used) <
(uint16_t)(new - old))) {
vq->signalled_used_valid = false;
vq->inuse -= ndescs;
if (virtio_device_disabled(vq->vdev)) {
vq->inuse -= count;
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
} else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
num_heads = avail_idx - idx;
if (num_heads > vq->vring.num) {
virtio_error(vq->vdev, "Guest moved used index from %u to %u",
idx, vq->shadow_avail_idx);
return -EINVAL;
* On success, callers read a descriptor at vq->last_avail_idx.
*head = vring_avail_ring(vq, idx % vq->vring.num);
if (*head >= vq->vring.num) {
virtio_error(vq->vdev, "Guest says index %u is available", *head);
VIRTQUEUE_READ_DESC_ERROR = -1,
/* Reads the 'desc->next' descriptor into '*desc'. */
if (!(desc->flags & VRING_DESC_F_NEXT)) {
if (desc->next >= max) {
virtio_error(vdev, "Desc next is %u", desc->next);
vring_split_desc_read(vdev, desc, desc_cache, desc->next);
VirtIODevice *vdev = vq->vdev;
idx = vq->last_avail_idx;
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int max = vq->vring.num;
vdev->dma_as,
if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
(*next) -= vq->vring.num;
vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
VirtIODevice *vdev = vq->vdev;
idx = vq->last_avail_idx;
wrap_counter = vq->last_avail_wrap_counter;
unsigned int max = vq->vring.num;
desc_cache = &caches->desc;
vdev->dma_as,
idx += num_bufs - total_bufs;
if (idx >= vq->vring.num) {
idx -= vq->vring.num;
vq->shadow_avail_idx = idx;
vq->shadow_avail_wrap_counter = wrap_counter;
if (unlikely(!vq->vring.desc)) {
desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
if (caches->desc.len < vq->vring.num * desc_size) {
virtio_error(vq->vdev, "Cannot map descriptor ring");
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
return (int)vq->shadow_avail_idx;
return -1;
iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
sz -= len;
cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
sg[i].iov_base = dma_memory_map(vdev->dma_as,
virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
elem->out_num = out_num;
elem->in_num = in_num;
elem->in_addr = (void *)elem + in_addr_ofs;
elem->out_addr = (void *)elem + out_addr_ofs;
elem->in_sg = (void *)elem + in_sg_ofs;
elem->out_sg = (void *)elem + out_sg_ofs;
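/*
 * Layout produced by the offsets above, in one contiguous allocation
 * (diagram added for clarity):
 *
 *   [ VirtQueueElement | in_addr[in_num] | out_addr[out_num]
 *     | in_sg[in_num] | out_sg[out_num] ]
 *
 * Each array starts at an offset rounded up to the alignment of its
 * element type, so a single allocation (and a single free) serves the
 * element header and all four trailing arrays.
 */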
VirtIODevice *vdev = vq->vdev;
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
vring_set_avail_event(vq, vq->last_avail_idx);
if (caches->desc.len < max * sizeof(VRingDesc)) {
desc_cache = &caches->desc;
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
VIRTQUEUE_MAX_SIZE - out_num, true,
elem->index = head;
elem->ndescs = 1;
elem->out_addr[i] = addr[i];
elem->out_sg[i] = iov[i];
elem->in_addr[i] = addr[out_num + i];
elem->in_sg[i] = iov[out_num + i];
idx = (vq->last_avail_idx - 1) % vq->vring.num;
vq->used_elems[idx].index = elem->index;
vq->used_elems[idx].len = elem->len;
vq->used_elems[idx].ndescs = elem->ndescs;
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
VirtIODevice *vdev = vq->vdev;
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
i = vq->last_avail_idx;
if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
desc_cache = &caches->desc;
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
VIRTQUEUE_MAX_SIZE - out_num, true,
elem->out_addr[i] = addr[i];
elem->out_sg[i] = iov[i];
elem->in_addr[i] = addr[out_num + i];
elem->in_sg[i] = iov[out_num + i];
elem->index = id;
elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
vq->used_elems[vq->last_avail_idx].index = elem->index;
vq->used_elems[vq->last_avail_idx].len = elem->len;
vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
vq->last_avail_idx += elem->ndescs;
vq->inuse += elem->ndescs;
if (vq->last_avail_idx >= vq->vring.num) {
vq->last_avail_idx -= vq->vring.num;
vq->last_avail_wrap_counter ^= 1;
vq->shadow_avail_idx = vq->last_avail_idx;
vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
if (virtio_device_disabled(vq->vdev)) {
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
VirtIODevice *vdev = vq->vdev;
desc_cache = &caches->desc;
while (vq->inuse < vq->vring.num) {
unsigned int idx = vq->last_avail_idx;
vq->last_avail_idx, true);
if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
vq->vring.num, &idx, false)) {
vq->last_avail_idx += elem.ndescs;
if (vq->last_avail_idx >= vq->vring.num) {
vq->last_avail_idx -= vq->vring.num;
vq->last_avail_wrap_counter ^= 1;
VirtIODevice *vdev = vq->vdev;
while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
vq->inuse++;
vq->last_avail_idx++;
vring_set_avail_event(vq, vq->last_avail_idx);
struct VirtIODevice *vdev = vq->vdev;
if (virtio_device_disabled(vq->vdev)) {
* In the meanwhile, since the in-memory layout of VirtQueueElement
elem->index = data.index;
for (i = 0; i < elem->in_num; i++) {
elem->in_addr[i] = data.in_addr[i];
for (i = 0; i < elem->out_num; i++) {
elem->out_addr[i] = data.out_addr[i];
for (i = 0; i < elem->in_num; i++) {
elem->in_sg[i].iov_base = 0;
elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
for (i = 0; i < elem->out_num; i++) {
elem->out_sg[i].iov_base = 0;
elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
qemu_get_be32s(f, &elem->ndescs);
data.index = elem->index;
data.in_num = elem->in_num;
data.out_num = elem->out_num;
for (i = 0; i < elem->in_num; i++) {
data.in_addr[i] = elem->in_addr[i];
for (i = 0; i < elem->out_num; i++) {
data.out_addr[i] = elem->out_addr[i];
for (i = 0; i < elem->in_num; i++) {
data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
for (i = 0; i < elem->out_num; i++) {
data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
qemu_put_be32s(f, &elem->ndescs);
if (k->notify) {
k->notify(qbus->parent, vector);
return -EFAULT;
if (k->validate_features) {
return k->validate_features(vdev);
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
if (k->set_status) {
ret = k->set_status(vdev, val);
vdev->name, val, vdev->status);
vdev->status = val;
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
vdev->vq[i].last_avail_wrap_counter = true;
vdev->vq[i].shadow_avail_wrap_counter = true;
vdev->vq[i].used_wrap_counter = true;
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
if (k->queue_reset) {
k->queue_reset(vdev, queue_index);
* be re-enabled for new machine types only, and also after
if (k->queue_enable) {
k->queue_enable(vdev, queue_index);
if (!vdev->vq[n].vring.num) {
vdev->vq[n].vring.desc = addr;
return vdev->vq[n].vring.desc;
if (!vdev->vq[n].vring.num) {
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
if (!!num != !!vdev->vq[n].vring.num ||
vdev->vq[n].vring.num = num;
return QLIST_FIRST(&vdev->vector_queues[vector]);
return vdev->vq[n].vring.num;
return vdev->vq[n].vring.num_default;
/* virtio-1 compliant devices cannot change the alignment */
error_report("tried to modify queue alignment for virtio-1 device");
assert(k->has_variable_vring_alignment);
vdev->vq[n].vring.align = align;
if (!vq->vring.desc) {
* 16-bit data for packed VQs include 1-bit wrap counter and
* 15-bit shadow_avail_idx.
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
vq->shadow_avail_idx = shadow_avail_idx;
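/*
 * Sketch (hypothetical helper names, added for illustration) of the
 * 16-bit encoding handled above for packed virtqueues: bit 15 carries
 * the wrap counter and the low 15 bits carry the ring index.
 */
static inline uint16_t example_pack_avail_idx(uint16_t idx, bool wrap)
{
    return (idx & 0x7FFF) | ((uint16_t)wrap << 15);
}

static inline void example_unpack_avail_idx(uint16_t v,
                                            uint16_t *idx, bool *wrap)
{
    *idx = v & 0x7FFF;
    *wrap = (v >> 15) & 0x1;
}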
if (vq->vring.desc && vq->handle_output) {
VirtIODevice *vdev = vq->vdev;
if (unlikely(vdev->broken)) {
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
vq->handle_output(vdev, vq);
if (unlikely(vdev->start_on_kick)) {
VirtQueue *vq = &vdev->vq[n];
if (unlikely(!vq->vring.desc || vdev->broken)) {
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
if (vq->host_notifier_enabled) {
event_notifier_set(&vq->host_notifier);
} else if (vq->handle_output) {
vq->handle_output(vdev, vq);
if (unlikely(vdev->start_on_kick)) {
return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
VirtQueue *vq = &vdev->vq[n];
if (vdev->vector_queues &&
vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
vdev->vq[n].vector = vector;
if (vdev->vector_queues &&
QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
if (vdev->vq[i].vring.num == 0)
vdev->vq[i].vring.num = queue_size;
vdev->vq[i].vring.num_default = queue_size;
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
vdev->vq[i].handle_output = handle_output;
vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
return &vdev->vq[i];
vq->vring.num = 0;
vq->vring.num_default = 0;
vq->handle_output = NULL;
g_free(vq->used_elems);
vq->used_elems = NULL;
virtio_delete_queue(&vdev->vq[n]);
uint8_t old = qatomic_read(&vdev->isr);
qatomic_or(&vdev->isr, value);
!vq->inuse && virtio_queue_empty(vq)) {
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
old = vq->signalled_used;
new = vq->signalled_used = vq->used_idx;
off -= vq->vring.num;
vring_packed_event_read(vdev, &caches->avail, &e);
old = vq->signalled_used;
new = vq->signalled_used = vq->used_idx;
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
* windows drivers included in virtio-win 1.8.0 (circa 2015) are
virtio_set_isr(vq->vdev, 0x1);
defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
virtio_set_isr(vq->vdev, 0x1);
virtio_notify_vector(vq->vdev, vq->vector);
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
vdev->generation++;
virtio_notify_vector(vdev, vdev->config_vector);
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
return vdev->device_endian != virtio_default_endian();
return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
return (vdev->host_features >> 32) != 0;
if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
return k->has_extra_state &&
k->has_extra_state(qbus->parent);
return vdev->broken;
return vdev->started;
return vdev->disabled;
if (!k->load_extra_state) {
return -1;
return k->load_extra_state(qbus->parent, f);
k->save_extra_state(qbus->parent, f);
uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
if (k->save_config) {
k->save_config(qbus->parent, f);
qemu_put_8s(f, &vdev->status);
qemu_put_8s(f, &vdev->isr);
qemu_put_be16s(f, &vdev->queue_sel);
qemu_put_be32(f, vdev->config_len);
qemu_put_buffer(f, vdev->config, vdev->config_len);
if (vdev->vq[i].vring.num == 0)
if (vdev->vq[i].vring.num == 0)
qemu_put_be32(f, vdev->vq[i].vring.num);
if (k->has_variable_vring_alignment) {
qemu_put_be32(f, vdev->vq[i].vring.align);
* subsections for VIRTIO-1 devices.
qemu_put_be64(f, vdev->vq[i].vring.desc);
qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
if (k->save_queue) {
k->save_queue(qbus->parent, i, f);
if (vdc->save != NULL) {
vdc->save(vdev, f);
if (vdc->vmsd) {
int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
return virtio_load(vdev, f, dc->vmsd->version_id);
bool bad = (val & ~(vdev->host_features)) != 0;
val &= vdev->host_features;
if (k->set_features) {
k->set_features(vdev, val);
vdev->guest_features = val;
return bad ? -1 : 0;
data->ret = virtio_set_features_nocheck(data->vdev, data->val);
aio_co_wake(data->co);
if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
return -EINVAL;
__func__, vdev->name);
if (vdev->vq[i].vring.num != 0) {
if (!virtio_device_started(vdev, vdev->status) &&
vdev->start_on_kick = true;
vdev->device_endian = virtio_current_cpu_endian();
vdev->device_endian = virtio_default_endian();
if (k->get_vhost) {
struct vhost_dev *hdev = k->get_vhost(vdev);
/* Only reset when vhost back-end is connected */
if (hdev && hdev->vhost_ops) {
if (k->reset) {
k->reset(vdev);
vdev->start_on_kick = false;
vdev->started = false;
vdev->broken = false;
vdev->queue_sel = 0;
vdev->status = 0;
vdev->disabled = false;
qatomic_set(&vdev->isr, 0);
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
k->ioeventfd_enabled(proxy)) {
size_t config_size = params->min_size;
const VirtIOFeature *feature_sizes = params->feature_sizes;
assert(config_size <= params->max_size);
vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
if (k->load_config) {
ret = k->load_config(qbus->parent, f);
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
return -1;
* Temporarily set guest_features low bits - needed by
* virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
* Note: devices should always test host features in future - don't create
vdev->guest_features = features;
qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
while (config_len > vdev->config_len) {
config_len--;
return -1;
if (vdc->pre_load_queues) {
ret = vdc->pre_load_queues(vdev, num);
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
vdev->vq[i].vring.desc = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
i, vdev->vq[i].last_avail_idx);
return -1;
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
if (vdc->load != NULL) {
ret = vdc->load(vdev, f, version_id);
if (vdc->vmsd) {
ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
vdev->device_endian = virtio_default_endian();
* Subsection load filled vdev->guest_features. Run them
* through virtio_set_features to sanity-check them against
uint64_t features64 = vdev->guest_features;
features64, vdev->host_features);
return -1;
features, vdev->host_features);
return -1;
if (!virtio_device_started(vdev, vdev->status) &&
vdev->start_on_kick = true;
if (vdev->vq[i].vring.desc) {
* VIRTIO-1 devices migrate desc, used, and avail ring addresses so
vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
vdev->vq[i].shadow_avail_wrap_counter =
vdev->vq[i].last_avail_wrap_counter;
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
if (nheads > vdev->vq[i].vring.num) {
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, nheads);
vdev->vq[i].used_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].inuse = 0;
vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
vdev->vq[i].used_idx);
if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
i, vdev->vq[i].vring.num,
vdev->vq[i].last_avail_idx,
vdev->vq[i].used_idx);
return -1;
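/*
 * Note (illustrative): the in-flight count above is computed in uint16_t
 * on purpose. Ring indices grow monotonically modulo 2^16, so e.g.
 * last_avail_idx == 5 with used_idx == 65530 yields
 * (uint16_t)(5 - 65530) == 11 descriptors in flight; only a result
 * larger than vring.num indicates corrupt migration state.
 */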
if (vdc->post_load) {
ret = vdc->post_load(vdev);
qemu_del_vm_change_state_handler(vdev->vmstate);
bool backend_run = running && virtio_device_started(vdev, vdev->status);
vdev->vm_running = running;
virtio_set_status(vdev, vdev->status);
if (k->vmstate_change) {
k->vmstate_change(qbus->parent, backend_run);
int ret = virtio_set_status(vdev, vdev->status);
object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
vdev->vector_queues =
g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
vdev->start_on_kick = false;
vdev->started = false;
vdev->vhost_started = false;
vdev->device_id = device_id;
vdev->status = 0;
qatomic_set(&vdev->isr, 0);
vdev->queue_sel = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
vdev->vm_running = runstate_is_running();
vdev->broken = false;
vdev->vq[i].vector = VIRTIO_NO_VECTOR;
vdev->vq[i].vdev = vdev;
vdev->vq[i].queue_index = i;
vdev->vq[i].host_notifier_enabled = false;
vdev->name = virtio_id_to_name(device_id);
vdev->config_len = config_size;
if (vdev->config_len) {
vdev->config = g_malloc0(config_size);
vdev->config = NULL;
vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
switch (vdev->device_id) {
return vdev->disable_legacy_check;
return vdev->vq[n].vring.desc;
if (k->queue_enabled) {
return k->queue_enabled(qbus->parent, n);
return vdev->vq[n].vring.avail;
return vdev->vq[n].vring.used;
return sizeof(VRingDesc) * vdev->vq[n].vring.num;
sizeof(uint16_t) * vdev->vq[n].vring.num + s;
sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
avail = vdev->vq[n].last_avail_idx;
avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
used = vdev->vq[n].used_idx;
used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
return vdev->vq[n].last_avail_idx;
struct VirtQueue *vq = &vdev->vq[n];
vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
vq->last_avail_wrap_counter =
vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
vq->used_idx = idx & 0x7fff;
vq->used_wrap_counter = !!(idx & 0x8000);
vdev->vq[n].last_avail_idx = idx;
vdev->vq[n].shadow_avail_idx = idx;
if (vdev->vq[n].vring.desc) {
vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
if (vdev->vq[n].vring.desc) {
vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
vdev->vq[n].signalled_used_valid = false;
return vdev->vq + n;
return vq->queue_index;
event_notifier_set_handler(&vq->guest_notifier,
event_notifier_set_handler(&vq->guest_notifier, NULL);
virtio_queue_guest_notifier_read(&vq->guest_notifier);
n = &vdev->config_notifier;
return &vq->guest_notifier;
return vq->vring.desc && !virtio_queue_empty(vq);
* Re-enable them. (And if detach has not been used before, notifications
aio_set_event_notifier(ctx, &vq->host_notifier,
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
event_notifier_set(&vq->host_notifier);
* function does not pop all elements. When the virtqueue is left non-empty
aio_set_event_notifier(ctx, &vq->host_notifier,
event_notifier_set(&vq->host_notifier);
aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
* we potentially re-attach it. The attach_host_notifier functions will
return &vq->host_notifier;
return &vdev->config_notifier;
vq->host_notifier_enabled = enabled;
if (k->set_host_notifier_mr) {
return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
return -1;
g_free(vdev->bus_name);
vdev->bus_name = g_strdup(bus_name);
vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
vdev->broken = true;
if (vdev->vq[i].vring.num == 0) {
assert(!vdc->vmsd || !vdc->load);
if (vdc->realize != NULL) {
vdc->realize(dev, &err);
vdc->unrealize(dev);
vdc->unrealize(dev);
vdev->listener.commit = virtio_memory_listener_commit;
vdev->listener.name = "virtio";
memory_listener_register(&vdev->listener, vdev->dma_as);
memory_listener_unregister(&vdev->listener);
if (vdc->unrealize != NULL) {
vdc->unrealize(dev);
g_free(vdev->bus_name);
vdev->bus_name = NULL;
if (!vdev->vq) {
if (vdev->vq[i].vring.num == 0) {
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
g_free(vdev->vq);
g_free(vdev->config);
g_free(vdev->vector_queues);
DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
VirtQueue *vq = &vdev->vq[n];
event_notifier_set_handler(&vq->host_notifier,
VirtQueue *vq = &vdev->vq[n];
if (!vq->vring.num) {
event_notifier_set(&vq->host_notifier);
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
event_notifier_set_handler(&vq->host_notifier, NULL);
while (--i >= 0) {
VirtQueue *vq = &vdev->vq[n];
event_notifier_set_handler(&vq->host_notifier, NULL);
dc->realize = virtio_device_realize;
dc->unrealize = virtio_device_unrealize;
dc->bus_type = TYPE_VIRTIO_BUS;
vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
status->name = g_strdup(vdev->name);
status->queue_index = vdev->vq[queue].queue_index;
status->inuse = vdev->vq[queue].inuse;
status->vring_num = vdev->vq[queue].vring.num;
status->vring_num_default = vdev->vq[queue].vring.num_default;
status->vring_align = vdev->vq[queue].vring.align;
status->vring_desc = vdev->vq[queue].vring.desc;
status->vring_avail = vdev->vq[queue].vring.avail;
status->vring_used = vdev->vq[queue].vring.used;
status->used_idx = vdev->vq[queue].used_idx;
status->signalled_used = vdev->vq[queue].signalled_used;
status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
if (vdev->vhost_started) {
struct vhost_dev *hdev = vdc->get_vhost(vdev);
/* check if vq index exists for vhost as well */
if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
status->has_last_avail_idx = true;
hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
status->last_avail_idx =
hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
status->has_shadow_avail_idx = true;
status->has_last_avail_idx = true;
status->last_avail_idx = vdev->vq[queue].last_avail_idx;
status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
node->value = g_strdup(map[i].value);
node->next = list;
vq = &vdev->vq[queue];
max = vq->vring.num;
head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
head = vring_avail_ring(vq, index % vq->vring.num);
if (caches->desc.len < max * sizeof(VRingDesc)) {
desc_cache = &caches->desc;
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
element->avail = g_new0(VirtioRingAvail, 1);
element->used = g_new0(VirtioRingUsed, 1);
element->name = g_strdup(vdev->name);
element->index = head;
element->avail->flags = vring_avail_flags(vq);
element->avail->idx = vring_avail_idx(vq);
element->avail->ring = head;
element->used->flags = vring_used_flags(vq);
element->used->idx = vring_used_idx(vq);
node->value = g_new0(VirtioRingDesc, 1);
node->value->addr = desc.addr;
node->value->len = desc.len;
node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
node->next = list;
element->descs = list;
DeviceState *transport = qdev_get_parent_bus(dev)->parent;
&transport->mem_reentrancy_guard);