Lines Matching refs: vq

218 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)  in virtio_virtqueue_reset_region_cache()  argument
222 caches = qatomic_read(&vq->vring.caches); in virtio_virtqueue_reset_region_cache()
223 qatomic_rcu_set(&vq->vring.caches, NULL); in virtio_virtqueue_reset_region_cache()
231 VirtQueue *vq = &vdev->vq[n]; in virtio_init_region_cache() local
232 VRingMemoryRegionCaches *old = vq->vring.caches; in virtio_init_region_cache()
239 addr = vq->vring.desc; in virtio_init_region_cache()
245 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? in virtio_init_region_cache()
256 vq->vring.used, size, true); in virtio_init_region_cache()
264 vq->vring.avail, size, false); in virtio_init_region_cache()
270 qatomic_rcu_set(&vq->vring.caches, new); in virtio_init_region_cache()
284 virtio_virtqueue_reset_region_cache(vq); in virtio_init_region_cache()
290 VRing *vring = &vdev->vq[n].vring; in virtio_queue_update_rings()
348 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) in vring_get_region_caches() argument
350 return qatomic_rcu_read(&vq->vring.caches); in vring_get_region_caches()
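
The caches pointer above follows the usual RCU publish/retract shape: writers swap in a fresh cache (or NULL it on reset) and readers re-load the pointer on every access, tolerating NULL. A standalone model of that life cycle, with C11 atomics standing in for QEMU's qatomic/RCU helpers and an invented Caches type (real readers also hold an RCU read lock, and the old cache is reclaimed via call_rcu rather than freed inline):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Caches { int generation; } Caches;

    static _Atomic(Caches *) vring_caches;   /* stand-in for vq->vring.caches */

    static void reset_region_cache(void)
    {
        /* Like virtio_virtqueue_reset_region_cache(): retract the pointer.
         * free() here is demo-only; QEMU defers reclaim to RCU. */
        free(atomic_exchange(&vring_caches, NULL));
    }

    static void init_region_cache(int generation)
    {
        Caches *fresh = malloc(sizeof(*fresh));
        fresh->generation = generation;
        free(atomic_exchange(&vring_caches, fresh));  /* publish, drop old */
    }

    static const Caches *get_region_caches(void)
    {
        /* Mirrors vring_get_region_caches(): may be NULL after a reset,
         * so every caller must check before dereferencing. */
        return atomic_load(&vring_caches);
    }

    int main(void)
    {
        init_region_cache(1);
        const Caches *c = get_region_caches();
        printf("generation: %d\n", c ? c->generation : -1);
        reset_region_cache();
        printf("after reset: %s\n", get_region_caches() ? "valid" : "NULL");
        return 0;
    }
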
354 static inline uint16_t vring_avail_flags(VirtQueue *vq) in vring_avail_flags() argument
356 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_flags()
363 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_flags()
367 static inline uint16_t vring_avail_idx(VirtQueue *vq) in vring_avail_idx() argument
369 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_idx()
376 vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_idx()
377 return vq->shadow_avail_idx; in vring_avail_idx()
381 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) in vring_avail_ring() argument
383 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_ring()
390 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_ring()
394 static inline uint16_t vring_get_used_event(VirtQueue *vq) in vring_get_used_event() argument
396 return vring_avail_ring(vq, vq->vring.num); in vring_get_used_event()
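
The pa offsets these accessors compute come straight from the split-ring layout in the virtio spec: flags and idx lead the avail area, and with VIRTIO_RING_F_EVENT_IDX the used_event word sits at ring[num], which is exactly what vring_get_used_event() reads above. A minimal layout check (the struct mirrors the spec, not QEMU's private header):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Split-ring avail area per the virtio spec: */
    typedef struct VRingAvail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[];    /* num entries, then used_event at ring[num] */
    } VRingAvail;

    int main(void)
    {
        size_t num = 256;   /* example ring size */
        printf("flags offset:      %zu\n", offsetof(VRingAvail, flags));
        printf("idx offset:        %zu\n", offsetof(VRingAvail, idx));
        printf("used_event offset: %zu\n",
               offsetof(VRingAvail, ring) + num * sizeof(uint16_t));
        return 0;
    }
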
400 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, in vring_used_write() argument
403 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_write()
410 virtio_tswap32s(vq->vdev, &uelem->id); in vring_used_write()
411 virtio_tswap32s(vq->vdev, &uelem->len); in vring_used_write()
417 static inline uint16_t vring_used_flags(VirtQueue *vq) in vring_used_flags() argument
419 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags()
426 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags()
430 static uint16_t vring_used_idx(VirtQueue *vq) in vring_used_idx() argument
432 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_idx()
439 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_idx()
443 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) in vring_used_idx_set() argument
445 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_idx_set()
449 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); in vring_used_idx_set()
453 vq->used_idx = val; in vring_used_idx_set()
457 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) in vring_used_flags_set_bit() argument
459 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags_set_bit()
460 VirtIODevice *vdev = vq->vdev; in vring_used_flags_set_bit()
468 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags_set_bit()
474 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) in vring_used_flags_unset_bit() argument
476 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags_unset_bit()
477 VirtIODevice *vdev = vq->vdev; in vring_used_flags_unset_bit()
485 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags_unset_bit()
491 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) in vring_set_avail_event() argument
495 if (!vq->notification) { in vring_set_avail_event()
499 caches = vring_get_region_caches(vq); in vring_set_avail_event()
504 pa = offsetof(VRingUsed, ring[vq->vring.num]); in vring_set_avail_event()
505 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); in vring_set_avail_event()
509 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable) in virtio_queue_split_set_notification() argument
513 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { in virtio_queue_split_set_notification()
514 vring_set_avail_event(vq, vring_avail_idx(vq)); in virtio_queue_split_set_notification()
516 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); in virtio_queue_split_set_notification()
518 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); in virtio_queue_split_set_notification()
526 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable) in virtio_queue_packed_set_notification() argument
533 caches = vring_get_region_caches(vq); in virtio_queue_packed_set_notification()
538 vring_packed_event_read(vq->vdev, &caches->used, &e); in virtio_queue_packed_set_notification()
542 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { in virtio_queue_packed_set_notification()
543 off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15; in virtio_queue_packed_set_notification()
544 vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap); in virtio_queue_packed_set_notification()
552 vring_packed_flags_write(vq->vdev, &caches->used, e.flags); in virtio_queue_packed_set_notification()
559 bool virtio_queue_get_notification(VirtQueue *vq) in virtio_queue_get_notification() argument
561 return vq->notification; in virtio_queue_get_notification()
564 void virtio_queue_set_notification(VirtQueue *vq, int enable) in virtio_queue_set_notification() argument
566 vq->notification = enable; in virtio_queue_set_notification()
568 if (!vq->vring.desc) { in virtio_queue_set_notification()
572 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_set_notification()
573 virtio_queue_packed_set_notification(vq, enable); in virtio_queue_set_notification()
575 virtio_queue_split_set_notification(vq, enable); in virtio_queue_set_notification()
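
Both suppression schemes set up here have a driver-side mirror: without EVENT_IDX the driver honours VRING_USED_F_NO_NOTIFY; with it, the device publishes an avail_event (vring_set_avail_event() above) and the driver kicks only when its avail index crosses that point. A self-contained sketch of that decision, using vring_need_event() exactly as the virtio spec defines it:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VRING_USED_F_NO_NOTIFY 1

    /* Spec definition; the uint16_t casts give mod-2^16 arithmetic. */
    static bool vring_need_event(uint16_t event, uint16_t new_idx,
                                 uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old_idx);
    }

    static bool driver_should_kick(bool event_idx, uint16_t used_flags,
                                   uint16_t avail_event,
                                   uint16_t old_avail, uint16_t new_avail)
    {
        if (!event_idx) {
            return !(used_flags & VRING_USED_F_NO_NOTIFY);
        }
        return vring_need_event(avail_event, new_avail, old_avail);
    }

    int main(void)
    {
        /* Device published avail_event = 10 via vring_set_avail_event(). */
        printf("%d\n", driver_should_kick(true, 0, 10, 8, 10));  /* 0: not crossed */
        printf("%d\n", driver_should_kick(true, 0, 10, 10, 11)); /* 1: crossed */
        return 0;
    }
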
579 int virtio_queue_ready(VirtQueue *vq) in virtio_queue_ready() argument
581 return vq->vring.avail != 0; in virtio_queue_ready()
673 static int virtio_queue_empty_rcu(VirtQueue *vq) in virtio_queue_empty_rcu() argument
675 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_empty_rcu()
679 if (unlikely(!vq->vring.avail)) { in virtio_queue_empty_rcu()
683 if (vq->shadow_avail_idx != vq->last_avail_idx) { in virtio_queue_empty_rcu()
687 return vring_avail_idx(vq) == vq->last_avail_idx; in virtio_queue_empty_rcu()
690 static int virtio_queue_split_empty(VirtQueue *vq) in virtio_queue_split_empty() argument
694 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_split_empty()
698 if (unlikely(!vq->vring.avail)) { in virtio_queue_split_empty()
702 if (vq->shadow_avail_idx != vq->last_avail_idx) { in virtio_queue_split_empty()
707 empty = vring_avail_idx(vq) == vq->last_avail_idx; in virtio_queue_split_empty()
712 static int virtio_queue_packed_empty_rcu(VirtQueue *vq) in virtio_queue_packed_empty_rcu() argument
717 if (unlikely(!vq->vring.desc)) { in virtio_queue_packed_empty_rcu()
721 cache = vring_get_region_caches(vq); in virtio_queue_packed_empty_rcu()
726 vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc, in virtio_queue_packed_empty_rcu()
727 vq->last_avail_idx); in virtio_queue_packed_empty_rcu()
729 return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter); in virtio_queue_packed_empty_rcu()
732 static int virtio_queue_packed_empty(VirtQueue *vq) in virtio_queue_packed_empty() argument
735 return virtio_queue_packed_empty_rcu(vq); in virtio_queue_packed_empty()
738 int virtio_queue_empty(VirtQueue *vq) in virtio_queue_empty() argument
740 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_empty()
741 return virtio_queue_packed_empty(vq); in virtio_queue_empty()
743 return virtio_queue_split_empty(vq); in virtio_queue_empty()
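
is_desc_avail(), used by the packed empty checks above, boils down to two flag bits compared against the driver's wrap counter. A standalone version with the spec's bit positions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VRING_PACKED_DESC_F_AVAIL 7
    #define VRING_PACKED_DESC_F_USED  15

    /* Packed-ring availability (virtio 1.1): a descriptor is available
     * when AVAIL != USED and AVAIL matches the driver's wrap counter. */
    static bool is_desc_avail(uint16_t flags, bool wrap_counter)
    {
        bool avail = flags & (1 << VRING_PACKED_DESC_F_AVAIL);
        bool used  = flags & (1 << VRING_PACKED_DESC_F_USED);
        return (avail != used) && (avail == wrap_counter);
    }

    int main(void)
    {
        /* First lap: driver sets AVAIL=1, USED=0 with wrap counter 1. */
        printf("%d\n", is_desc_avail(1u << VRING_PACKED_DESC_F_AVAIL, true));  /* 1 */
        /* Once the device marks it used, AVAIL == USED: not available. */
        printf("%d\n", is_desc_avail((1u << VRING_PACKED_DESC_F_AVAIL) |
                                     (1u << VRING_PACKED_DESC_F_USED), true)); /* 0 */
        return 0;
    }
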
747 static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_split_poll() argument
749 if (unlikely(!vq->vring.avail)) { in virtio_queue_split_poll()
753 return (uint16_t)shadow_idx != vring_avail_idx(vq); in virtio_queue_split_poll()
756 static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_packed_poll() argument
761 if (unlikely(!vq->vring.desc)) { in virtio_queue_packed_poll()
765 caches = vring_get_region_caches(vq); in virtio_queue_packed_poll()
770 vring_packed_desc_read(vq->vdev, &desc, &caches->desc, in virtio_queue_packed_poll()
773 return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter); in virtio_queue_packed_poll()
776 static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_poll() argument
778 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_poll()
782 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_poll()
783 return virtio_queue_packed_poll(vq, shadow_idx); in virtio_queue_poll()
785 return virtio_queue_split_poll(vq, shadow_idx); in virtio_queue_poll()
789 bool virtio_queue_enable_notification_and_check(VirtQueue *vq, in virtio_queue_enable_notification_and_check() argument
792 virtio_queue_set_notification(vq, 1); in virtio_queue_enable_notification_and_check()
795 return virtio_queue_poll(vq, (unsigned)opaque); in virtio_queue_enable_notification_and_check()
801 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_unmap_sg() argument
804 AddressSpace *dma_as = vq->vdev->dma_as; in virtqueue_unmap_sg()
835 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_detach_element() argument
838 vq->inuse -= elem->ndescs; in virtqueue_detach_element()
839 virtqueue_unmap_sg(vq, elem, len); in virtqueue_detach_element()
842 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num) in virtqueue_split_rewind() argument
844 vq->last_avail_idx -= num; in virtqueue_split_rewind()
847 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num) in virtqueue_packed_rewind() argument
849 if (vq->last_avail_idx < num) { in virtqueue_packed_rewind()
850 vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num; in virtqueue_packed_rewind()
851 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_rewind()
853 vq->last_avail_idx -= num; in virtqueue_packed_rewind()
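
The packed rewind above must undo a wrap when it steps back across slot 0; a compilable illustration of that arithmetic:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t num = 256, last_avail_idx = 1;
        bool wrap = true;
        unsigned int rewind = 3;    /* un-pop three descriptors */

        if (last_avail_idx < rewind) {
            last_avail_idx = num + last_avail_idx - rewind;
            wrap ^= 1;              /* crossed slot 0: flip the counter */
        } else {
            last_avail_idx -= rewind;
        }
        printf("idx=%u wrap=%d\n", (unsigned)last_avail_idx, wrap); /* idx=254 wrap=0 */
        return 0;
    }
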
865 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_unpop() argument
869 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_unpop()
870 virtqueue_packed_rewind(vq, 1); in virtqueue_unpop()
872 virtqueue_split_rewind(vq, 1); in virtqueue_unpop()
875 virtqueue_detach_element(vq, elem, len); in virtqueue_unpop()
890 bool virtqueue_rewind(VirtQueue *vq, unsigned int num) in virtqueue_rewind() argument
892 if (num > vq->inuse) { in virtqueue_rewind()
896 vq->inuse -= num; in virtqueue_rewind()
897 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_rewind()
898 virtqueue_packed_rewind(vq, num); in virtqueue_rewind()
900 virtqueue_split_rewind(vq, num); in virtqueue_rewind()
905 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_split_fill() argument
910 if (unlikely(!vq->vring.used)) { in virtqueue_split_fill()
914 idx = (idx + vq->used_idx) % vq->vring.num; in virtqueue_split_fill()
918 vring_used_write(vq, &uelem, idx); in virtqueue_split_fill()
921 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_packed_fill() argument
924 vq->used_elems[idx].index = elem->index; in virtqueue_packed_fill()
925 vq->used_elems[idx].len = len; in virtqueue_packed_fill()
926 vq->used_elems[idx].ndescs = elem->ndescs; in virtqueue_packed_fill()
929 static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_ordered_fill() argument
934 i = vq->used_idx % vq->vring.num; in virtqueue_ordered_fill()
940 max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num; in virtqueue_ordered_fill()
945 if (vq->used_elems[i].index == elem->index) { in virtqueue_ordered_fill()
946 vq->used_elems[i].len = len; in virtqueue_ordered_fill()
947 vq->used_elems[i].in_order_filled = true; in virtqueue_ordered_fill()
951 steps += vq->used_elems[i].ndescs; in virtqueue_ordered_fill()
952 i += vq->used_elems[i].ndescs; in virtqueue_ordered_fill()
954 if (i >= vq->vring.num) { in virtqueue_ordered_fill()
955 i -= vq->vring.num; in virtqueue_ordered_fill()
965 __func__, vq->vdev->name, elem->index); in virtqueue_ordered_fill()
969 static void virtqueue_packed_fill_desc(VirtQueue *vq, in virtqueue_packed_fill_desc() argument
980 bool wrap_counter = vq->used_wrap_counter; in virtqueue_packed_fill_desc()
982 if (unlikely(!vq->vring.desc)) { in virtqueue_packed_fill_desc()
986 head = vq->used_idx + idx; in virtqueue_packed_fill_desc()
987 if (head >= vq->vring.num) { in virtqueue_packed_fill_desc()
988 head -= vq->vring.num; in virtqueue_packed_fill_desc()
999 caches = vring_get_region_caches(vq); in virtqueue_packed_fill_desc()
1004 vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order); in virtqueue_packed_fill_desc()
1008 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_fill() argument
1011 trace_virtqueue_fill(vq, elem, len, idx); in virtqueue_fill()
1013 virtqueue_unmap_sg(vq, elem, len); in virtqueue_fill()
1015 if (virtio_device_disabled(vq->vdev)) { in virtqueue_fill()
1019 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) { in virtqueue_fill()
1020 virtqueue_ordered_fill(vq, elem, len); in virtqueue_fill()
1021 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_fill()
1022 virtqueue_packed_fill(vq, elem, len, idx); in virtqueue_fill()
1024 virtqueue_split_fill(vq, elem, len, idx); in virtqueue_fill()
1029 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count) in virtqueue_split_flush() argument
1033 if (unlikely(!vq->vring.used)) { in virtqueue_split_flush()
1039 trace_virtqueue_flush(vq, count); in virtqueue_split_flush()
1040 old = vq->used_idx; in virtqueue_split_flush()
1042 vring_used_idx_set(vq, new); in virtqueue_split_flush()
1043 vq->inuse -= count; in virtqueue_split_flush()
1044 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) in virtqueue_split_flush()
1045 vq->signalled_used_valid = false; in virtqueue_split_flush()
1048 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) in virtqueue_packed_flush() argument
1052 if (unlikely(!vq->vring.desc)) { in virtqueue_packed_flush()
1064 ndescs += vq->used_elems[0].ndescs; in virtqueue_packed_flush()
1066 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false); in virtqueue_packed_flush()
1067 ndescs += vq->used_elems[i].ndescs; in virtqueue_packed_flush()
1069 virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); in virtqueue_packed_flush()
1071 vq->inuse -= ndescs; in virtqueue_packed_flush()
1072 vq->used_idx += ndescs; in virtqueue_packed_flush()
1073 if (vq->used_idx >= vq->vring.num) { in virtqueue_packed_flush()
1074 vq->used_idx -= vq->vring.num; in virtqueue_packed_flush()
1075 vq->used_wrap_counter ^= 1; in virtqueue_packed_flush()
1076 vq->signalled_used_valid = false; in virtqueue_packed_flush()
1080 static void virtqueue_ordered_flush(VirtQueue *vq) in virtqueue_ordered_flush() argument
1082 unsigned int i = vq->used_idx % vq->vring.num; in virtqueue_ordered_flush()
1084 uint16_t old = vq->used_idx; in virtqueue_ordered_flush()
1089 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED); in virtqueue_ordered_flush()
1092 if (unlikely(!vq->vring.desc)) { in virtqueue_ordered_flush()
1095 } else if (unlikely(!vq->vring.used)) { in virtqueue_ordered_flush()
1100 if (!vq->used_elems[i].in_order_filled) { in virtqueue_ordered_flush()
1105 while (vq->used_elems[i].in_order_filled) { in virtqueue_ordered_flush()
1110 if (packed && i != vq->used_idx) { in virtqueue_ordered_flush()
1111 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false); in virtqueue_ordered_flush()
1113 uelem.id = vq->used_elems[i].index; in virtqueue_ordered_flush()
1114 uelem.len = vq->used_elems[i].len; in virtqueue_ordered_flush()
1115 vring_used_write(vq, &uelem, i); in virtqueue_ordered_flush()
1118 vq->used_elems[i].in_order_filled = false; in virtqueue_ordered_flush()
1119 ndescs += vq->used_elems[i].ndescs; in virtqueue_ordered_flush()
1120 i += vq->used_elems[i].ndescs; in virtqueue_ordered_flush()
1121 if (i >= vq->vring.num) { in virtqueue_ordered_flush()
1122 i -= vq->vring.num; in virtqueue_ordered_flush()
1127 virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true); in virtqueue_ordered_flush()
1128 vq->used_idx += ndescs; in virtqueue_ordered_flush()
1129 if (vq->used_idx >= vq->vring.num) { in virtqueue_ordered_flush()
1130 vq->used_idx -= vq->vring.num; in virtqueue_ordered_flush()
1131 vq->used_wrap_counter ^= 1; in virtqueue_ordered_flush()
1132 vq->signalled_used_valid = false; in virtqueue_ordered_flush()
1138 vring_used_idx_set(vq, new); in virtqueue_ordered_flush()
1139 if (unlikely((int16_t)(new - vq->signalled_used) < in virtqueue_ordered_flush()
1141 vq->signalled_used_valid = false; in virtqueue_ordered_flush()
1144 vq->inuse -= ndescs; in virtqueue_ordered_flush()
1147 void virtqueue_flush(VirtQueue *vq, unsigned int count) in virtqueue_flush() argument
1149 if (virtio_device_disabled(vq->vdev)) { in virtqueue_flush()
1150 vq->inuse -= count; in virtqueue_flush()
1154 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) { in virtqueue_flush()
1155 virtqueue_ordered_flush(vq); in virtqueue_flush()
1156 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_flush()
1157 virtqueue_packed_flush(vq, count); in virtqueue_flush()
1159 virtqueue_split_flush(vq, count); in virtqueue_flush()
1163 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_push() argument
1167 virtqueue_fill(vq, elem, len, 0); in virtqueue_push()
1168 virtqueue_flush(vq, 1); in virtqueue_push()
1172 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) in virtqueue_num_heads() argument
1177 avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx in virtqueue_num_heads()
1178 : vring_avail_idx(vq); in virtqueue_num_heads()
1182 if (num_heads > vq->vring.num) { in virtqueue_num_heads()
1183 virtio_error(vq->vdev, "Guest moved used index from %u to %u", in virtqueue_num_heads()
1184 idx, vq->shadow_avail_idx); in virtqueue_num_heads()
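
virtqueue_num_heads() leans on free-running 16-bit indices: the driver's avail index and the device's position both grow mod 2^16, so plain uint16_t subtraction yields the pending head count even across the wrap, and any result larger than vring.num proves the guest corrupted the index. For example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t avail_idx = 3, last_avail_idx = 0xfffe;
        uint16_t num_heads = avail_idx - last_avail_idx;
        printf("%u heads\n", (unsigned)num_heads);  /* 5, despite the wrap */
        /* num_heads > vring.num triggers the virtio_error() above. */
        return 0;
    }
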
1204 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx, in virtqueue_get_head() argument
1209 *head = vring_avail_ring(vq, idx % vq->vring.num); in virtqueue_get_head()
1212 if (*head >= vq->vring.num) { in virtqueue_get_head()
1213 virtio_error(vq->vdev, "Guest says index %u is available", *head); in virtqueue_get_head()
1247 static void virtqueue_split_get_avail_bytes(VirtQueue *vq, in virtqueue_split_get_avail_bytes() argument
1252 VirtIODevice *vdev = vq->vdev; in virtqueue_split_get_avail_bytes()
1261 idx = vq->last_avail_idx; in virtqueue_split_get_avail_bytes()
1264 while ((rc = virtqueue_num_heads(vq, idx)) > 0) { in virtqueue_split_get_avail_bytes()
1269 unsigned int max = vq->vring.num; in virtqueue_split_get_avail_bytes()
1273 if (!virtqueue_get_head(vq, idx++, &i)) { in virtqueue_split_get_avail_bytes()
1356 static int virtqueue_packed_read_next_desc(VirtQueue *vq, in virtqueue_packed_read_next_desc() argument
1374 (*next) -= vq->vring.num; in virtqueue_packed_read_next_desc()
1378 vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false); in virtqueue_packed_read_next_desc()
1383 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, in virtqueue_packed_get_avail_bytes() argument
1390 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_get_avail_bytes()
1401 idx = vq->last_avail_idx; in virtqueue_packed_get_avail_bytes()
1402 wrap_counter = vq->last_avail_wrap_counter; in virtqueue_packed_get_avail_bytes()
1409 unsigned int max = vq->vring.num; in virtqueue_packed_get_avail_bytes()
1461 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, in virtqueue_packed_get_avail_bytes()
1475 if (idx >= vq->vring.num) { in virtqueue_packed_get_avail_bytes()
1476 idx -= vq->vring.num; in virtqueue_packed_get_avail_bytes()
1482 vq->shadow_avail_idx = idx; in virtqueue_packed_get_avail_bytes()
1483 vq->shadow_avail_wrap_counter = wrap_counter; in virtqueue_packed_get_avail_bytes()
1499 int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, in virtqueue_get_avail_bytes() argument
1508 if (unlikely(!vq->vring.desc)) { in virtqueue_get_avail_bytes()
1512 caches = vring_get_region_caches(vq); in virtqueue_get_avail_bytes()
1517 desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? in virtqueue_get_avail_bytes()
1519 if (caches->desc.len < vq->vring.num * desc_size) { in virtqueue_get_avail_bytes()
1520 virtio_error(vq->vdev, "Cannot map descriptor ring"); in virtqueue_get_avail_bytes()
1524 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_get_avail_bytes()
1525 virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, in virtqueue_get_avail_bytes()
1529 virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, in virtqueue_get_avail_bytes()
1534 return (int)vq->shadow_avail_idx; in virtqueue_get_avail_bytes()
1546 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, in virtqueue_avail_bytes() argument
1551 virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); in virtqueue_avail_bytes()
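
virtqueue_avail_bytes() wraps the byte counting above into a predicate: it returns nonzero once at least in_bytes of device-writable and out_bytes of device-readable buffer space is queued. A typical (hypothetical) call site inside a device model, not compilable on its own; my_hdr is an invented request header:

    /* Gate popping until the guest has queued a full readable header. */
    if (!virtqueue_avail_bytes(vq, 0, sizeof(struct my_hdr))) {
        return;
    }
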
1673 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) in virtqueue_split_pop() argument
1680 VirtIODevice *vdev = vq->vdev; in virtqueue_split_pop()
1691 if (virtio_queue_empty_rcu(vq)) { in virtqueue_split_pop()
1701 max = vq->vring.num; in virtqueue_split_pop()
1703 if (vq->inuse >= vq->vring.num) { in virtqueue_split_pop()
1708 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) { in virtqueue_split_pop()
1713 vring_set_avail_event(vq, vq->last_avail_idx); in virtqueue_split_pop()
1718 caches = vring_get_region_caches(vq); in virtqueue_split_pop()
1800 idx = (vq->last_avail_idx - 1) % vq->vring.num; in virtqueue_split_pop()
1801 vq->used_elems[idx].index = elem->index; in virtqueue_split_pop()
1802 vq->used_elems[idx].len = elem->len; in virtqueue_split_pop()
1803 vq->used_elems[idx].ndescs = elem->ndescs; in virtqueue_split_pop()
1806 vq->inuse++; in virtqueue_split_pop()
1808 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); in virtqueue_split_pop()
1819 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) in virtqueue_packed_pop() argument
1826 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_pop()
1838 if (virtio_queue_packed_empty_rcu(vq)) { in virtqueue_packed_pop()
1845 max = vq->vring.num; in virtqueue_packed_pop()
1847 if (vq->inuse >= vq->vring.num) { in virtqueue_packed_pop()
1852 i = vq->last_avail_idx; in virtqueue_packed_pop()
1854 caches = vring_get_region_caches(vq); in virtqueue_packed_pop()
1916 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i, in virtqueue_packed_pop()
1941 vq->used_elems[vq->last_avail_idx].index = elem->index; in virtqueue_packed_pop()
1942 vq->used_elems[vq->last_avail_idx].len = elem->len; in virtqueue_packed_pop()
1943 vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs; in virtqueue_packed_pop()
1946 vq->last_avail_idx += elem->ndescs; in virtqueue_packed_pop()
1947 vq->inuse += elem->ndescs; in virtqueue_packed_pop()
1949 if (vq->last_avail_idx >= vq->vring.num) { in virtqueue_packed_pop()
1950 vq->last_avail_idx -= vq->vring.num; in virtqueue_packed_pop()
1951 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_pop()
1954 vq->shadow_avail_idx = vq->last_avail_idx; in virtqueue_packed_pop()
1955 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter; in virtqueue_packed_pop()
1957 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); in virtqueue_packed_pop()
1968 void *virtqueue_pop(VirtQueue *vq, size_t sz) in virtqueue_pop() argument
1970 if (virtio_device_disabled(vq->vdev)) { in virtqueue_pop()
1974 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_pop()
1975 return virtqueue_packed_pop(vq, sz); in virtqueue_pop()
1977 return virtqueue_split_pop(vq, sz); in virtqueue_pop()
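
Taken together, pop/fill/flush (or the push shorthand) give the canonical device loop. A sketch of a hypothetical handle_output callback as it would be wired up via virtio_add_queue(); the element layout and g_free() contract are QEMU's, and this only compiles inside the QEMU tree:

    static void my_dev_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement *elem;

        for (;;) {
            elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
            if (!elem) {
                break;                   /* queue empty (or device broken) */
            }
            /* ... consume elem->out_sg / fill elem->in_sg here ... */
            virtqueue_push(vq, elem, 0); /* len = bytes written to in_sg */
            g_free(elem);                /* pop allocates, caller frees */
            virtio_notify(vdev, vq);     /* interrupt unless suppressed */
        }
    }
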
1981 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) in virtqueue_packed_drop_all() argument
1987 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_drop_all()
1992 caches = vring_get_region_caches(vq); in virtqueue_packed_drop_all()
1999 virtio_queue_set_notification(vq, 0); in virtqueue_packed_drop_all()
2001 while (vq->inuse < vq->vring.num) { in virtqueue_packed_drop_all()
2002 unsigned int idx = vq->last_avail_idx; in virtqueue_packed_drop_all()
2008 vq->last_avail_idx, true); in virtqueue_packed_drop_all()
2009 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) { in virtqueue_packed_drop_all()
2014 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache, in virtqueue_packed_drop_all()
2015 vq->vring.num, &idx, false)) { in virtqueue_packed_drop_all()
2022 virtqueue_push(vq, &elem, 0); in virtqueue_packed_drop_all()
2024 vq->last_avail_idx += elem.ndescs; in virtqueue_packed_drop_all()
2025 if (vq->last_avail_idx >= vq->vring.num) { in virtqueue_packed_drop_all()
2026 vq->last_avail_idx -= vq->vring.num; in virtqueue_packed_drop_all()
2027 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_drop_all()
2034 static unsigned int virtqueue_split_drop_all(VirtQueue *vq) in virtqueue_split_drop_all() argument
2038 VirtIODevice *vdev = vq->vdev; in virtqueue_split_drop_all()
2041 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) { in virtqueue_split_drop_all()
2045 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) { in virtqueue_split_drop_all()
2048 vq->inuse++; in virtqueue_split_drop_all()
2049 vq->last_avail_idx++; in virtqueue_split_drop_all()
2051 vring_set_avail_event(vq, vq->last_avail_idx); in virtqueue_split_drop_all()
2055 virtqueue_push(vq, &elem, 0); in virtqueue_split_drop_all()
2068 unsigned int virtqueue_drop_all(VirtQueue *vq) in virtqueue_drop_all() argument
2070 struct VirtIODevice *vdev = vq->vdev; in virtqueue_drop_all()
2072 if (virtio_device_disabled(vq->vdev)) { in virtqueue_drop_all()
2077 return virtqueue_packed_drop_all(vq); in virtqueue_drop_all()
2079 return virtqueue_split_drop_all(vq); in virtqueue_drop_all()
2269 vdev->vq[i].vring.desc = 0; in __virtio_queue_reset()
2270 vdev->vq[i].vring.avail = 0; in __virtio_queue_reset()
2271 vdev->vq[i].vring.used = 0; in __virtio_queue_reset()
2272 vdev->vq[i].last_avail_idx = 0; in __virtio_queue_reset()
2273 vdev->vq[i].shadow_avail_idx = 0; in __virtio_queue_reset()
2274 vdev->vq[i].used_idx = 0; in __virtio_queue_reset()
2275 vdev->vq[i].last_avail_wrap_counter = true; in __virtio_queue_reset()
2276 vdev->vq[i].shadow_avail_wrap_counter = true; in __virtio_queue_reset()
2277 vdev->vq[i].used_wrap_counter = true; in __virtio_queue_reset()
2279 vdev->vq[i].signalled_used = 0; in __virtio_queue_reset()
2280 vdev->vq[i].signalled_used_valid = false; in __virtio_queue_reset()
2281 vdev->vq[i].notification = true; in __virtio_queue_reset()
2282 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; in __virtio_queue_reset()
2283 vdev->vq[i].inuse = 0; in __virtio_queue_reset()
2284 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); in __virtio_queue_reset()
2360 if (!vdev->vq[n].vring.num) { in virtio_queue_set_addr()
2363 vdev->vq[n].vring.desc = addr; in virtio_queue_set_addr()
2369 return vdev->vq[n].vring.desc; in virtio_queue_get_addr()
2375 if (!vdev->vq[n].vring.num) { in virtio_queue_set_rings()
2378 vdev->vq[n].vring.desc = desc; in virtio_queue_set_rings()
2379 vdev->vq[n].vring.avail = avail; in virtio_queue_set_rings()
2380 vdev->vq[n].vring.used = used; in virtio_queue_set_rings()
2389 if (!!num != !!vdev->vq[n].vring.num || in virtio_queue_set_num()
2394 vdev->vq[n].vring.num = num; in virtio_queue_set_num()
2402 VirtQueue *virtio_vector_next_queue(VirtQueue *vq) in virtio_vector_next_queue() argument
2404 return QLIST_NEXT(vq, node); in virtio_vector_next_queue()
2409 return vdev->vq[n].vring.num; in virtio_queue_get_num()
2414 return vdev->vq[n].vring.num_default; in virtio_queue_get_max_num()
2447 vdev->vq[n].vring.align = align; in virtio_queue_set_align()
2452 void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx) in virtio_queue_set_shadow_avail_idx() argument
2454 if (!vq->vring.desc) { in virtio_queue_set_shadow_avail_idx()
2462 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_set_shadow_avail_idx()
2463 vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1; in virtio_queue_set_shadow_avail_idx()
2464 vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF; in virtio_queue_set_shadow_avail_idx()
2466 vq->shadow_avail_idx = shadow_avail_idx; in virtio_queue_set_shadow_avail_idx()
2470 static void virtio_queue_notify_vq(VirtQueue *vq) in virtio_queue_notify_vq() argument
2472 if (vq->vring.desc && vq->handle_output) { in virtio_queue_notify_vq()
2473 VirtIODevice *vdev = vq->vdev; in virtio_queue_notify_vq()
2479 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); in virtio_queue_notify_vq()
2480 vq->handle_output(vdev, vq); in virtio_queue_notify_vq()
2490 VirtQueue *vq = &vdev->vq[n]; in virtio_queue_notify() local
2492 if (unlikely(!vq->vring.desc || vdev->broken)) { in virtio_queue_notify()
2496 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); in virtio_queue_notify()
2497 if (vq->host_notifier_enabled) { in virtio_queue_notify()
2498 event_notifier_set(&vq->host_notifier); in virtio_queue_notify()
2499 } else if (vq->handle_output) { in virtio_queue_notify()
2500 vq->handle_output(vdev, vq); in virtio_queue_notify()
2510 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector : in virtio_queue_vector()
2516 VirtQueue *vq = &vdev->vq[n]; in virtio_queue_set_vector() local
2520 vdev->vq[n].vector != VIRTIO_NO_VECTOR) { in virtio_queue_set_vector()
2521 QLIST_REMOVE(vq, node); in virtio_queue_set_vector()
2523 vdev->vq[n].vector = vector; in virtio_queue_set_vector()
2526 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); in virtio_queue_set_vector()
2537 if (vdev->vq[i].vring.num == 0) in virtio_add_queue()
2544 vdev->vq[i].vring.num = queue_size; in virtio_add_queue()
2545 vdev->vq[i].vring.num_default = queue_size; in virtio_add_queue()
2546 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; in virtio_add_queue()
2547 vdev->vq[i].handle_output = handle_output; in virtio_add_queue()
2548 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size); in virtio_add_queue()
2550 return &vdev->vq[i]; in virtio_add_queue()
2553 void virtio_delete_queue(VirtQueue *vq) in virtio_delete_queue() argument
2555 vq->vring.num = 0; in virtio_delete_queue()
2556 vq->vring.num_default = 0; in virtio_delete_queue()
2557 vq->handle_output = NULL; in virtio_delete_queue()
2558 g_free(vq->used_elems); in virtio_delete_queue()
2559 vq->used_elems = NULL; in virtio_delete_queue()
2560 virtio_virtqueue_reset_region_cache(vq); in virtio_delete_queue()
2569 virtio_delete_queue(&vdev->vq[n]); in virtio_del_queue()
2585 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_split_should_notify() argument
2593 !vq->inuse && virtio_queue_empty(vq)) { in virtio_split_should_notify()
2598 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); in virtio_split_should_notify()
2601 v = vq->signalled_used_valid; in virtio_split_should_notify()
2602 vq->signalled_used_valid = true; in virtio_split_should_notify()
2603 old = vq->signalled_used; in virtio_split_should_notify()
2604 new = vq->signalled_used = vq->used_idx; in virtio_split_should_notify()
2605 return !v || vring_need_event(vring_get_used_event(vq), new, old); in virtio_split_should_notify()
2608 static bool vring_packed_need_event(VirtQueue *vq, bool wrap, in vring_packed_need_event() argument
2615 off -= vq->vring.num; in vring_packed_need_event()
2622 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_packed_should_notify() argument
2629 caches = vring_get_region_caches(vq); in virtio_packed_should_notify()
2636 old = vq->signalled_used; in virtio_packed_should_notify()
2637 new = vq->signalled_used = vq->used_idx; in virtio_packed_should_notify()
2638 v = vq->signalled_used_valid; in virtio_packed_should_notify()
2639 vq->signalled_used_valid = true; in virtio_packed_should_notify()
2647 return !v || vring_packed_need_event(vq, vq->used_wrap_counter, in virtio_packed_should_notify()
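
vring_packed_need_event() above folds the driver's wrap counter into bit 15 of its event suppression word; when that bit disagrees with the device's wrap counter, the event position belongs to the previous lap and is shifted back by one ring length before the usual comparison. A standalone arithmetic check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool vring_need_event(uint16_t event, uint16_t new_idx,
                                 uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old_idx);
    }

    static bool packed_need_event(uint16_t ring_num, bool wrap,
                                  uint16_t off_wrap, uint16_t new_idx,
                                  uint16_t old_idx)
    {
        int off = off_wrap & ~(1 << 15);
        if (wrap != (off_wrap >> 15)) {
            off -= ring_num;        /* event is from the previous lap */
        }
        return vring_need_event(off, new_idx, old_idx);
    }

    int main(void)
    {
        /* Event at slot 4 of the previous lap (event wrap bit 0, device
         * wrap 1): already crossed, so no interrupt is needed. */
        printf("%d\n", packed_need_event(256, true, 4, 2, 0));  /* 0 */
        return 0;
    }
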
2652 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_should_notify() argument
2655 return virtio_packed_should_notify(vdev, vq); in virtio_should_notify()
2657 return virtio_split_should_notify(vdev, vq); in virtio_should_notify()
2665 VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier); in virtio_notify_irqfd_deferred_fn() local
2667 trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq); in virtio_notify_irqfd_deferred_fn()
2671 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) in virtio_notify_irqfd() argument
2674 if (!virtio_should_notify(vdev, vq)) { in virtio_notify_irqfd()
2679 trace_virtio_notify_irqfd(vdev, vq); in virtio_notify_irqfd()
2696 virtio_set_isr(vq->vdev, 0x1); in virtio_notify_irqfd()
2697 defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier); in virtio_notify_irqfd()
2700 static void virtio_irq(VirtQueue *vq) in virtio_irq() argument
2702 virtio_set_isr(vq->vdev, 0x1); in virtio_irq()
2703 virtio_notify_vector(vq->vdev, vq->vector); in virtio_irq()
2706 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_notify() argument
2709 if (!virtio_should_notify(vdev, vq)) { in virtio_notify()
2714 trace_virtio_notify(vdev, vq); in virtio_notify()
2715 virtio_irq(vq); in virtio_notify()
2767 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { in virtio_ringsize_needed()
2836 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2848 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2870 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
3022 if (vdev->vq[i].vring.num == 0) in virtio_save()
3029 if (vdev->vq[i].vring.num == 0) in virtio_save()
3032 qemu_put_be32(f, vdev->vq[i].vring.num); in virtio_save()
3034 qemu_put_be32(f, vdev->vq[i].vring.align); in virtio_save()
3040 qemu_put_be64(f, vdev->vq[i].vring.desc); in virtio_save()
3041 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); in virtio_save()
3154 if (vdev->vq[i].vring.num != 0) { in virtio_set_features()
3262 vdev->vq[i].vring.num = qemu_get_be32(f); in virtio_load()
3264 vdev->vq[i].vring.align = qemu_get_be32(f); in virtio_load()
3266 vdev->vq[i].vring.desc = qemu_get_be64(f); in virtio_load()
3267 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); in virtio_load()
3268 vdev->vq[i].signalled_used_valid = false; in virtio_load()
3269 vdev->vq[i].notification = true; in virtio_load()
3271 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { in virtio_load()
3274 i, vdev->vq[i].last_avail_idx); in virtio_load()
3339 if (vdev->vq[i].vring.desc) { in virtio_load()
3355 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx; in virtio_load()
3356 vdev->vq[i].shadow_avail_wrap_counter = in virtio_load()
3357 vdev->vq[i].last_avail_wrap_counter; in virtio_load()
3361 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; in virtio_load()
3363 if (nheads > vdev->vq[i].vring.num) { in virtio_load()
3366 i, vdev->vq[i].vring.num, in virtio_load()
3367 vring_avail_idx(&vdev->vq[i]), in virtio_load()
3368 vdev->vq[i].last_avail_idx, nheads); in virtio_load()
3369 vdev->vq[i].used_idx = 0; in virtio_load()
3370 vdev->vq[i].shadow_avail_idx = 0; in virtio_load()
3371 vdev->vq[i].inuse = 0; in virtio_load()
3374 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); in virtio_load()
3375 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); in virtio_load()
3383 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - in virtio_load()
3384 vdev->vq[i].used_idx); in virtio_load()
3385 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) { in virtio_load()
3388 i, vdev->vq[i].vring.num, in virtio_load()
3389 vdev->vq[i].last_avail_idx, in virtio_load()
3390 vdev->vq[i].used_idx); in virtio_load()
3463 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX); in virtio_init()
3467 vdev->vq[i].vector = VIRTIO_NO_VECTOR; in virtio_init()
3468 vdev->vq[i].vdev = vdev; in virtio_init()
3469 vdev->vq[i].queue_index = i; in virtio_init()
3470 vdev->vq[i].host_notifier_enabled = false; in virtio_init()
3517 return vdev->vq[n].vring.desc; in virtio_queue_get_desc_addr()
3538 return vdev->vq[n].vring.avail; in virtio_queue_get_avail_addr()
3543 return vdev->vq[n].vring.used; in virtio_queue_get_used_addr()
3548 return sizeof(VRingDesc) * vdev->vq[n].vring.num; in virtio_queue_get_desc_size()
3561 sizeof(uint16_t) * vdev->vq[n].vring.num + s; in virtio_queue_get_avail_size()
3574 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s; in virtio_queue_get_used_size()
3582 avail = vdev->vq[n].last_avail_idx; in virtio_queue_packed_get_last_avail_idx()
3583 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15; in virtio_queue_packed_get_last_avail_idx()
3585 used = vdev->vq[n].used_idx; in virtio_queue_packed_get_last_avail_idx()
3586 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15; in virtio_queue_packed_get_last_avail_idx()
3594 return vdev->vq[n].last_avail_idx; in virtio_queue_split_get_last_avail_idx()
3609 struct VirtQueue *vq = &vdev->vq[n]; in virtio_queue_packed_set_last_avail_idx() local
3611 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff; in virtio_queue_packed_set_last_avail_idx()
3612 vq->last_avail_wrap_counter = in virtio_queue_packed_set_last_avail_idx()
3613 vq->shadow_avail_wrap_counter = !!(idx & 0x8000); in virtio_queue_packed_set_last_avail_idx()
3615 vq->used_idx = idx & 0x7fff; in virtio_queue_packed_set_last_avail_idx()
3616 vq->used_wrap_counter = !!(idx & 0x8000); in virtio_queue_packed_set_last_avail_idx()
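
For migration, the packed getters/setters above squeeze each index/wrap pair into one uint16_t: bits 0-14 hold the index, bit 15 the wrap counter. A round-trip demo:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t last_avail_idx = 0x1234 & 0x7fff;
        bool wrap = true;

        uint16_t wire = last_avail_idx | (uint16_t)wrap << 15;  /* save */
        uint16_t idx  = wire & 0x7fff;                          /* load */
        bool w        = !!(wire & 0x8000);

        printf("wire=0x%04x idx=0x%04x wrap=%d\n",
               (unsigned)wire, (unsigned)idx, w);
        return 0;
    }
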
3622 vdev->vq[n].last_avail_idx = idx; in virtio_queue_split_set_last_avail_idx()
3623 vdev->vq[n].shadow_avail_idx = idx; in virtio_queue_split_set_last_avail_idx()
3647 if (vdev->vq[n].vring.desc) { in virtio_queue_split_restore_last_avail_idx()
3648 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); in virtio_queue_split_restore_last_avail_idx()
3649 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; in virtio_queue_split_restore_last_avail_idx()
3671 if (vdev->vq[n].vring.desc) { in virtio_split_packed_update_used_idx()
3672 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); in virtio_split_packed_update_used_idx()
3687 vdev->vq[n].signalled_used_valid = false; in virtio_queue_invalidate_signalled_used()
3692 return vdev->vq + n; in virtio_get_queue()
3695 uint16_t virtio_get_queue_index(VirtQueue *vq) in virtio_get_queue_index() argument
3697 return vq->queue_index; in virtio_get_queue_index()
3702 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); in virtio_queue_guest_notifier_read() local
3704 virtio_irq(vq); in virtio_queue_guest_notifier_read()
3715 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, in virtio_queue_set_guest_notifier_fd_handler() argument
3719 event_notifier_set_handler(&vq->guest_notifier, in virtio_queue_set_guest_notifier_fd_handler()
3722 event_notifier_set_handler(&vq->guest_notifier, NULL); in virtio_queue_set_guest_notifier_fd_handler()
3727 virtio_queue_guest_notifier_read(&vq->guest_notifier); in virtio_queue_set_guest_notifier_fd_handler()
3748 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) in virtio_queue_get_guest_notifier() argument
3750 return &vq->guest_notifier; in virtio_queue_get_guest_notifier()
3755 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_begin() local
3757 virtio_queue_set_notification(vq, 0); in virtio_queue_host_notifier_aio_poll_begin()
3763 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll() local
3765 return vq->vring.desc && !virtio_queue_empty(vq); in virtio_queue_host_notifier_aio_poll()
3770 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_ready() local
3772 virtio_queue_notify_vq(vq); in virtio_queue_host_notifier_aio_poll_ready()
3777 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_end() local
3780 virtio_queue_set_notification(vq, 1); in virtio_queue_host_notifier_aio_poll_end()
3783 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_attach_host_notifier() argument
3792 if (!virtio_queue_get_notification(vq)) { in virtio_queue_aio_attach_host_notifier()
3793 virtio_queue_set_notification(vq, 1); in virtio_queue_aio_attach_host_notifier()
3796 aio_set_event_notifier(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier()
3800 aio_set_event_notifier_poll(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier()
3809 event_notifier_set(&vq->host_notifier); in virtio_queue_aio_attach_host_notifier()
3818 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_attach_host_notifier_no_poll() argument
3821 if (!virtio_queue_get_notification(vq)) { in virtio_queue_aio_attach_host_notifier_no_poll()
3822 virtio_queue_set_notification(vq, 1); in virtio_queue_aio_attach_host_notifier_no_poll()
3825 aio_set_event_notifier(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier_no_poll()
3835 event_notifier_set(&vq->host_notifier); in virtio_queue_aio_attach_host_notifier_no_poll()
3838 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_detach_host_notifier() argument
3840 aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL); in virtio_queue_aio_detach_host_notifier()
3856 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_read() local
3858 virtio_queue_notify_vq(vq); in virtio_queue_host_notifier_read()
3862 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) in virtio_queue_get_host_notifier() argument
3864 return &vq->host_notifier; in virtio_queue_get_host_notifier()
3872 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled) in virtio_queue_set_host_notifier_enabled() argument
3874 vq->host_notifier_enabled = enabled; in virtio_queue_set_host_notifier_enabled()
3918 if (vdev->vq[i].vring.num == 0) { in virtio_memory_listener_commit()
3981 if (!vdev->vq) { in virtio_device_free_virtqueues()
3986 if (vdev->vq[i].vring.num == 0) { in virtio_device_free_virtqueues()
3989 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); in virtio_device_free_virtqueues()
3991 g_free(vdev->vq); in virtio_device_free_virtqueues()
4024 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4033 event_notifier_set_handler(&vq->host_notifier, in virtio_device_start_ioeventfd_impl()
4039 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4040 if (!vq->vring.num) { in virtio_device_start_ioeventfd_impl()
4043 event_notifier_set(&vq->host_notifier); in virtio_device_start_ioeventfd_impl()
4051 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4056 event_notifier_set_handler(&vq->host_notifier, NULL); in virtio_device_start_ioeventfd_impl()
4094 VirtQueue *vq = &vdev->vq[n]; in virtio_device_stop_ioeventfd_impl() local
4099 event_notifier_set_handler(&vq->host_notifier, NULL); in virtio_device_stop_ioeventfd_impl()
4177 status->queue_index = vdev->vq[queue].queue_index; in qmp_x_query_virtio_queue_status()
4178 status->inuse = vdev->vq[queue].inuse; in qmp_x_query_virtio_queue_status()
4179 status->vring_num = vdev->vq[queue].vring.num; in qmp_x_query_virtio_queue_status()
4180 status->vring_num_default = vdev->vq[queue].vring.num_default; in qmp_x_query_virtio_queue_status()
4181 status->vring_align = vdev->vq[queue].vring.align; in qmp_x_query_virtio_queue_status()
4182 status->vring_desc = vdev->vq[queue].vring.desc; in qmp_x_query_virtio_queue_status()
4183 status->vring_avail = vdev->vq[queue].vring.avail; in qmp_x_query_virtio_queue_status()
4184 status->vring_used = vdev->vq[queue].vring.used; in qmp_x_query_virtio_queue_status()
4185 status->used_idx = vdev->vq[queue].used_idx; in qmp_x_query_virtio_queue_status()
4186 status->signalled_used = vdev->vq[queue].signalled_used; in qmp_x_query_virtio_queue_status()
4187 status->signalled_used_valid = vdev->vq[queue].signalled_used_valid; in qmp_x_query_virtio_queue_status()
4209 status->last_avail_idx = vdev->vq[queue].last_avail_idx; in qmp_x_query_virtio_queue_status()
4210 status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx; in qmp_x_query_virtio_queue_status()
4254 VirtQueue *vq; in qmp_x_query_virtio_queue_element() local
4267 vq = &vdev->vq[queue]; in qmp_x_query_virtio_queue_element()
4286 max = vq->vring.num; in qmp_x_query_virtio_queue_element()
4289 head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num); in qmp_x_query_virtio_queue_element()
4291 head = vring_avail_ring(vq, index % vq->vring.num); in qmp_x_query_virtio_queue_element()
4295 caches = vring_get_region_caches(vq); in qmp_x_query_virtio_queue_element()
4327 element->avail->flags = vring_avail_flags(vq); in qmp_x_query_virtio_queue_element()
4328 element->avail->idx = vring_avail_idx(vq); in qmp_x_query_virtio_queue_element()
4330 element->used->flags = vring_used_flags(vq); in qmp_x_query_virtio_queue_element()
4331 element->used->idx = vring_used_idx(vq); in qmp_x_query_virtio_queue_element()