
Searched refs:vq (Results 1 – 25 of 344) sorted by relevance


/openbmc/u-boot/drivers/virtio/
virtio_ring.c
16 int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[], in virtqueue_add() argument
26 head = vq->free_head; in virtqueue_add()
28 desc = vq->vring.desc; in virtqueue_add()
32 if (vq->num_free < descs_used) { in virtqueue_add()
34 descs_used, vq->num_free); in virtqueue_add()
41 virtio_notify(vq->vdev, vq); in virtqueue_add()
48 desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT); in virtqueue_add()
49 desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr); in virtqueue_add()
50 desc[i].len = cpu_to_virtio32(vq->vdev, sg->length); in virtqueue_add()
53 i = virtio16_to_cpu(vq->vdev, desc[i].next); in virtqueue_add()
[all …]
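For context, the U-Boot virtqueue_add() above chains an array of virtio_sg buffers into the descriptor ring (lines 48–53 fill addr/len/flags for each entry). A minimal, hedged usage sketch; the header name, the separate out/in counts, and virtqueue_kick() are assumptions based on the usual U-Boot virtio API and are not shown in the excerpt:

#include <virtio_ring.h>	/* U-Boot virtqueue API; header name assumed */

/* Sketch only: hand the device one readable request buffer and one
 * writable response buffer, then notify it. */
static int example_submit(struct virtqueue *vq, void *req, size_t req_len,
			  void *resp, size_t resp_len)
{
	struct virtio_sg out = { .addr = req,  .length = req_len  };
	struct virtio_sg in  = { .addr = resp, .length = resp_len };
	struct virtio_sg *sgs[] = { &out, &in };
	int ret;

	ret = virtqueue_add(vq, sgs, 1, 1);	/* 1 out sg, 1 in sg (assumed parameter order) */
	if (ret)
		return ret;

	virtqueue_kick(vq);
	return 0;
}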
/openbmc/linux/drivers/virtio/
virtio_ring.c
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
31 (_vq)->vq.name, (_vq)->in_use); \
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
63 #define START_USE(vq) argument
64 #define END_USE(vq) argument
65 #define LAST_ADD_TIME_UPDATE(vq) argument
66 #define LAST_ADD_TIME_CHECK(vq) argument
67 #define LAST_ADD_TIME_INVALID(vq) argument
[all …]
virtio_pci_modern.c
182 static int vp_active_vq(struct virtqueue *vq, u16 msix_vec) in vp_active_vq() argument
184 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); in vp_active_vq()
188 index = vq->index; in vp_active_vq()
191 vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq)); in vp_active_vq()
192 vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq), in vp_active_vq()
193 virtqueue_get_avail_addr(vq), in vp_active_vq()
194 virtqueue_get_used_addr(vq)); in vp_active_vq()
205 static int vp_modern_disable_vq_and_reset(struct virtqueue *vq) in vp_modern_disable_vq_and_reset() argument
207 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); in vp_modern_disable_vq_and_reset()
212 if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET)) in vp_modern_disable_vq_and_reset()
[all …]
/openbmc/linux/drivers/vhost/
vhost.c
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) argument
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_big() argument
60 vq->user_be = true; in vhost_enable_cross_endian_big()
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_little() argument
65 vq->user_be = false; in vhost_enable_cross_endian_little()
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
72 if (vq->private_data) in vhost_set_vring_endian()
[all …]
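The cross-endian helpers above back the VHOST_SET_VRING_ENDIAN ioctl (legacy cross-endian support); the `vq->private_data` check rejects the call once a backend is attached. A hedged userspace sketch, assuming a vhost device fd that has already gone through VHOST_SET_OWNER and a kernel built with CONFIG_VHOST_CROSS_ENDIAN_LEGACY:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch: declare that queue 0 of this device uses a big-endian
 * (legacy, pre-VIRTIO 1.0) ring layout. Must happen before the
 * backend is attached, as enforced by vhost_set_vring_endian() above. */
static int set_vring_big_endian(int vhost_fd)
{
	struct vhost_vring_state state = {
		.index = 0,                       /* virtqueue index */
		.num   = VHOST_VRING_BIG_ENDIAN,  /* VHOST_VRING_LITTLE_ENDIAN for LE */
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state);
}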
test.c
45 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq() local
51 mutex_lock(&vq->mutex); in handle_vq()
52 private = vhost_vq_get_backend(vq); in handle_vq()
54 mutex_unlock(&vq->mutex); in handle_vq()
58 vhost_disable_notify(&n->dev, vq); in handle_vq()
61 head = vhost_get_vq_desc(vq, vq->iov, in handle_vq()
62 ARRAY_SIZE(vq->iov), in handle_vq()
69 if (head == vq->num) { in handle_vq()
70 if (unlikely(vhost_enable_notify(&n->dev, vq))) { in handle_vq()
71 vhost_disable_notify(&n->dev, vq); in handle_vq()
[all …]
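handle_vq() above follows the standard vhost worker pattern: lock the queue, check for a backend, disable guest notifications, then pop descriptors until the ring is empty, re-arming notifications before giving up. A condensed, hedged skeleton of that pattern (actual I/O, weight limiting, and error handling omitted; names other than the vhost APIs are illustrative):

#include "vhost.h"	/* drivers/vhost/vhost.h */

static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq)) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;
		if (head == vq->num) {
			/* Ring empty: re-enable guest kicks, then re-check once. */
			if (vhost_enable_notify(dev, vq)) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... process vq->iov[0 .. out + in) here ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}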
net.c
98 struct vhost_virtqueue *vq; member
109 struct vhost_virtqueue vq; member
229 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
231 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
235 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
246 ubufs->vq = vq; in vhost_net_ubuf_alloc()
360 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
363 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
368 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
370 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
[all …]
vhost.h
50 struct vhost_virtqueue *vq; member
55 struct vhost_virtqueue *vq);
158 struct vhost_virtqueue *vq; member
183 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
200 bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
210 void vhost_vq_flush(struct vhost_virtqueue *vq);
211 bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
212 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
213 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
227 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
[all …]
vsock.c
91 struct vhost_virtqueue *vq) in vhost_transport_do_send_pkt() argument
98 mutex_lock(&vq->mutex); in vhost_transport_do_send_pkt()
100 if (!vhost_vq_get_backend(vq)) in vhost_transport_do_send_pkt()
103 if (!vq_meta_prefetch(vq)) in vhost_transport_do_send_pkt()
107 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
122 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
126 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_transport_do_send_pkt()
133 if (head == vq->num) { in vhost_transport_do_send_pkt()
138 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
139 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
[all …]
scsi.c
174 struct vhost_virtqueue vq; member
256 struct vhost_virtqueue *vq; in vhost_scsi_init_inflight() local
260 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
262 mutex_lock(&vq->mutex); in vhost_scsi_init_inflight()
275 mutex_unlock(&vq->mutex); in vhost_scsi_init_inflight()
280 vhost_scsi_get_inflight(struct vhost_virtqueue *vq) in vhost_scsi_get_inflight() argument
285 svq = container_of(vq, struct vhost_scsi_virtqueue, vq); in vhost_scsi_get_inflight()
331 struct vhost_scsi_virtqueue, vq); in vhost_scsi_release_cmd_res()
366 struct vhost_virtqueue *vq = &tmf->svq->vq; in vhost_scsi_release_cmd() local
368 vhost_vq_work_queue(vq, &tmf->vwork); in vhost_scsi_release_cmd()
[all …]
/openbmc/qemu/hw/virtio/
virtio.c
218 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) in virtio_virtqueue_reset_region_cache() argument
222 caches = qatomic_read(&vq->vring.caches); in virtio_virtqueue_reset_region_cache()
223 qatomic_rcu_set(&vq->vring.caches, NULL); in virtio_virtqueue_reset_region_cache()
231 VirtQueue *vq = &vdev->vq[n]; in virtio_init_region_cache() local
232 VRingMemoryRegionCaches *old = vq->vring.caches; in virtio_init_region_cache()
239 addr = vq->vring.desc; in virtio_init_region_cache()
245 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? in virtio_init_region_cache()
256 vq->vring.used, size, true); in virtio_init_region_cache()
264 vq->vring.avail, size, false); in virtio_init_region_cache()
270 qatomic_rcu_set(&vq->vring.caches, new); in virtio_init_region_cache()
[all …]
/openbmc/qemu/subprojects/libvduse/
libvduse.c
185 VduseDev *vduse_queue_get_dev(VduseVirtq *vq) in vduse_queue_get_dev() argument
187 return vq->dev; in vduse_queue_get_dev()
190 int vduse_queue_get_fd(VduseVirtq *vq) in vduse_queue_get_fd() argument
192 return vq->fd; in vduse_queue_get_fd()
228 static int vduse_queue_check_inflights(VduseVirtq *vq) in vduse_queue_check_inflights() argument
231 VduseDev *dev = vq->dev; in vduse_queue_check_inflights()
233 vq->used_idx = le16toh(vq->vring.used->idx); in vduse_queue_check_inflights()
234 vq->resubmit_num = 0; in vduse_queue_check_inflights()
235 vq->resubmit_list = NULL; in vduse_queue_check_inflights()
236 vq->counter = 0; in vduse_queue_check_inflights()
[all …]
/openbmc/qemu/tests/qtest/libqos/
virtio.c
132 void qvirtqueue_cleanup(const QVirtioBus *bus, QVirtQueue *vq, in qvirtqueue_cleanup() argument
135 return bus->virtqueue_cleanup(vq, alloc); in qvirtqueue_cleanup()
168 QVirtQueue *vq, gint64 timeout_us) in qvirtio_wait_queue_isr() argument
174 if (d->bus->get_queue_isr_status(d, vq)) { in qvirtio_wait_queue_isr()
187 QVirtQueue *vq, in qvirtio_wait_status_byte_no_isr() argument
196 g_assert(!d->bus->get_queue_isr_status(d, vq)); in qvirtio_wait_status_byte_no_isr()
212 QVirtQueue *vq, in qvirtio_wait_used_elem() argument
224 if (d->bus->get_queue_isr_status(d, vq) && in qvirtio_wait_used_elem()
225 qvirtqueue_get_buf(qts, vq, &got_desc_idx, len)) { in qvirtio_wait_used_elem()
239 void qvring_init(QTestState *qts, const QGuestAllocator *alloc, QVirtQueue *vq, in qvring_init() argument
[all …]
virtio-mmio.c
92 static bool qvirtio_mmio_get_queue_isr_status(QVirtioDevice *d, QVirtQueue *vq) in qvirtio_mmio_get_queue_isr_status() argument
146 static void qvirtio_mmio_set_queue_address(QVirtioDevice *d, QVirtQueue *vq) in qvirtio_mmio_set_queue_address() argument
149 uint64_t pfn = vq->desc / dev->page_size; in qvirtio_mmio_set_queue_address()
158 QVirtQueue *vq; in qvirtio_mmio_virtqueue_setup() local
161 vq = g_malloc0(sizeof(*vq)); in qvirtio_mmio_virtqueue_setup()
162 vq->vdev = d; in qvirtio_mmio_virtqueue_setup()
166 vq->index = index; in qvirtio_mmio_virtqueue_setup()
167 vq->size = qvirtio_mmio_get_queue_size(d); in qvirtio_mmio_virtqueue_setup()
168 vq->free_head = 0; in qvirtio_mmio_virtqueue_setup()
169 vq->num_free = vq->size; in qvirtio_mmio_virtqueue_setup()
[all …]
/openbmc/linux/arch/arm64/include/uapi/asm/
ptrace.h
169 #define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state)) argument
200 #define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) argument
201 #define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) argument
202 #define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) argument
210 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ argument
211 (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
212 #define SVE_PT_SVE_ZREGS_SIZE(vq) \ argument
213 (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
215 #define SVE_PT_SVE_PREGS_OFFSET(vq) \ argument
216 (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
[all …]
sigcontext.h
196 #define sve_vl_from_vq(vq) __sve_vl_from_vq(vq) argument
261 #define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) argument
262 #define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) argument
263 #define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) argument
271 #define SVE_SIG_ZREG_OFFSET(vq, n) \ argument
272 (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
273 #define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq) argument
275 #define SVE_SIG_PREGS_OFFSET(vq) \ argument
276 (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
277 #define SVE_SIG_PREG_OFFSET(vq, n) \ argument
[all …]
sve_context.h
31 #define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) argument
33 #define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) argument
34 #define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) argument
35 #define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) argument
38 #define __SVE_ZREG_OFFSET(vq, n) \ argument
39 (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
40 #define __SVE_ZREGS_SIZE(vq) \ argument
41 (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
43 #define __SVE_PREGS_OFFSET(vq) \ argument
44 (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
[all …]
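All of these macros scale with vq, the vector length expressed in 128-bit quadwords (__SVE_VQ_BYTES is 16). A plain-integer restatement for a 512-bit vector length, i.e. vq = 512 / 128 = 4, assuming __SVE_VQ_BYTES == 16 as in the header above:

/* Worked example mirroring the macros above for vq = 4 (512-bit VL). */
unsigned int vq = 4;
unsigned int vl_bytes  = vq * 16;        /* __sve_vl_from_vq(4)  == 64 */
unsigned int zreg_size = vq * 16;        /* __SVE_ZREG_SIZE(4)   == 64 */
unsigned int preg_size = vq * (16 / 8);  /* __SVE_PREG_SIZE(4)   == 8  */
unsigned int ffr_size  = preg_size;      /* __SVE_FFR_SIZE(4)    == 8  */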
/openbmc/qemu/linux-headers/asm-arm64/
sve_context.h
42 #define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) argument
44 #define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) argument
45 #define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) argument
46 #define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) argument
49 #define __SVE_ZREG_OFFSET(vq, n) \ argument
50 (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
51 #define __SVE_ZREGS_SIZE(vq) \ argument
52 (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
54 #define __SVE_PREGS_OFFSET(vq) \ argument
55 (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
[all …]
/openbmc/qemu/subprojects/libvhost-user/
libvhost-user.c
287 map_ring(VuDev *dev, VuVirtq *vq) in map_ring() argument
289 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); in map_ring()
290 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); in map_ring()
291 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); in map_ring()
294 DPRINT(" vring_desc at %p\n", vq->vring.desc); in map_ring()
295 DPRINT(" vring_used at %p\n", vq->vring.used); in map_ring()
296 DPRINT(" vring_avail at %p\n", vq->vring.avail); in map_ring()
298 return !(vq->vring.desc && vq->vring.used && vq->vring.avail); in map_ring()
302 vu_is_vq_usable(VuDev *dev, VuVirtq *vq) in vu_is_vq_usable() argument
308 if (likely(vq->vring.avail)) { in vu_is_vq_usable()
[all …]
/openbmc/linux/include/linux/
virtio.h
32 void (*callback)(struct virtqueue *vq);
42 int virtqueue_add_outbuf(struct virtqueue *vq,
47 int virtqueue_add_inbuf(struct virtqueue *vq,
52 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
58 int virtqueue_add_sgs(struct virtqueue *vq,
65 struct device *virtqueue_dma_dev(struct virtqueue *vq);
67 bool virtqueue_kick(struct virtqueue *vq);
69 bool virtqueue_kick_prepare(struct virtqueue *vq);
71 bool virtqueue_notify(struct virtqueue *vq);
73 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
[all …]
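These are the driver-facing virtqueue primitives. A minimal sketch of the usual submit path for a hypothetical driver (the function and buffer names are illustrative, only the virtqueue_* calls come from the header above):

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Sketch: queue one outgoing buffer and kick the device. */
static int foo_send(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* The token (here: buf) is handed back later by virtqueue_get_buf(). */
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(vq);
	return 0;
}

The device later signals completion through the per-queue callback registered at setup, where virtqueue_get_buf() returns the same token together with the number of bytes the device wrote.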
/openbmc/qemu/tests/qtest/
virtio-blk-test.c
124 QVirtQueue *vq; in test_basic() local
136 vq = qvirtqueue_setup(dev, alloc, 0); in test_basic()
152 free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); in test_basic()
153 qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true); in test_basic()
154 qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); in test_basic()
156 qvirtqueue_kick(qts, dev, vq, free_head); in test_basic()
158 qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, in test_basic()
175 free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); in test_basic()
176 qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true); in test_basic()
177 qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); in test_basic()
[all …]
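The three qvirtqueue_add() calls above mirror the virtio-blk request layout: a 16-byte device-readable request header, a 512-byte data buffer, and one device-writable status byte. The 16-byte header corresponds to the structure below (field names as in <linux/virtio_blk.h>, restated here for reference):

/* 16-byte virtio-blk request header preceding the data and status
 * descriptors used in test_basic() above. */
struct virtio_blk_outhdr {
	__virtio32 type;    /* VIRTIO_BLK_T_IN, VIRTIO_BLK_T_OUT, ... */
	__virtio32 ioprio;
	__virtio64 sector;  /* offset in 512-byte sectors */
};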
vhost-user-blk-test.c
100 QVirtQueue *vq, in test_invalid_discard_write_zeroes() argument
125 free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); in test_invalid_discard_write_zeroes()
126 qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr2), false, true); in test_invalid_discard_write_zeroes()
127 qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr2), 1, true, in test_invalid_discard_write_zeroes()
130 qvirtqueue_kick(qts, dev, vq, free_head); in test_invalid_discard_write_zeroes()
132 qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, in test_invalid_discard_write_zeroes()
150 free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); in test_invalid_discard_write_zeroes()
151 qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); in test_invalid_discard_write_zeroes()
152 qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, in test_invalid_discard_write_zeroes()
155 qvirtqueue_kick(qts, dev, vq, free_head); in test_invalid_discard_write_zeroes()
[all …]
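Here the middle descriptor carries a discard/write-zeroes segment instead of data, with dwz_hdr2 exercising an invalid variant and dwz_hdr the valid 16-byte segment. Per the VIRTIO block device spec, that segment has the layout sketched below (exact header typedefs may differ between QEMU and the kernel uapi):

/* Discard / write-zeroes segment carried after the 16-byte request
 * header in the test above (layout per the VIRTIO block spec). */
struct virtio_blk_discard_write_zeroes {
	__le64 sector;       /* starting sector */
	__le32 num_sectors;  /* number of sectors to discard / zero */
	__le32 flags;        /* flags word, e.g. unmap for write-zeroes */
};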
/openbmc/linux/tools/virtio/linux/
virtio.h
22 void (*callback)(struct virtqueue *vq);
33 int virtqueue_add_sgs(struct virtqueue *vq,
40 int virtqueue_add_outbuf(struct virtqueue *vq,
45 int virtqueue_add_inbuf(struct virtqueue *vq,
50 bool virtqueue_kick(struct virtqueue *vq);
52 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
54 void virtqueue_disable_cb(struct virtqueue *vq);
56 bool virtqueue_enable_cb(struct virtqueue *vq);
57 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
59 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
[all …]
/openbmc/linux/drivers/gpu/drm/virtio/
virtgpu_trace.h
12 TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
13 TP_ARGS(vq, hdr, seqno),
16 __field(unsigned int, vq)
17 __string(name, vq->name)
26 __entry->dev = vq->vdev->index;
27 __entry->vq = vq->index;
28 __assign_str(name, vq->name);
33 __entry->num_free = vq->num_free;
37 __entry->dev, __entry->vq, __get_str(name),
43 TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
[all …]
/openbmc/linux/drivers/vdpa/vdpa_sim/
vdpa_sim_blk.c
110 struct vdpasim_virtqueue *vq) in vdpasim_blk_handle_req() argument
123 ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov, in vdpasim_blk_handle_req()
124 &vq->head, GFP_ATOMIC); in vdpasim_blk_handle_req()
128 if (vq->out_iov.used < 1 || vq->in_iov.used < 1) { in vdpasim_blk_handle_req()
130 vq->out_iov.used, vq->in_iov.used); in vdpasim_blk_handle_req()
134 if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) { in vdpasim_blk_handle_req()
142 to_push = vringh_kiov_length(&vq->in_iov) - 1; in vdpasim_blk_handle_req()
144 to_pull = vringh_kiov_length(&vq->out_iov); in vdpasim_blk_handle_req()
146 bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr, in vdpasim_blk_handle_req()
179 bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, in vdpasim_blk_handle_req()
[all …]
/openbmc/qemu/linux-user/aarch64/
target_prctl.h
32 uint32_t vq, old_vq; in do_prctl_sve_set_vl() local
42 vq = MAX(arg2 / 16, 1); in do_prctl_sve_set_vl()
43 vq = MIN(vq, ARM_MAX_VQ); in do_prctl_sve_set_vl()
44 env->vfp.zcr_el[1] = vq - 1; in do_prctl_sve_set_vl()
47 vq = sve_vq(env); in do_prctl_sve_set_vl()
48 if (vq < old_vq) { in do_prctl_sve_set_vl()
49 aarch64_sve_narrow_vq(env, vq); in do_prctl_sve_set_vl()
51 return vq * 16; in do_prctl_sve_set_vl()
76 int vq, old_vq; in do_prctl_sme_set_vl() local
86 vq = MAX(arg2 / 16, 1); in do_prctl_sme_set_vl()
[all …]
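do_prctl_sve_set_vl() above clamps the requested byte length to whole quadwords (vq = MAX(arg2 / 16, 1), capped at ARM_MAX_VQ) and returns vq * 16, emulating the Linux PR_SVE_SET_VL behaviour. A hedged userspace example of the call being emulated; the exact return-value encoding is taken from the kernel's SVE documentation (low 16 bits hold the vector length in bytes):

#include <stdio.h>
#include <sys/prctl.h>

/* Sketch: request a 256-bit (32-byte) SVE vector length; the kernel
 * (or the QEMU emulation above) clamps the request and reports the
 * resulting configuration. */
int main(void)
{
	long ret = prctl(PR_SVE_SET_VL, 32);

	if (ret < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}
	printf("new SVE VL: %ld bytes\n", ret & PR_SVE_VL_LEN_MASK);
	return 0;
}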
