Lines matching refs: vq

98 struct vhost_virtqueue *vq; member
109 struct vhost_virtqueue vq; member
229 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
231 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
235 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
246 ubufs->vq = vq; in vhost_net_ubuf_alloc()
360 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
363 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
368 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
370 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
371 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
378 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
379 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
390 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback() local
396 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
408 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
425 struct vhost_virtqueue *vq) in vhost_net_disable_vq() argument
428 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_disable_vq()
430 if (!vhost_vq_get_backend(vq)) in vhost_net_disable_vq()
436 struct vhost_virtqueue *vq) in vhost_net_enable_vq() argument
439 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_enable_vq()
443 sock = vhost_vq_get_backend(vq); in vhost_net_enable_vq()
452 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_signal_used() local
453 struct vhost_dev *dev = vq->dev; in vhost_net_signal_used()
458 vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); in vhost_net_signal_used()
481 vq_err(&nvq->vq, "Fail to batch sending packets\n"); in vhost_tx_batch()
511 struct vhost_virtqueue *vq) in vhost_net_busy_poll_try_queue() argument
513 if (!vhost_vq_avail_empty(&net->dev, vq)) { in vhost_net_busy_poll_try_queue()
514 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
515 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in vhost_net_busy_poll_try_queue()
516 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll_try_queue()
517 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
530 struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; in vhost_net_busy_poll() local
536 if (!mutex_trylock(&vq->mutex)) in vhost_net_busy_poll()
539 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll()
549 if (vhost_vq_has_work(vq)) { in vhost_net_busy_poll()
565 vhost_net_busy_poll_try_queue(net, vq); in vhost_net_busy_poll()
569 mutex_unlock(&vq->mutex); in vhost_net_busy_poll()
578 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_tx_get_vq_desc()
579 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_tx_get_vq_desc()
603 struct vhost_virtqueue *vq = &nvq->vq; in vhost_exceeds_maxpend() local
606 min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2); in vhost_exceeds_maxpend()
609 static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, in init_iov_iter() argument
613 size_t len = iov_length(vq->iov, out); in init_iov_iter()
615 iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len); in init_iov_iter()
627 struct vhost_virtqueue *vq = &nvq->vq; in get_tx_bufs() local
632 if (ret < 0 || ret == vq->num) in get_tx_bufs()
636 vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n", in get_tx_bufs()
642 *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out); in get_tx_bufs()
644 vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n", in get_tx_bufs()
652 static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) in tx_can_batch() argument
655 !vhost_vq_avail_empty(vq->dev, vq); in tx_can_batch()
698 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_build_xdp() local
699 struct vhost_net *net = container_of(vq->dev, struct vhost_net, in vhost_net_build_xdp()
701 struct socket *sock = vhost_vq_get_backend(vq); in vhost_net_build_xdp()
739 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
740 vhost16_to_cpu(vq, gso->csum_offset) + 2 > in vhost_net_build_xdp()
741 vhost16_to_cpu(vq, gso->hdr_len)) { in vhost_net_build_xdp()
742 gso->hdr_len = cpu_to_vhost16(vq, in vhost_net_build_xdp()
743 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
744 vhost16_to_cpu(vq, gso->csum_offset) + 2); in vhost_net_build_xdp()
746 if (vhost16_to_cpu(vq, gso->hdr_len) > len) in vhost_net_build_xdp()
772 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_copy() local
799 if (head == vq->num) { in handle_tx_copy()
801 vhost_poll_queue(&vq->poll); in handle_tx_copy()
803 vq))) { in handle_tx_copy()
804 vhost_disable_notify(&net->dev, vq); in handle_tx_copy()
821 vhost_discard_vq_desc(vq, 1); in handle_tx_copy()
822 vhost_net_enable_vq(net, vq); in handle_tx_copy()
833 if (tx_can_batch(vq, total_len)) in handle_tx_copy()
842 vhost_discard_vq_desc(vq, 1); in handle_tx_copy()
843 vhost_net_enable_vq(net, vq); in handle_tx_copy()
851 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); in handle_tx_copy()
852 vq->heads[nvq->done_idx].len = 0; in handle_tx_copy()
854 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); in handle_tx_copy()
862 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_zerocopy() local
884 vhost_zerocopy_signal_used(net, vq); in handle_tx_zerocopy()
893 if (head == vq->num) { in handle_tx_zerocopy()
895 vhost_poll_queue(&vq->poll); in handle_tx_zerocopy()
896 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx_zerocopy()
897 vhost_disable_notify(&net->dev, vq); in handle_tx_zerocopy()
910 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); in handle_tx_zerocopy()
911 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx_zerocopy()
929 if (tx_can_batch(vq, total_len) && in handle_tx_zerocopy()
941 if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) in handle_tx_zerocopy()
947 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; in handle_tx_zerocopy()
950 vhost_discard_vq_desc(vq, 1); in handle_tx_zerocopy()
951 vhost_net_enable_vq(net, vq); in handle_tx_zerocopy()
959 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx_zerocopy()
961 vhost_zerocopy_signal_used(net, vq); in handle_tx_zerocopy()
963 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); in handle_tx_zerocopy()
971 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx() local
974 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); in handle_tx()
975 sock = vhost_vq_get_backend(vq); in handle_tx()
979 if (!vq_meta_prefetch(vq)) in handle_tx()
982 vhost_disable_notify(&net->dev, vq); in handle_tx()
983 vhost_net_disable_vq(net, vq); in handle_tx()
991 mutex_unlock(&vq->mutex); in handle_tx()
1020 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_rx_peek_head_len()
1021 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_rx_peek_head_len()
1046 static int get_rx_bufs(struct vhost_virtqueue *vq, in get_rx_bufs() argument
1069 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
1070 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
1076 if (d == vq->num) { in get_rx_bufs()
1081 vq_err(vq, "unexpected descriptor format for RX: " in get_rx_bufs()
1090 heads[headcount].id = cpu_to_vhost32(vq, d); in get_rx_bufs()
1091 len = iov_length(vq->iov + seg, in); in get_rx_bufs()
1092 heads[headcount].len = cpu_to_vhost32(vq, len); in get_rx_bufs()
1097 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); in get_rx_bufs()
1109 vhost_discard_vq_desc(vq, headcount); in get_rx_bufs()
1118 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx() local
1143 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); in handle_rx()
1144 sock = vhost_vq_get_backend(vq); in handle_rx()
1148 if (!vq_meta_prefetch(vq)) in handle_rx()
1151 vhost_disable_notify(&net->dev, vq); in handle_rx()
1152 vhost_net_disable_vq(net, vq); in handle_rx()
1157 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? in handle_rx()
1158 vq->log : NULL; in handle_rx()
1159 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); in handle_rx()
1168 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, in handle_rx()
1177 vhost_poll_queue(&vq->poll); in handle_rx()
1178 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
1181 vhost_disable_notify(&net->dev, vq); in handle_rx()
1193 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); in handle_rx()
1200 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); in handle_rx()
1216 vhost_discard_vq_desc(vq, headcount); in handle_rx()
1223 vq_err(vq, "Unable to write vnet_hdr " in handle_rx()
1224 "at addr %p\n", vq->iov->iov_base); in handle_rx()
1235 num_buffers = cpu_to_vhost16(vq, headcount); in handle_rx()
1239 vq_err(vq, "Failed num_buffers write"); in handle_rx()
1240 vhost_discard_vq_desc(vq, headcount); in handle_rx()
1247 vhost_log_write(vq, vq_log, log, vhost_len, in handle_rx()
1248 vq->iov, in); in handle_rx()
1250 } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len))); in handle_rx()
1253 vhost_poll_queue(&vq->poll); in handle_rx()
1255 vhost_net_enable_vq(net, vq); in handle_rx()
1258 mutex_unlock(&vq->mutex); in handle_rx()
1263 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_tx_kick() local
1265 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
1272 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_rx_kick() local
1274 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
1330 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
1331 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
1332 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
1333 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
1363 struct vhost_virtqueue *vq) in vhost_net_stop_vq() argument
1367 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_stop_vq()
1369 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
1370 sock = vhost_vq_get_backend(vq); in vhost_net_stop_vq()
1371 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
1372 vhost_vq_set_backend(vq, NULL); in vhost_net_stop_vq()
1375 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
1382 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
1383 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
1390 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1392 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1395 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1398 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1504 struct vhost_virtqueue *vq; in vhost_net_set_backend() local
1518 vq = &n->vqs[index].vq; in vhost_net_set_backend()
1520 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1526 if (!vhost_vq_access_ok(vq)) { in vhost_net_set_backend()
1537 oldsock = vhost_vq_get_backend(vq); in vhost_net_set_backend()
1539 ubufs = vhost_net_ubuf_alloc(vq, in vhost_net_set_backend()
1546 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
1547 vhost_vq_set_backend(vq, sock); in vhost_net_set_backend()
1549 r = vhost_vq_init_access(vq); in vhost_net_set_backend()
1552 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
1570 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1574 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1575 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
1576 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1588 vhost_vq_set_backend(vq, oldsock); in vhost_net_set_backend()
1589 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
1596 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1661 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1662 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1665 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
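
The listing above repeatedly pairs the embedded `vq` member (the hit at line 109) with `container_of(vq, struct vhost_net_virtqueue, vq)` to recover the per-net wrapper from a bare virtqueue pointer (e.g. the hits at lines 363, 428, 439 and 1367). As a rough, self-contained illustration of that pattern only, the sketch below uses simplified placeholder struct layouts and a minimal `container_of`, not the real vhost definitions or the full kernel macro.

```c
/*
 * Standalone sketch (not kernel code): recovering the enclosing
 * struct vhost_net_virtqueue from a pointer to its embedded
 * struct vhost_virtqueue, as the functions in the listing do.
 * Struct contents are placeholders for illustration.
 */
#include <stddef.h>
#include <stdio.h>

/* Minimal container_of: subtract the member's offset from its address. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vhost_virtqueue {
	int num;			/* placeholder for the ring size */
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;	/* embedded vq, as in the "member" hit */
	int done_idx;			/* per-net bookkeeping kept next to it */
};

/* Mirrors the shape of the helpers above: the caller only hands in *vq. */
static void show_enclosing(struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	printf("recovered nvq at %p, done_idx=%d\n",
	       (void *)nvq, nvq->done_idx);
}

int main(void)
{
	struct vhost_net_virtqueue nvq = {
		.vq = { .num = 256 },
		.done_idx = 3,
	};

	/* Pass only the embedded member; container_of finds the wrapper. */
	show_enclosing(&nvq.vq);
	return 0;
}
```

The design point this illustrates is why so many hits in the listing take a plain `struct vhost_virtqueue *` argument: generic vhost code only knows about the embedded virtqueue, and the net-specific callers climb back to their private state with `container_of` instead of carrying a second pointer around.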