Lines matching the full-text query terms "dma", "poll", "cnt" in drivers/vhost/net.c

1 // SPDX-License-Identifier: GPL-2.0-only
5 * virtio-net server in host kernel.
41 " 1 -Enable; 0 - Disable");
61 /* Lower device DMA failed */
63 /* Lower device DMA done */
65 /* Lower device DMA in progress */
113 /* last used idx for outstanding DMA zerocopy buffers */
115 /* For TX, first used idx for DMA done zerocopy buffers
135 struct vhost_poll poll[VHOST_NET_VQ_MAX]; member
154 if (rxq->tail != rxq->head) in vhost_net_buf_get_ptr()
155 return rxq->queue[rxq->head]; in vhost_net_buf_get_ptr()
162 return rxq->tail - rxq->head; in vhost_net_buf_get_size()
167 return rxq->tail == rxq->head; in vhost_net_buf_is_empty()
173 ++rxq->head; in vhost_net_buf_consume()
179 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_produce()
181 rxq->head = 0; in vhost_net_buf_produce()
182 rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, in vhost_net_buf_produce()
184 return rxq->tail; in vhost_net_buf_produce()
189 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_unproduce()
191 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { in vhost_net_buf_unproduce()
192 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, in vhost_net_buf_unproduce()
195 rxq->head = rxq->tail = 0; in vhost_net_buf_unproduce()
204 return xdpf->len; in vhost_net_buf_peek_len()
212 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_peek()
226 rxq->head = rxq->tail = 0; in vhost_net_buf_init()
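The vhost_net_buf_* helpers above cache a batch of pointers pulled from the tap device's ptr_ring and serve them out one at a time through head/tail cursors, refilling only when the cache runs dry. Below is a hedged userspace model of that batching; the demo_* names and the plain array standing in for the ptr_ring are illustrative, not the kernel API.

#include <stdio.h>

#define BATCH 64        /* stands in for VHOST_NET_BATCH */

/* Toy producer ring that the cache pulls from in batches. */
struct demo_ring {
        void **items;
        int len;
        int pos;        /* next entry to consume */
};

static int demo_ring_consume_batched(struct demo_ring *r, void **dst, int n)
{
        int avail = r->len - r->pos;
        int count = avail < n ? avail : n;

        for (int i = 0; i < count; i++)
                dst[i] = r->items[r->pos++];
        return count;
}

/* Simplified stand-in for struct vhost_net_buf: one consumed batch plus
 * head/tail cursors into it. */
struct demo_buf {
        void *queue[BATCH];
        int tail;       /* entries fetched in the last batch */
        int head;       /* next entry to hand out */
};

static int demo_buf_is_empty(const struct demo_buf *b)
{
        return b->tail == b->head;
}

/* Refill the cache from the ring, mirroring the shape of vhost_net_buf_produce(). */
static int demo_buf_produce(struct demo_buf *b, struct demo_ring *r)
{
        b->head = 0;
        b->tail = demo_ring_consume_batched(r, b->queue, BATCH);
        return b->tail;
}

/* Hand out one cached pointer, refilling when the cache runs dry. */
static void *demo_buf_consume(struct demo_buf *b, struct demo_ring *r)
{
        if (demo_buf_is_empty(b) && !demo_buf_produce(b, r))
                return NULL;
        return b->queue[b->head++];
}

int main(void)
{
        int data[5] = { 10, 20, 30, 40, 50 };
        void *items[5] = { &data[0], &data[1], &data[2], &data[3], &data[4] };
        struct demo_ring ring = { .items = items, .len = 5, .pos = 0 };
        struct demo_buf buf = { .head = 0, .tail = 0 };
        void *p;

        while ((p = demo_buf_consume(&buf, &ring)))
                printf("%d\n", *(int *)p);
        return 0;
}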
243 return ERR_PTR(-ENOMEM); in vhost_net_ubuf_alloc()
244 atomic_set(&ubufs->refcount, 1); in vhost_net_ubuf_alloc()
245 init_waitqueue_head(&ubufs->wait); in vhost_net_ubuf_alloc()
246 ubufs->vq = vq; in vhost_net_ubuf_alloc()
252 int r = atomic_sub_return(1, &ubufs->refcount); in vhost_net_ubuf_put()
254 wake_up(&ubufs->wait); in vhost_net_ubuf_put()
261 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); in vhost_net_ubuf_put_and_wait()
275 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
276 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
289 n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
291 sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
293 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
300 return -ENOMEM; in vhost_net_set_ubuf_info()
310 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
311 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
312 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
313 n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
314 n->vqs[i].sock_hlen = 0; in vhost_net_vq_reset()
315 vhost_net_buf_init(&n->vqs[i].rxq); in vhost_net_vq_reset()
322 ++net->tx_packets; in vhost_net_tx_packet()
323 if (net->tx_packets < 1024) in vhost_net_tx_packet()
325 net->tx_packets = 0; in vhost_net_tx_packet()
326 net->tx_zcopy_err = 0; in vhost_net_tx_packet()
331 ++net->tx_zcopy_err; in vhost_net_tx_err()
339 return !net->tx_flush && in vhost_net_tx_select_zcopy()
340 net->tx_packets / 64 >= net->tx_zcopy_err; in vhost_net_tx_select_zcopy()
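vhost_net_tx_select_zcopy() above gates zerocopy transmission on an observed failure rate: zerocopy stays selected only while the error count does not exceed roughly one per 64 packets in the current window, and vhost_net_tx_packet() resets both counters every 1024 packets. A small standalone model of that heuristic, with demo_* names standing in for the real structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified copy of the counters involved in the heuristic. */
struct demo_stats {
        unsigned int tx_packets;        /* packets since the last reset */
        unsigned int tx_zcopy_err;      /* zerocopy completions that failed */
        bool tx_flush;                  /* true while a flush is in progress */
};

static void demo_tx_packet(struct demo_stats *s)
{
        if (++s->tx_packets < 1024)
                return;
        /* Reset the observation window every 1024 packets. */
        s->tx_packets = 0;
        s->tx_zcopy_err = 0;
}

/* Same condition shape as the fragment above: allow zerocopy only while the
 * error count stays at or below about 1/64 of the packets in the window. */
static bool demo_select_zcopy(const struct demo_stats *s)
{
        return !s->tx_flush && s->tx_packets / 64 >= s->tx_zcopy_err;
}

int main(void)
{
        struct demo_stats s = { 0 };

        for (int i = 0; i < 200; i++) {
                demo_tx_packet(&s);
                if (i == 100)
                        s.tx_zcopy_err += 3;    /* simulate a burst of failures */
                printf("pkt %3d zcopy=%d\n", i, demo_select_zcopy(&s));
        }
        return 0;
}

With the simulated burst, zerocopy is dropped around packet 100 and comes back once enough error-free packets have passed for the ratio to recover.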
346 sock_flag(sock->sk, SOCK_ZEROCOPY); in vhost_sock_zcopy()
351 return sock_flag(sock->sk, SOCK_XDP); in vhost_sock_xdp()
354 /* In case of DMA done not in order in lower device driver for some reason.
356 * of used idx. Once lower device DMA done contiguously, we will signal KVM
367 for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) { in vhost_zerocopy_signal_used()
368 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
370 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
371 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
377 add = min(UIO_MAXIOV - nvq->done_idx, j); in vhost_zerocopy_signal_used()
378 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
379 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
380 nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV; in vhost_zerocopy_signal_used()
381 j -= add; in vhost_zerocopy_signal_used()
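The fragments above (the VHOST_DMA_* state comments and vhost_zerocopy_signal_used()) handle out-of-order zerocopy completions: in-flight buffers occupy a circular window between done_idx and upend_idx, each slot's heads[].len records its DMA state, and only a contiguous run of completed slots starting at done_idx is returned to the guest. A compact userspace sketch of that scan; the window size and state values are illustrative, not the kernel's encodings.

#include <stdio.h>

#define RING 8  /* stands in for UIO_MAXIOV; the real window is much larger */

/* Simplified per-slot completion state (illustrative values only). */
enum demo_state {
        DEMO_CLEAR,             /* slot unused */
        DEMO_IN_PROGRESS,       /* lower device still owns the buffer */
        DEMO_DONE,              /* DMA finished successfully */
        DEMO_FAILED,            /* DMA finished with an error */
};

struct demo_vq {
        enum demo_state state[RING];
        int done_idx;   /* first slot not yet returned to the guest */
        int upend_idx;  /* one past the most recently submitted slot */
};

/* Walk from done_idx towards upend_idx and release the leading run of
 * completed (done or failed) slots, stopping at the first one still in
 * flight.  This mirrors the contiguity requirement in the fragment above. */
static int demo_signal_used(struct demo_vq *vq)
{
        int signalled = 0;

        for (int i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % RING) {
                if (vq->state[i] == DEMO_IN_PROGRESS)
                        break;
                vq->state[i] = DEMO_CLEAR;      /* hand the slot back */
                signalled++;
        }
        vq->done_idx = (vq->done_idx + signalled) % RING;
        return signalled;
}

int main(void)
{
        struct demo_vq vq = { .done_idx = 0, .upend_idx = 4 };

        /* Four buffers in flight; the second one completes out of order. */
        vq.state[0] = DEMO_IN_PROGRESS;
        vq.state[1] = DEMO_DONE;
        vq.state[2] = DEMO_IN_PROGRESS;
        vq.state[3] = DEMO_IN_PROGRESS;
        printf("signalled %d\n", demo_signal_used(&vq)); /* 0: head not done yet */

        vq.state[0] = DEMO_DONE;
        printf("signalled %d\n", demo_signal_used(&vq)); /* 2: slots 0 and 1 */
        return 0;
}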
389 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; in vhost_zerocopy_callback()
390 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback()
391 int cnt; in vhost_zerocopy_callback() local
395 /* set len to mark this desc buffers done DMA */ in vhost_zerocopy_callback()
396 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
398 cnt = vhost_net_ubuf_put(ubufs); in vhost_zerocopy_callback()
407 if (cnt <= 1 || !(cnt % 16)) in vhost_zerocopy_callback()
408 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
429 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_disable_vq() local
432 vhost_poll_stop(poll); in vhost_net_disable_vq()
440 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_enable_vq() local
447 return vhost_poll_start(poll, sock->file); in vhost_net_enable_vq()
452 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_signal_used()
453 struct vhost_dev *dev = vq->dev; in vhost_net_signal_used()
455 if (!nvq->done_idx) in vhost_net_signal_used()
458 vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); in vhost_net_signal_used()
459 nvq->done_idx = 0; in vhost_net_signal_used()
469 .num = nvq->batched_xdp, in vhost_tx_batch()
470 .ptr = nvq->xdp, in vhost_tx_batch()
474 if (nvq->batched_xdp == 0) in vhost_tx_batch()
477 msghdr->msg_control = &ctl; in vhost_tx_batch()
478 msghdr->msg_controllen = sizeof(ctl); in vhost_tx_batch()
479 err = sock->ops->sendmsg(sock, msghdr, 0); in vhost_tx_batch()
481 vq_err(&nvq->vq, "Fail to batch sending packets\n"); in vhost_tx_batch()
487 for (i = 0; i < nvq->batched_xdp; ++i) in vhost_tx_batch()
488 put_page(virt_to_head_page(nvq->xdp[i].data)); in vhost_tx_batch()
489 nvq->batched_xdp = 0; in vhost_tx_batch()
490 nvq->done_idx = 0; in vhost_tx_batch()
496 nvq->batched_xdp = 0; in vhost_tx_batch()
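vhost_tx_batch() above accumulates the XDP frames prepared by vhost_net_build_xdp() and pushes the whole batch to the socket with one sendmsg() call, dropping the frames if the send fails. A hedged sketch of the same accumulate-then-flush pattern around a generic send callback; nothing here is the tap/tun TX API.

#include <stdio.h>

#define BATCH 64        /* stands in for VHOST_NET_BATCH */

struct demo_batcher {
        void *frames[BATCH];
        int nr;         /* frames queued but not yet sent */
};

/* Send callback type; returns 0 on success, negative on error. */
typedef int (*demo_send_fn)(void **frames, int nr);

/* Flush the queued frames in one call, dropping them on failure. */
static int demo_flush(struct demo_batcher *b, demo_send_fn send)
{
        int err = 0;

        if (b->nr) {
                err = send(b->frames, b->nr);
                if (err)
                        fprintf(stderr, "batch send failed: %d\n", err);
                b->nr = 0;      /* either way, the batch is consumed */
        }
        return err;
}

/* Queue one frame, flushing first if the batch is already full. */
static int demo_queue(struct demo_batcher *b, void *frame, demo_send_fn send)
{
        int err = 0;

        if (b->nr == BATCH)
                err = demo_flush(b, send);
        b->frames[b->nr++] = frame;
        return err;
}

static int demo_send(void **frames, int nr)
{
        (void)frames;
        printf("sent %d frames in one call\n", nr);
        return 0;
}

int main(void)
{
        struct demo_batcher b = { .nr = 0 };
        int payload[70];

        for (int i = 0; i < 70; i++)
                demo_queue(&b, &payload[i], demo_send); /* one flush at 64 */
        demo_flush(&b, demo_send);                      /* flush the tail of 6 */
        return 0;
}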
504 if (sock->ops->peek_len) in sock_has_rx_data()
505 return sock->ops->peek_len(sock); in sock_has_rx_data()
507 return skb_queue_empty(&sock->sk->sk_receive_queue); in sock_has_rx_data()
513 if (!vhost_vq_avail_empty(&net->dev, vq)) { in vhost_net_busy_poll_try_queue()
514 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
515 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in vhost_net_busy_poll_try_queue()
516 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll_try_queue()
517 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
536 if (!mutex_trylock(&vq->mutex)) in vhost_net_busy_poll()
539 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll()
542 busyloop_timeout = poll_rx ? rvq->busyloop_timeout: in vhost_net_busy_poll()
543 tvq->busyloop_timeout; in vhost_net_busy_poll()
555 !vhost_vq_avail_empty(&net->dev, rvq)) || in vhost_net_busy_poll()
556 !vhost_vq_avail_empty(&net->dev, tvq)) in vhost_net_busy_poll()
567 vhost_enable_notify(&net->dev, rvq); in vhost_net_busy_poll()
569 mutex_unlock(&vq->mutex); in vhost_net_busy_poll()
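vhost_net_busy_poll() above spins for up to busyloop_timeout microseconds, rechecking the socket and the paired virtqueue, before falling back to sleeping on a notification. A minimal userspace model of that bounded busy loop; the clock handling and the data-ready predicate are illustrative simplifications, not the kernel's busy_clock()/signal handling.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Monotonic clock in microseconds. */
static long long now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Spin until data shows up or the timeout expires.  Returns true if the
 * caller should go process data, false if the loop gave up and should fall
 * back to sleeping on a notification. */
static bool demo_busy_poll(bool (*data_ready)(void *), void *arg,
                           long long timeout_us)
{
        long long endtime = now_us() + timeout_us;

        do {
                if (data_ready(arg))
                        return true;
                /* The kernel loop also bails out on pending signals and calls
                 * cpu_relax(); this sketch just rechecks the clock. */
        } while (now_us() < endtime);

        return false;
}

static bool never_ready(void *arg)
{
        (void)arg;
        return false;
}

int main(void)
{
        /* Poll for at most 50 microseconds; nothing arrives, so it times out. */
        printf("got data: %d\n", demo_busy_poll(never_ready, NULL, 50));
        return 0;
}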
577 struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; in vhost_net_tx_get_vq_desc()
578 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_tx_get_vq_desc()
579 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_tx_get_vq_desc()
581 int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), in vhost_net_tx_get_vq_desc()
584 if (r == tvq->num && tvq->busyloop_timeout) { in vhost_net_tx_get_vq_desc()
593 r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), in vhost_net_tx_get_vq_desc()
602 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_exceeds_maxpend()
603 struct vhost_virtqueue *vq = &nvq->vq; in vhost_exceeds_maxpend()
605 return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV > in vhost_exceeds_maxpend()
606 min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2); in vhost_exceeds_maxpend()
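vhost_exceeds_maxpend() above computes how many zerocopy buffers are still in flight: with upend_idx and done_idx living in a UIO_MAXIOV-sized circular window, the outstanding count is (upend_idx + UIO_MAXIOV - done_idx) % UIO_MAXIOV, and transmission backs off once that exceeds min(VHOST_MAX_PEND, vq->num / 4). A tiny worked example of the wrap-around arithmetic:

#include <stdio.h>

#define WINDOW 1024     /* stands in for UIO_MAXIOV */

/* Number of slots in flight between done_idx and upend_idx, handling the
 * case where upend_idx has already wrapped past the end of the window. */
static unsigned int pending(unsigned int upend_idx, unsigned int done_idx)
{
        return (upend_idx + WINDOW - done_idx) % WINDOW;
}

int main(void)
{
        /* No wrap: 300 submitted, 100 completed -> 200 outstanding. */
        printf("%u\n", pending(300, 100));
        /* Wrapped: upend_idx restarted at 5 while done_idx is near the end. */
        printf("%u\n", pending(5, 1020));       /* (5 + 1024 - 1020) % 1024 = 9 */
        return 0;
}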
613 size_t len = iov_length(vq->iov, out); in init_iov_iter()
615 iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len); in init_iov_iter()
627 struct vhost_virtqueue *vq = &nvq->vq; in get_tx_bufs()
632 if (ret < 0 || ret == vq->num) in get_tx_bufs()
638 return -EFAULT; in get_tx_bufs()
642 *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out); in get_tx_bufs()
645 *len, nvq->vhost_hlen); in get_tx_bufs()
646 return -EFAULT; in get_tx_bufs()
655 !vhost_vq_avail_empty(vq->dev, vq); in tx_can_batch()
661 if (pfrag->page) { in vhost_net_page_frag_refill()
662 if (pfrag->offset + sz <= pfrag->size) in vhost_net_page_frag_refill()
664 __page_frag_cache_drain(pfrag->page, net->refcnt_bias); in vhost_net_page_frag_refill()
667 pfrag->offset = 0; in vhost_net_page_frag_refill()
668 net->refcnt_bias = 0; in vhost_net_page_frag_refill()
671 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | in vhost_net_page_frag_refill()
675 if (likely(pfrag->page)) { in vhost_net_page_frag_refill()
676 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; in vhost_net_page_frag_refill()
680 pfrag->page = alloc_page(gfp); in vhost_net_page_frag_refill()
681 if (likely(pfrag->page)) { in vhost_net_page_frag_refill()
682 pfrag->size = PAGE_SIZE; in vhost_net_page_frag_refill()
688 net->refcnt_bias = USHRT_MAX; in vhost_net_page_frag_refill()
689 page_ref_add(pfrag->page, USHRT_MAX - 1); in vhost_net_page_frag_refill()
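vhost_net_page_frag_refill() above allocates a page (or a higher-order run), biases its reference count by USHRT_MAX - 1 up front, and then pays out one reference per packet by decrementing the local refcnt_bias counter, draining the leftover bias when the fragment is replaced; this keeps the atomic page refcount off the per-packet fast path. A hedged sketch of the same bias trick over a toy allocator; the demo_page_* helpers are stand-ins, not the kernel page APIs.

#include <stdio.h>
#include <stdlib.h>

#define BIAS 65535      /* stands in for USHRT_MAX */

/* Toy page with an explicit reference count. */
struct demo_page {
        int refcount;
        char data[4096];
};

struct demo_frag_cache {
        struct demo_page *page;
        size_t offset;
        unsigned int bias;      /* pre-paid references not yet handed out */
};

static void demo_page_put_many(struct demo_page *p, unsigned int n)
{
        p->refcount -= (int)n;
        if (p->refcount == 0)
                free(p);
}

/* Replace the cached page, returning the unused pre-paid references first,
 * the way __page_frag_cache_drain() is used in the fragment above. */
static int demo_refill(struct demo_frag_cache *c)
{
        if (c->page)
                demo_page_put_many(c->page, c->bias);

        c->page = calloc(1, sizeof(*c->page));
        if (!c->page)
                return -1;
        c->page->refcount = BIAS;       /* one bump covers all future buffers */
        c->offset = 0;
        c->bias = BIAS;
        return 0;
}

/* Carve a buffer out of the cached page.  Each buffer logically owns one
 * pre-paid reference, so only the local bias counter is decremented here;
 * the shared refcount is untouched on the fast path. */
static char *demo_alloc(struct demo_frag_cache *c, size_t len)
{
        char *buf;

        if (!c->page || c->offset + len > sizeof(c->page->data) || !c->bias) {
                if (demo_refill(c))
                        return NULL;
        }
        buf = c->page->data + c->offset;
        c->offset += len;
        c->bias--;
        return buf;
}

int main(void)
{
        struct demo_frag_cache c = { 0 };

        for (int i = 0; i < 8; i++)
                printf("buffer %d starts at offset %zu\n",
                       i, (size_t)(demo_alloc(&c, 256) - c.page->data));

        /* The eight buffer owners eventually drop their references ... */
        demo_page_put_many(c.page, 8);
        /* ... and the cache drains whatever bias is left, freeing the page. */
        demo_page_put_many(c.page, c.bias);
        return 0;
}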
698 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_build_xdp()
699 struct vhost_net *net = container_of(vq->dev, struct vhost_net, in vhost_net_build_xdp()
702 struct page_frag *alloc_frag = &net->page_frag; in vhost_net_build_xdp()
704 struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; in vhost_net_build_xdp()
709 int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen); in vhost_net_build_xdp()
710 int sock_hlen = nvq->sock_hlen; in vhost_net_build_xdp()
714 if (unlikely(len < nvq->sock_hlen)) in vhost_net_build_xdp()
715 return -EFAULT; in vhost_net_build_xdp()
719 return -ENOSPC; in vhost_net_build_xdp()
722 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); in vhost_net_build_xdp()
725 return -ENOMEM; in vhost_net_build_xdp()
727 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; in vhost_net_build_xdp()
728 copied = copy_page_from_iter(alloc_frag->page, in vhost_net_build_xdp()
729 alloc_frag->offset + in vhost_net_build_xdp()
733 return -EFAULT; in vhost_net_build_xdp()
736 gso = &hdr->gso; in vhost_net_build_xdp()
738 if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && in vhost_net_build_xdp()
739 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
740 vhost16_to_cpu(vq, gso->csum_offset) + 2 > in vhost_net_build_xdp()
741 vhost16_to_cpu(vq, gso->hdr_len)) { in vhost_net_build_xdp()
742 gso->hdr_len = cpu_to_vhost16(vq, in vhost_net_build_xdp()
743 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
744 vhost16_to_cpu(vq, gso->csum_offset) + 2); in vhost_net_build_xdp()
746 if (vhost16_to_cpu(vq, gso->hdr_len) > len) in vhost_net_build_xdp()
747 return -EINVAL; in vhost_net_build_xdp()
750 len -= sock_hlen; in vhost_net_build_xdp()
751 copied = copy_page_from_iter(alloc_frag->page, in vhost_net_build_xdp()
752 alloc_frag->offset + pad, in vhost_net_build_xdp()
755 return -EFAULT; in vhost_net_build_xdp()
759 hdr->buflen = buflen; in vhost_net_build_xdp()
761 --net->refcnt_bias; in vhost_net_build_xdp()
762 alloc_frag->offset += buflen; in vhost_net_build_xdp()
764 ++nvq->batched_xdp; in vhost_net_build_xdp()
771 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_copy()
772 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_copy()
785 bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); in handle_tx_copy()
790 if (nvq->done_idx == VHOST_NET_BATCH) in handle_tx_copy()
799 if (head == vq->num) { in handle_tx_copy()
801 vhost_poll_queue(&vq->poll); in handle_tx_copy()
802 } else if (unlikely(vhost_enable_notify(&net->dev, in handle_tx_copy()
804 vhost_disable_notify(&net->dev, vq); in handle_tx_copy()
819 } else if (unlikely(err != -ENOSPC)) { in handle_tx_copy()
839 err = sock->ops->sendmsg(sock, &msg, len); in handle_tx_copy()
841 if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { in handle_tx_copy()
851 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); in handle_tx_copy()
852 vq->heads[nvq->done_idx].len = 0; in handle_tx_copy()
853 ++nvq->done_idx; in handle_tx_copy()
861 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_zerocopy()
862 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_zerocopy()
893 if (head == vq->num) { in handle_tx_zerocopy()
895 vhost_poll_queue(&vq->poll); in handle_tx_zerocopy()
896 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx_zerocopy()
897 vhost_disable_notify(&net->dev, vq); in handle_tx_zerocopy()
909 ubuf = nvq->ubuf_info + nvq->upend_idx; in handle_tx_zerocopy()
910 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); in handle_tx_zerocopy()
911 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx_zerocopy()
912 ubuf->ctx = nvq->ubufs; in handle_tx_zerocopy()
913 ubuf->desc = nvq->upend_idx; in handle_tx_zerocopy()
914 ubuf->ubuf.callback = vhost_zerocopy_callback; in handle_tx_zerocopy()
915 ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG; in handle_tx_zerocopy()
916 refcount_set(&ubuf->ubuf.refcnt, 1); in handle_tx_zerocopy()
919 ctl.ptr = &ubuf->ubuf; in handle_tx_zerocopy()
921 ubufs = nvq->ubufs; in handle_tx_zerocopy()
922 atomic_inc(&ubufs->refcount); in handle_tx_zerocopy()
923 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; in handle_tx_zerocopy()
936 err = sock->ops->sendmsg(sock, &msg, len); in handle_tx_zerocopy()
938 bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS; in handle_tx_zerocopy()
941 if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) in handle_tx_zerocopy()
944 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) in handle_tx_zerocopy()
947 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; in handle_tx_zerocopy()
959 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx_zerocopy()
966 /* Expects to be always run from workqueue - which acts as
967 * read-size critical section for our kind of RCU. */
970 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx()
971 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx()
974 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); in handle_tx()
982 vhost_disable_notify(&net->dev, vq); in handle_tx()
991 mutex_unlock(&vq->mutex); in handle_tx()
1000 if (rvq->rx_ring) in peek_head_len()
1003 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); in peek_head_len()
1004 head = skb_peek(&sk->sk_receive_queue); in peek_head_len()
1006 len = head->len; in peek_head_len()
1011 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); in peek_head_len()
1018 struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; in vhost_net_rx_peek_head_len()
1019 struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_net_rx_peek_head_len()
1020 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_rx_peek_head_len()
1021 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_rx_peek_head_len()
1024 if (!len && rvq->busyloop_timeout) { in vhost_net_rx_peek_head_len()
1036 /* This is a multi-buffer version of vhost_get_desc, that works if
1038 * @vq - the relevant virtqueue
1039 * @datalen - data length we'll be reading
1040 * @iovcount - returned count of io vectors we fill
1041 * @log - vhost log
1042 * @log_num - log offset
1043 * @quota - headcount quota, 1 for big buffer
1066 r = -ENOBUFS; in get_rx_bufs()
1069 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
1070 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
1076 if (d == vq->num) { in get_rx_bufs()
1083 r = -EINVAL; in get_rx_bufs()
1091 len = iov_length(vq->iov + seg, in); in get_rx_bufs()
1093 datalen -= len; in get_rx_bufs()
1097 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); in get_rx_bufs()
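get_rx_bufs(), documented and shown in part above, keeps fetching descriptor chains until their combined length covers datalen or the headcount quota is reached, recording each chain's id and length in heads[] and finally rewriting the last entry's length to the bytes actually consumed. A simplified model of that accumulation over made-up chain sizes; the names and the flat sizes[] array are illustrative.

#include <stdio.h>

#define QUOTA 8         /* stands in for the headcount quota */

struct demo_head {
        unsigned int id;
        unsigned int len;
};

/* Accumulate descriptor chains until 'datalen' bytes are covered or the
 * quota runs out.  'sizes' models the capacity of each available chain.
 * Returns the number of heads used, or 0 if the buffers ran out and the
 * caller should retry later.  datalen must be positive. */
static int demo_get_rx_bufs(const unsigned int *sizes, int nr_chains,
                            int datalen, struct demo_head *heads)
{
        int headcount = 0;
        unsigned int len = 0;

        if (datalen <= 0)
                return 0;

        while (datalen > 0 && headcount < QUOTA) {
                if (headcount == nr_chains)
                        return 0;       /* not enough buffers available */
                len = sizes[headcount];
                heads[headcount].id = headcount;
                heads[headcount].len = len;
                datalen -= len;
                headcount++;
        }
        /* Record how much of the last chain was actually filled. */
        heads[headcount - 1].len = len + datalen;
        return headcount;
}

int main(void)
{
        unsigned int sizes[] = { 1500, 1500, 1500 };
        struct demo_head heads[QUOTA];
        int n = demo_get_rx_bufs(sizes, 3, 4000, heads);

        for (int i = 0; i < n; i++)
                printf("head %d: id=%u len=%u\n", i, heads[i].id, heads[i].len);
        /* Prints three heads; the last len is 1000 (4000 - 2 * 1500). */
        return 0;
}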
1113 /* Expects to be always run from workqueue - which acts as
1114 * read-size critical section for our kind of RCU. */
1117 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; in handle_rx()
1118 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx()
1143 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); in handle_rx()
1151 vhost_disable_notify(&net->dev, vq); in handle_rx()
1154 vhost_hlen = nvq->vhost_hlen; in handle_rx()
1155 sock_hlen = nvq->sock_hlen; in handle_rx()
1158 vq->log : NULL; in handle_rx()
1162 sock_len = vhost_net_rx_peek_head_len(net, sock->sk, in handle_rx()
1168 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, in handle_rx()
1177 vhost_poll_queue(&vq->poll); in handle_rx()
1178 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
1181 vhost_disable_notify(&net->dev, vq); in handle_rx()
1189 if (nvq->rx_ring) in handle_rx()
1190 msg.msg_control = vhost_net_buf_consume(&nvq->rxq); in handle_rx()
1193 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); in handle_rx()
1194 err = sock->ops->recvmsg(sock, &msg, in handle_rx()
1200 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); in handle_rx()
1208 err = sock->ops->recvmsg(sock, &msg, in handle_rx()
1224 "at addr %p\n", vq->iov->iov_base); in handle_rx()
1229 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF in handle_rx()
1243 nvq->done_idx += headcount; in handle_rx()
1244 if (nvq->done_idx > VHOST_NET_BATCH) in handle_rx()
1248 vq->iov, in); in handle_rx()
1253 vhost_poll_queue(&vq->poll); in handle_rx()
1258 mutex_unlock(&vq->mutex); in handle_rx()
1264 poll.work); in handle_tx_kick()
1265 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
1273 poll.work); in handle_rx_kick()
1274 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
1282 poll[VHOST_NET_VQ_TX].work); in handle_tx_net()
1289 poll[VHOST_NET_VQ_RX].work); in handle_rx_net()
1304 return -ENOMEM; in vhost_net_open()
1308 return -ENOMEM; in vhost_net_open()
1316 return -ENOMEM; in vhost_net_open()
1318 n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; in vhost_net_open()
1325 return -ENOMEM; in vhost_net_open()
1327 n->vqs[VHOST_NET_VQ_TX].xdp = xdp; in vhost_net_open()
1329 dev = &n->dev; in vhost_net_open()
1330 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
1331 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
1332 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
1333 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
1335 n->vqs[i].ubufs = NULL; in vhost_net_open()
1336 n->vqs[i].ubuf_info = NULL; in vhost_net_open()
1337 n->vqs[i].upend_idx = 0; in vhost_net_open()
1338 n->vqs[i].done_idx = 0; in vhost_net_open()
1339 n->vqs[i].batched_xdp = 0; in vhost_net_open()
1340 n->vqs[i].vhost_hlen = 0; in vhost_net_open()
1341 n->vqs[i].sock_hlen = 0; in vhost_net_open()
1342 n->vqs[i].rx_ring = NULL; in vhost_net_open()
1343 vhost_net_buf_init(&n->vqs[i].rxq); in vhost_net_open()
1350 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev, in vhost_net_open()
1352 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev, in vhost_net_open()
1355 f->private_data = n; in vhost_net_open()
1356 n->page_frag.page = NULL; in vhost_net_open()
1357 n->refcnt_bias = 0; in vhost_net_open()
1369 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
1374 nvq->rx_ring = NULL; in vhost_net_stop_vq()
1375 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
1382 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
1383 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
1388 vhost_dev_flush(&n->dev); in vhost_net_flush()
1389 if (n->vqs[VHOST_NET_VQ_TX].ubufs) { in vhost_net_flush()
1390 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1391 n->tx_flush = true; in vhost_net_flush()
1392 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1394 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); in vhost_net_flush()
1395 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1396 n->tx_flush = false; in vhost_net_flush()
1397 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); in vhost_net_flush()
1398 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1404 struct vhost_net *n = f->private_data; in vhost_net_release()
1410 vhost_dev_stop(&n->dev); in vhost_net_release()
1411 vhost_dev_cleanup(&n->dev); in vhost_net_release()
1420 * since jobs can re-queue themselves. */ in vhost_net_release()
1422 kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue); in vhost_net_release()
1423 kfree(n->vqs[VHOST_NET_VQ_TX].xdp); in vhost_net_release()
1424 kfree(n->dev.vqs); in vhost_net_release()
1425 if (n->page_frag.page) in vhost_net_release()
1426 __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias); in vhost_net_release()
1437 return ERR_PTR(-ENOTSOCK); in get_raw_socket()
1440 if (sock->sk->sk_type != SOCK_RAW) { in get_raw_socket()
1441 r = -ESOCKTNOSUPPORT; in get_raw_socket()
1445 if (sock->sk->sk_family != AF_PACKET) { in get_raw_socket()
1446 r = -EPFNOSUPPORT; in get_raw_socket()
1475 return ERR_PTR(-EBADF); in get_tap_socket()
1490 if (fd == -1) in get_socket()
1498 return ERR_PTR(-ENOTSOCK); in get_socket()
1509 mutex_lock(&n->dev.mutex); in vhost_net_set_backend()
1510 r = vhost_dev_check_owner(&n->dev); in vhost_net_set_backend()
1515 r = -ENOBUFS; in vhost_net_set_backend()
1518 vq = &n->vqs[index].vq; in vhost_net_set_backend()
1519 nvq = &n->vqs[index]; in vhost_net_set_backend()
1520 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1522 if (fd == -1) in vhost_net_set_backend()
1523 vhost_clear_msg(&n->dev); in vhost_net_set_backend()
1527 r = -EFAULT; in vhost_net_set_backend()
1557 nvq->rx_ring = get_tap_ptr_ring(sock->file); in vhost_net_set_backend()
1559 nvq->rx_ring = NULL; in vhost_net_set_backend()
1562 oldubufs = nvq->ubufs; in vhost_net_set_backend()
1563 nvq->ubufs = ubufs; in vhost_net_set_backend()
1565 n->tx_packets = 0; in vhost_net_set_backend()
1566 n->tx_zcopy_err = 0; in vhost_net_set_backend()
1567 n->tx_flush = false; in vhost_net_set_backend()
1570 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1574 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1576 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1580 vhost_dev_flush(&n->dev); in vhost_net_set_backend()
1584 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
1596 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1598 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
1609 mutex_lock(&n->dev.mutex); in vhost_net_reset_owner()
1610 err = vhost_dev_check_owner(&n->dev); in vhost_net_reset_owner()
1615 err = -ENOMEM; in vhost_net_reset_owner()
1620 vhost_dev_stop(&n->dev); in vhost_net_reset_owner()
1621 vhost_dev_reset_owner(&n->dev, umem); in vhost_net_reset_owner()
1624 mutex_unlock(&n->dev.mutex); in vhost_net_reset_owner()
1650 mutex_lock(&n->dev.mutex); in vhost_net_set_features()
1652 !vhost_log_access_ok(&n->dev)) in vhost_net_set_features()
1656 if (vhost_init_device_iotlb(&n->dev)) in vhost_net_set_features()
1661 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1662 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1663 n->vqs[i].vhost_hlen = vhost_hlen; in vhost_net_set_features()
1664 n->vqs[i].sock_hlen = sock_hlen; in vhost_net_set_features()
1665 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1667 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
1671 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
1672 return -EFAULT; in vhost_net_set_features()
1679 mutex_lock(&n->dev.mutex); in vhost_net_set_owner()
1680 if (vhost_dev_has_owner(&n->dev)) { in vhost_net_set_owner()
1681 r = -EBUSY; in vhost_net_set_owner()
1687 r = vhost_dev_set_owner(&n->dev); in vhost_net_set_owner()
1692 mutex_unlock(&n->dev.mutex); in vhost_net_set_owner()
1699 struct vhost_net *n = f->private_data; in vhost_net_ioctl()
1709 return -EFAULT; in vhost_net_ioctl()
1714 return -EFAULT; in vhost_net_ioctl()
1718 return -EFAULT; in vhost_net_ioctl()
1720 return -EOPNOTSUPP; in vhost_net_ioctl()
1725 return -EFAULT; in vhost_net_ioctl()
1729 return -EFAULT; in vhost_net_ioctl()
1731 return -EOPNOTSUPP; in vhost_net_ioctl()
1732 vhost_set_backend_features(&n->dev, features); in vhost_net_ioctl()
1739 mutex_lock(&n->dev.mutex); in vhost_net_ioctl()
1740 r = vhost_dev_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1741 if (r == -ENOIOCTLCMD) in vhost_net_ioctl()
1742 r = vhost_vring_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1745 mutex_unlock(&n->dev.mutex); in vhost_net_ioctl()
1752 struct file *file = iocb->ki_filp; in vhost_net_chr_read_iter()
1753 struct vhost_net *n = file->private_data; in vhost_net_chr_read_iter()
1754 struct vhost_dev *dev = &n->dev; in vhost_net_chr_read_iter()
1755 int noblock = file->f_flags & O_NONBLOCK; in vhost_net_chr_read_iter()
1763 struct file *file = iocb->ki_filp; in vhost_net_chr_write_iter()
1764 struct vhost_net *n = file->private_data; in vhost_net_chr_write_iter()
1765 struct vhost_dev *dev = &n->dev; in vhost_net_chr_write_iter()
1772 struct vhost_net *n = file->private_data; in vhost_net_chr_poll()
1773 struct vhost_dev *dev = &n->dev; in vhost_net_chr_poll()
1783 .poll = vhost_net_chr_poll,
1792 .name = "vhost-net",
1815 MODULE_ALIAS("devname:vhost-net");