virtio_net.c: e2e5c2a3f90f57f097129b13efe823b8dd32a40c (old) vs 3ffd05c2cccd28b9bd50956a4ec0c9a6cb9cfa2e (new)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>

--- 320 unchanged lines hidden ---

329struct virtio_net_common_hdr {
330 union {
331 struct virtio_net_hdr hdr;
332 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
333 struct virtio_net_hdr_v1_hash hash_v1_hdr;
334 };
335};
336
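
The union above lets the driver reserve a single header blob per receive buffer and read it through whichever layout the negotiated features imply: the classic virtio_net_hdr, the mergeable-rx variant that appends a buffer count, or the v1 layout that also carries a hash report. Below is a minimal user-space sketch of the same one-allocation, many-views idea; the field layouts are simplified stand-ins, not the real definitions from <linux/virtio_net.h>.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real virtio-net header layouts. */
struct hdr_base {                       /* roughly struct virtio_net_hdr */
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len, gso_size, csum_start, csum_offset;
};

struct hdr_mrg {                        /* roughly the _mrg_rxbuf variant */
	struct hdr_base hdr;
	uint16_t num_buffers;
};

struct hdr_hash {                       /* roughly the _v1_hash variant */
	struct hdr_base hdr;
	uint16_t num_buffers;
	uint32_t hash_value;
	uint16_t hash_report;
	uint16_t padding;
};

/* One reservation big enough for any of the layouts, accessed through
 * whichever member the negotiated features select. */
struct common_hdr {
	union {
		struct hdr_base base;
		struct hdr_mrg  mrg;
		struct hdr_hash hash;
	};
};

int main(void)
{
	struct common_hdr h = { 0 };

	h.mrg.num_buffers = 3;          /* written through one view...        */
	printf("size %zu, num_buffers %u\n",
	       sizeof(h), (unsigned)h.hash.num_buffers);  /* ...read via another */
	return 0;
}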
337static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
338static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
339
340static bool is_xdp_frame(void *ptr)
341{
342 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
343}
344
345static void *xdp_to_ptr(struct xdp_frame *ptr)

--- 57 unchanged lines hidden ---

403 rq->pages = (struct page *)p->private;
404 /* clear private here, it is used to chain pages */
405 p->private = 0;
406 } else
407 p = alloc_page(gfp_mask);
408 return p;
409}
410
337static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
338
339static bool is_xdp_frame(void *ptr)
340{
341 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
342}
343
344static void *xdp_to_ptr(struct xdp_frame *ptr)

--- 57 unchanged lines hidden ---

402 rq->pages = (struct page *)p->private;
403 /* clear private here, it is used to chain pages */
404 p->private = 0;
405 } else
406 p = alloc_page(gfp_mask);
407 return p;
408}
409
410static void virtnet_rq_free_buf(struct virtnet_info *vi,
411 struct receive_queue *rq, void *buf)
412{
413 if (vi->mergeable_rx_bufs)
414 put_page(virt_to_head_page(buf));
415 else if (vi->big_packets)
416 give_pages(rq, buf);
417 else
418 put_page(virt_to_head_page(buf));
419}
420
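
Of the two blocks above, the first is the old revision, where virtnet_rq_free_unused_buf() is still forward-declared at line 337; the second is the same region in the new revision, which drops that declaration and adds virtnet_rq_free_buf(), a free-only helper that returns big-packet pages with give_pages() and otherwise just drops the page reference. The get_a_page() hunk visible in both versions recycles spare pages by chaining them through page->private. A rough user-space sketch of that intrusive chaining trick, using made-up types rather than the driver's structures:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct page: "private" doubles as the next pointer
 * while the page sits on the queue's spare list, the same trick
 * get_a_page()/give_pages() use in the driver. */
struct fake_page {
	unsigned long private;
	char data[64];
};

struct fake_rq {
	struct fake_page *pages;        /* head of the chained spare pages */
};

static void give_page(struct fake_rq *rq, struct fake_page *p)
{
	p->private = (unsigned long)rq->pages;   /* chain onto the list */
	rq->pages = p;
}

static struct fake_page *get_page_from(struct fake_rq *rq)
{
	struct fake_page *p = rq->pages;

	if (p) {
		rq->pages = (struct fake_page *)p->private;
		p->private = 0;          /* only a link while the page is queued */
		return p;
	}
	return calloc(1, sizeof(*p));    /* fall back to a fresh allocation */
}

int main(void)
{
	struct fake_rq rq = { 0 };
	struct fake_page *p = get_page_from(&rq);   /* freshly allocated */

	if (!p)
		return 1;
	give_page(&rq, p);                           /* recycled onto the list */
	printf("reused: %s\n", get_page_from(&rq) == p ? "yes" : "no");
	free(p);
	return 0;
}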
411static void enable_delayed_refill(struct virtnet_info *vi)
412{
413 spin_lock_bh(&vi->refill_lock);
414 vi->refill_enabled = true;
415 spin_unlock_bh(&vi->refill_lock);
416}
417
418static void disable_delayed_refill(struct virtnet_info *vi)

--- 210 unchanged lines hidden ---

629
630 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
631 if (buf && rq->do_dma)
632 virtnet_rq_unmap(rq, buf, *len);
633
634 return buf;
635}
636
637static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
638{
639 void *buf;
640
641 buf = virtqueue_detach_unused_buf(rq->vq);
642 if (buf && rq->do_dma)
643 virtnet_rq_unmap(rq, buf, 0);
644
645 return buf;
646}
647
648static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
649{
650 struct virtnet_rq_dma *dma;
651 dma_addr_t addr;
652 u32 offset;
653 void *head;
654
655 if (!rq->do_dma) {

--- 83 unchanged lines hidden ---

739 for (i = 0; i < vi->max_queue_pairs; i++) {
740 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
741 continue;
742
743 vi->rq[i].do_dma = true;
744 }
745}
746
647static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
648{
649 struct virtnet_rq_dma *dma;
650 dma_addr_t addr;
651 u32 offset;
652 void *head;
653
654 if (!rq->do_dma) {

--- 83 unchanged lines hidden ---

738 for (i = 0; i < vi->max_queue_pairs; i++) {
739 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
740 continue;
741
742 vi->rq[i].do_dma = true;
743 }
744}
745
746static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
747{
748 struct virtnet_info *vi = vq->vdev->priv;
749 struct receive_queue *rq;
750 int i = vq2rxq(vq);
751
752 rq = &vi->rq[i];
753
754 if (rq->do_dma)
755 virtnet_rq_unmap(rq, buf, 0);
756
757 virtnet_rq_free_buf(vi, rq, buf);
758}
759
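
virtnet_rq_unmap_free_buf() (new lines 746-758 above) exists only in the new revision; the old revision instead defines virtnet_rq_detach_unused_buf() (old lines 637-646). The new helper pairs the DMA unmap for premapped (do_dma) queues with the mode-specific free, so the paths that recycle receive buffers wholesale, the virtqueue_resize() callback changed further down and the free_unused_bufs() drain at the end, go through one routine and cannot skip the unmap. A small stand-alone sketch of why that pairing matters, using a fake "mapped" flag instead of the real DMA API; the helper names only loosely mirror the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Toy model of a premapped rx buffer: "mapped" stands in for a live DMA
 * mapping that must be torn down before the memory is released. */
struct buf {
	int mapped;
};

static int leaked;

static struct buf *fill(void)            /* buffers are mapped when queued */
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b)
		b->mapped = 1;
	return b;
}

static void free_buf(struct buf *b)      /* free only, loosely like the old
					  * virtnet_rq_free_unused_buf() */
{
	if (!b)
		return;
	if (b->mapped)
		leaked++;                /* the mapping is never torn down */
	free(b);
}

static void unmap_free_buf(struct buf *b) /* unmap + free in one place,
					   * loosely like the new helper */
{
	if (b)
		b->mapped = 0;
	free_buf(b);
}

int main(void)
{
	free_buf(fill());          /* old-style recycle path: leaks the mapping */
	unmap_free_buf(fill());    /* combined helper: nothing leaks */

	printf("leaked mappings: %d\n", leaked);   /* prints 1 */
	return 0;
}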
747static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
748{
749 unsigned int len;
750 unsigned int packets = 0;
751 unsigned int bytes = 0;
752 void *ptr;
753
754 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {

--- 1004 unchanged lines hidden ---

1759{
1760 struct net_device *dev = vi->dev;
1761 struct sk_buff *skb;
1762 struct virtio_net_common_hdr *hdr;
1763
1764 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1765 pr_debug("%s: short packet %i\n", dev->name, len);
1766 DEV_STATS_INC(dev, rx_length_errors);
1767 virtnet_rq_free_unused_buf(rq->vq, buf);
1780 virtnet_rq_free_buf(vi, rq, buf);
1768 return;
1769 }
1770
1771 if (vi->mergeable_rx_bufs)
1772 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1773 stats);
1774 else if (vi->big_packets)
1775 skb = receive_big(dev, vi, rq, buf, len, stats);

--- 611 unchanged lines hidden ---
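
In the receive_buf() hunk above, the drop path for short packets changes from virtnet_rq_free_unused_buf(rq->vq, buf) (old line 1767) to virtnet_rq_free_buf(vi, rq, buf) (new line 1780). Only the free half is needed here: as the virtnet_rq_get_buf() context earlier shows, buffers are already unmapped when they are popped off the ring on the receive path. The guard itself rejects anything shorter than the virtio-net header plus an Ethernet header; a tiny stand-alone version of that check, where the 12-byte header size is only an assumed example (the driver uses vi->hdr_len, which depends on the negotiated features):

#include <stdio.h>

#define ETH_HLEN 14   /* bytes in an Ethernet header */

/* Hypothetical mirror of the "short packet" test in receive_buf(). */
static int long_enough(unsigned int len, unsigned int hdr_len)
{
	return len >= hdr_len + ETH_HLEN;
}

int main(void)
{
	unsigned int hdr_len = 12;   /* assumed example header size */

	printf("len 20: %s\n", long_enough(20, hdr_len) ? "ok" : "too short");
	printf("len 64: %s\n", long_enough(64, hdr_len) ? "ok" : "too short");
	return 0;
}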

2387 bool running = netif_running(vi->dev);
2388 int err, qindex;
2389
2390 qindex = rq - vi->rq;
2391
2392 if (running)
2393 napi_disable(&rq->napi);
2394
2395 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
2408 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2396 if (err)
2397 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2398
2399 if (!try_fill_recv(vi, rq, GFP_KERNEL))
2400 schedule_delayed_work(&vi->refill, 0);
2401
2402 if (running)
2403 virtnet_napi_enable(rq->vq, &rq->napi);

--- 1622 unchanged lines hidden ---

4026static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4027{
4028 if (!is_xdp_frame(buf))
4029 dev_kfree_skb(buf);
4030 else
4031 xdp_return_frame(ptr_to_xdp(buf));
4032}
4033
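
virtnet_sq_free_unused_buf() above (present in both revisions) tells sk_buffs and xdp_frames apart by the tag that xdp_to_ptr() set when the frame was queued: pointers placed on the tx ring are aligned, so their low bit is free to carry VIRTIO_XDP_FLAG, and is_xdp_frame()/ptr_to_xdp() simply test and clear that bit. A self-contained sketch of the same low-bit pointer-tagging trick; the names are illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_FLAG 0x1UL            /* plays the role of VIRTIO_XDP_FLAG */

struct frame  { int id; };        /* stand-in for struct xdp_frame */
struct packet { int id; };        /* stand-in for struct sk_buff   */

static void *frame_to_ptr(struct frame *f)   /* tag before queuing */
{
	return (void *)((uintptr_t)f | TAG_FLAG);
}

static int is_frame(void *ptr)
{
	return (uintptr_t)ptr & TAG_FLAG;
}

static struct frame *ptr_to_frame(void *ptr) /* strip the tag again */
{
	return (struct frame *)((uintptr_t)ptr & ~TAG_FLAG);
}

int main(void)
{
	struct frame  f = { .id = 1 };
	struct packet p = { .id = 2 };
	void *ring[2] = { frame_to_ptr(&f), &p };    /* mixed ring entries */

	/* Works because the objects are at least 2-byte aligned, leaving
	 * bit 0 of their addresses unused. */
	assert(((uintptr_t)&f & TAG_FLAG) == 0);

	for (int i = 0; i < 2; i++) {
		if (is_frame(ring[i]))
			printf("entry %d: frame %d\n", i, ptr_to_frame(ring[i])->id);
		else
			printf("entry %d: packet %d\n", i, ((struct packet *)ring[i])->id);
	}
	return 0;
}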
4034static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
4035{
4036 struct virtnet_info *vi = vq->vdev->priv;
4037 int i = vq2rxq(vq);
4038
4039 if (vi->mergeable_rx_bufs)
4040 put_page(virt_to_head_page(buf));
4041 else if (vi->big_packets)
4042 give_pages(&vi->rq[i], buf);
4043 else
4044 put_page(virt_to_head_page(buf));
4045}
4046
4047static void free_unused_bufs(struct virtnet_info *vi)
4048{
4049 void *buf;
4050 int i;
4051
4052 for (i = 0; i < vi->max_queue_pairs; i++) {
4053 struct virtqueue *vq = vi->sq[i].vq;
4054 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4055 virtnet_sq_free_unused_buf(vq, buf);
4056 cond_resched();
4057 }
4058
4059 for (i = 0; i < vi->max_queue_pairs; i++) {
4060 struct receive_queue *rq = &vi->rq[i];
4060 struct virtqueue *vq = vi->rq[i].vq;
4061
4062 while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
4063 virtnet_rq_free_unused_buf(rq->vq, buf);
4062 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4063 virtnet_rq_unmap_free_buf(vq, buf);
4064 cond_resched();
4065 }
4066}
4067
4068static void virtnet_del_vqs(struct virtnet_info *vi)
4069{
4070 struct virtio_device *vdev = vi->vdev;
4071

--- 738 unchanged lines hidden ---
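
The virtnet_rq_free_unused_buf() definition above (old lines 4034-4045) exists only in the old revision; the new revision deletes it in favor of virtnet_rq_free_buf() and virtnet_rq_unmap_free_buf(). Accordingly, free_unused_bufs() in the new revision drains each rx ring with the generic virtqueue_detach_unused_buf() and hands every buffer to virtnet_rq_unmap_free_buf() (new lines 4060-4063), where the old revision used the rq-specific detach/free pair. The cond_resched() after each queue keeps the drain from monopolizing the CPU on devices with many queue pairs; a rough user-space analog of that chunk-then-yield pattern (illustrative only, not kernel API):

#include <sched.h>
#include <stdio.h>

/* Pretend each queue holds a few buffers that must be recycled. */
static void drain_queue(int q)
{
	for (int buf = 0; buf < 3; buf++)
		printf("queue %d: recycled buffer %d\n", q, buf);
}

int main(void)
{
	for (int q = 0; q < 4; q++) {
		drain_queue(q);
		sched_yield();   /* voluntary yield between queues, loosely
				  * analogous to cond_resched() */
	}
	return 0;
}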