virtio_net.c: 57a30218fa25c469ed507964bbf028b7a064309a -> 7c06458c102ee66068c03780527fcfc9b954ad91
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>

--- 120 unchanged lines hidden ---

129struct send_queue {
130 /* Virtqueue associated with this send queue */
131 struct virtqueue *vq;
132
133 /* TX: fragments + linear part + virtio header */
134 struct scatterlist sg[MAX_SKB_FRAGS + 2];
135
136 /* Name of the send queue: output.$index */
137 char name[40];
137 char name[16];
138
139 struct virtnet_sq_stats stats;
140
141 struct napi_struct napi;
142
143 /* Record whether sq is in reset state. */
144 bool reset;
145};

--- 20 unchanged lines hidden ---

166
167 /* RX: fragments + linear part + virtio header */
168 struct scatterlist sg[MAX_SKB_FRAGS + 2];
169
170 /* Min single buffer size for mergeable buffers case. */
171 unsigned int min_buf_len;
172
173 /* Name of this receive queue: input.$index */
174 char name[40];
174 char name[16];
175
176 struct xdp_rxq_info xdp_rxq;
177};
178
179/* This structure can contain rss message with maximum settings for indirection table and keysize
180 * Note, that default structure that describes RSS configuration virtio_net_rss_config
181 * contains same info but can't handle table values.
182 * In any case, structure would be passed to virtio hw through sg_buf split by parts

--- 258 unchanged lines hidden ---

441{
442 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
443}
444
445/* Called from bottom half context */
446static struct sk_buff *page_to_skb(struct virtnet_info *vi,
447 struct receive_queue *rq,
448 struct page *page, unsigned int offset,
449 unsigned int len, unsigned int truesize,
450 bool hdr_valid, unsigned int metasize,
451 unsigned int headroom)
449 unsigned int len, unsigned int truesize)
452{
453 struct sk_buff *skb;
454 struct virtio_net_hdr_mrg_rxbuf *hdr;
455 unsigned int copy, hdr_len, hdr_padded_len;
456 struct page *page_to_free = NULL;
457 int tailroom, shinfo_size;
458 char *p, *hdr_p, *buf;
459
460 p = page_address(page) + offset;
461 hdr_p = p;
462
463 hdr_len = vi->hdr_len;
464 if (vi->mergeable_rx_bufs)
465 hdr_padded_len = hdr_len;
466 else
467 hdr_padded_len = sizeof(struct padded_vnet_hdr);
468
469 /* If headroom is not 0, there is an offset between the beginning of the
470 * data and the allocated space, otherwise the data and the allocated
471 * space are aligned.
472 *
473 * Buffers with headroom use PAGE_SIZE as alloc size, see
474 * add_recvbuf_mergeable() + get_mergeable_buf_len()
475 */
476 truesize = headroom ? PAGE_SIZE : truesize;
477 tailroom = truesize - headroom;
478 buf = p - headroom;
479
467 buf = p;
480 len -= hdr_len;
481 offset += hdr_padded_len;
482 p += hdr_padded_len;
483 tailroom -= hdr_padded_len + len;
471 tailroom = truesize - hdr_padded_len - len;
484
485 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
486
487 /* copy small packet so we can reuse these pages */
488 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
489 skb = build_skb(buf, truesize);
490 if (unlikely(!skb))
491 return NULL;

--- 13 unchanged lines hidden ---

505 return NULL;
506
507 /* Copy all frame if it fits skb->head, otherwise
508 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
509 */
510 if (len <= skb_tailroom(skb))
511 copy = len;
512 else
513 copy = ETH_HLEN + metasize;
501 copy = ETH_HLEN;
514 skb_put_data(skb, p, copy);
515
516 len -= copy;
517 offset += copy;
518
519 if (vi->mergeable_rx_bufs) {
520 if (len)
521 skb_add_rx_frag(skb, 0, page, offset, len, truesize);

--- 22 unchanged lines hidden ---

544 page = (struct page *)page->private;
545 offset = 0;
546 }
547
548 if (page)
549 give_pages(rq, page);
550
551ok:
552 /* hdr_valid means no XDP, so we can copy the vnet header */
553 if (hdr_valid) {
554 hdr = skb_vnet_hdr(skb);
555 memcpy(hdr, hdr_p, hdr_len);
556 }
540 hdr = skb_vnet_hdr(skb);
541 memcpy(hdr, hdr_p, hdr_len);
557 if (page_to_free)
558 put_page(page_to_free);
559
560 if (metasize) {
561 __skb_pull(skb, metasize);
562 skb_metadata_set(skb, metasize);
563 }
564
565 return skb;
566}
567
568static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
569 struct send_queue *sq,
570 struct xdp_frame *xdpf)
571{
572 struct virtio_net_hdr_mrg_rxbuf *hdr;
573 int err;
553 struct skb_shared_info *shinfo;
554 u8 nr_frags = 0;
555 int err, i;
574
575 if (unlikely(xdpf->headroom < vi->hdr_len))
576 return -EOVERFLOW;
577
578 /* Make room for virtqueue hdr (also change xdpf->headroom?) */
560 if (unlikely(xdp_frame_has_frags(xdpf))) {
561 shinfo = xdp_get_shared_info_from_frame(xdpf);
562 nr_frags = shinfo->nr_frags;
563 }
564
565 /* In wrapping function virtnet_xdp_xmit(), we need to free
566 * up the pending old buffers, where we need to calculate the
567 * position of skb_shared_info in xdp_get_frame_len() and
568 * xdp_return_frame(), which will involve to xdpf->data and
569 * xdpf->headroom. Therefore, we need to update the value of
570 * headroom synchronously here.
571 */
572 xdpf->headroom -= vi->hdr_len;
579 xdpf->data -= vi->hdr_len;
580 /* Zero header and leave csum up to XDP layers */
581 hdr = xdpf->data;
582 memset(hdr, 0, vi->hdr_len);
583 xdpf->len += vi->hdr_len;
584
585 sg_init_one(sq->sg, xdpf->data, xdpf->len);
579 sg_init_table(sq->sg, nr_frags + 1);
580 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
581 for (i = 0; i < nr_frags; i++) {
582 skb_frag_t *frag = &shinfo->frags[i];
586
583
587 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
588 GFP_ATOMIC);
584 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
585 skb_frag_size(frag), skb_frag_off(frag));
586 }
587
588 err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
589 xdp_to_ptr(xdpf), GFP_ATOMIC);
589 if (unlikely(err))
590 return -ENOSPC; /* Caller handle free/refcnt */
591
592 return 0;
593}
594
595/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
596 * the current cpu, so it does not need to be locked.

--- 63 unchanged lines hidden ---

660 goto out;
661 }
662
663 /* Free up any pending old buffers before queueing new ones. */
664 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
665 if (likely(is_xdp_frame(ptr))) {
666 struct xdp_frame *frame = ptr_to_xdp(ptr);
667
668 bytes += frame->len;
669 bytes += xdp_get_frame_len(frame);
669 xdp_return_frame(frame);
670 } else {
671 struct sk_buff *skb = ptr;
672
673 bytes += skb->len;
674 napi_consume_skb(skb, false);
675 }
676 packets++;

--- 242 unchanged lines hidden ---

919 struct virtnet_info *vi,
920 struct receive_queue *rq,
921 void *buf,
922 unsigned int len,
923 struct virtnet_rq_stats *stats)
924{
925 struct page *page = buf;
926 struct sk_buff *skb =
927 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
928 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
928
929 stats->bytes += len - vi->hdr_len;
930 if (unlikely(!skb))
931 goto err;
932
933 return skb;
934
935err:
936 stats->drops++;
937 give_pages(rq, page);
938 return NULL;
939}
940
942/* Why not use xdp_build_skb_from_frame() ?
943 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
944 * virtio-net there are 2 points that do not match its requirements:
945 * 1. The size of the prefilled buffer is not fixed before xdp is set.
946 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
947 * like eth_type_trans() (which virtio-net does in receive_buf()).
948 */
949static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
950 struct virtnet_info *vi,
951 struct xdp_buff *xdp,
952 unsigned int xdp_frags_truesz)
953{
954 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
955 unsigned int headroom, data_len;
956 struct sk_buff *skb;
957 int metasize;
958 u8 nr_frags;
959
960 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
961 pr_debug("Error building skb as missing reserved tailroom for xdp");
962 return NULL;
963 }
964
965 if (unlikely(xdp_buff_has_frags(xdp)))
966 nr_frags = sinfo->nr_frags;
967
968 skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
969 if (unlikely(!skb))
970 return NULL;
971
972 headroom = xdp->data - xdp->data_hard_start;
973 data_len = xdp->data_end - xdp->data;
974 skb_reserve(skb, headroom);
975 __skb_put(skb, data_len);
976
977 metasize = xdp->data - xdp->data_meta;
978 metasize = metasize > 0 ? metasize : 0;
979 if (metasize)
980 skb_metadata_set(skb, metasize);
981
982 if (unlikely(xdp_buff_has_frags(xdp)))
983 xdp_update_skb_shared_info(skb, nr_frags,
984 sinfo->xdp_frags_size,
985 xdp_frags_truesz,
986 xdp_buff_is_frag_pfmemalloc(xdp));
987
988 return skb;
989}
990
991/* TODO: build xdp in big mode */
992static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
993 struct virtnet_info *vi,
994 struct receive_queue *rq,
995 struct xdp_buff *xdp,
996 void *buf,
997 unsigned int len,
998 unsigned int frame_sz,
999 u16 *num_buf,
1000 unsigned int *xdp_frags_truesize,
1001 struct virtnet_rq_stats *stats)
1002{
1003 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1004 unsigned int headroom, tailroom, room;
1005 unsigned int truesize, cur_frag_size;
1006 struct skb_shared_info *shinfo;
1007 unsigned int xdp_frags_truesz = 0;
1008 struct page *page;
1009 skb_frag_t *frag;
1010 int offset;
1011 void *ctx;
1012
1013 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1014 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1015 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1016
1017 if (*num_buf > 1) {
1018 /* If we want to build multi-buffer xdp, we need
1019 * to specify that the flags of xdp_buff have the
1020 * XDP_FLAGS_HAS_FRAG bit.
1021 */
1022 if (!xdp_buff_has_frags(xdp))
1023 xdp_buff_set_frags_flag(xdp);
1024
1025 shinfo = xdp_get_shared_info_from_buff(xdp);
1026 shinfo->nr_frags = 0;
1027 shinfo->xdp_frags_size = 0;
1028 }
1029
1030 if ((*num_buf - 1) > MAX_SKB_FRAGS)
1031 return -EINVAL;
1032
1033 while ((--*num_buf) >= 1) {
1034 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
1035 if (unlikely(!buf)) {
1036 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1037 dev->name, *num_buf,
1038 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1039 dev->stats.rx_length_errors++;
1040 return -EINVAL;
1041 }
1042
1043 stats->bytes += len;
1044 page = virt_to_head_page(buf);
1045 offset = buf - page_address(page);
1046
1047 truesize = mergeable_ctx_to_truesize(ctx);
1048 headroom = mergeable_ctx_to_headroom(ctx);
1049 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1050 room = SKB_DATA_ALIGN(headroom + tailroom);
1051
1052 cur_frag_size = truesize;
1053 xdp_frags_truesz += cur_frag_size;
1054 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1055 put_page(page);
1056 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1057 dev->name, len, (unsigned long)(truesize - room));
1058 dev->stats.rx_length_errors++;
1059 return -EINVAL;
1060 }
1061
1062 frag = &shinfo->frags[shinfo->nr_frags++];
1063 __skb_frag_set_page(frag, page);
1064 skb_frag_off_set(frag, offset);
1065 skb_frag_size_set(frag, len);
1066 if (page_is_pfmemalloc(page))
1067 xdp_buff_set_frag_pfmemalloc(xdp);
1068
1069 shinfo->xdp_frags_size += len;
1070 }
1071
1072 *xdp_frags_truesize = xdp_frags_truesz;
1073 return 0;
1074}
1075
941static struct sk_buff *receive_mergeable(struct net_device *dev,
942 struct virtnet_info *vi,
943 struct receive_queue *rq,
944 void *buf,
945 void *ctx,
946 unsigned int len,
947 unsigned int *xdp_xmit,
948 struct virtnet_rq_stats *stats)
949{
950 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
951 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
952 struct page *page = virt_to_head_page(buf);
953 int offset = buf - page_address(page);
954 struct sk_buff *head_skb, *curr_skb;
955 struct bpf_prog *xdp_prog;
956 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
957 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
958 unsigned int metasize = 0;
959 unsigned int frame_sz;
1093 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1094 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1095 unsigned int frame_sz, xdp_room;
960 int err;
961
962 head_skb = NULL;
963 stats->bytes += len - vi->hdr_len;
964
965 if (unlikely(len > truesize)) {
1101 if (unlikely(len > truesize - room)) {
966 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1102 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
967 dev->name, len, (unsigned long)ctx);
1103 dev->name, len, (unsigned long)(truesize - room));
968 dev->stats.rx_length_errors++;
969 goto err_skb;
970 }
971
972 if (likely(!vi->xdp_enabled)) {
973 xdp_prog = NULL;
974 goto skip_xdp;
975 }
976
977 rcu_read_lock();
978 xdp_prog = rcu_dereference(rq->xdp_prog);
979 if (xdp_prog) {
1116 unsigned int xdp_frags_truesz = 0;
1117 struct skb_shared_info *shinfo;
980 struct xdp_frame *xdpf;
981 struct page *xdp_page;
982 struct xdp_buff xdp;
983 void *data;
984 u32 act;
1123 int i;
985
986 /* Transient failure which in theory could occur if
987 * in-flight packets from before XDP was enabled reach
988 * the receive path after XDP is loaded.
989 */
990 if (unlikely(hdr->hdr.gso_type))
991 goto err_xdp;
992
993 /* Buffers with headroom use PAGE_SIZE as alloc size,
994 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
1132 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1133 * with headroom may add hole in truesize, which
1134 * make their length exceed PAGE_SIZE. So we disabled the
1135 * hole mechanism for xdp. See add_recvbuf_mergeable().
995 */
1136 */
996 frame_sz = headroom ? PAGE_SIZE : truesize;
1137 frame_sz = truesize;
997
1138
998 /* This happens when rx buffer size is underestimated
999 * or headroom is not enough because of the buffer
1000 * was refilled before XDP is set. This should only
1001 * happen for the first several packets, so we don't
1002 * care much about its performance.
1139 /* This happens when headroom is not enough because
1140 * of the buffer was prefilled before XDP is set.
1141 * This should only happen for the first several packets.
1142 * In fact, vq reset can be used here to help us clean up
1143 * the prefilled buffers, but many existing devices do not
1144 * support it, and we don't want to bother users who are
1145 * using xdp normally.
1003 */
1146 */
1004 if (unlikely(num_buf > 1 ||
1005 headroom < virtnet_get_headroom(vi))) {
1147 if (!xdp_prog->aux->xdp_has_frags &&
1148 (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
1006 /* linearize data for XDP */
1007 xdp_page = xdp_linearize_page(rq, &num_buf,
1008 page, offset,
1009 VIRTIO_XDP_HEADROOM,
1010 &len);
1011 frame_sz = PAGE_SIZE;
1012
1013 if (!xdp_page)
1014 goto err_xdp;
1015 offset = VIRTIO_XDP_HEADROOM;
1159 } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
1160 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1161 sizeof(struct skb_shared_info));
1162 if (len + xdp_room > PAGE_SIZE)
1163 goto err_xdp;
1164
1165 xdp_page = alloc_page(GFP_ATOMIC);
1166 if (!xdp_page)
1167 goto err_xdp;
1168
1169 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1170 page_address(page) + offset, len);
1171 frame_sz = PAGE_SIZE;
1172 offset = VIRTIO_XDP_HEADROOM;
1016 } else {
1017 xdp_page = page;
1018 }
1019
1020 /* Allow consuming headroom but reserve enough space to push
1021 * the descriptor on if we get an XDP_TX return code.
1022 */
1023 data = page_address(xdp_page) + offset;
1177 data = page_address(xdp_page) + offset;
1024 xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
1025 xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
1026 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
1178 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1179 &num_buf, &xdp_frags_truesz, stats);
1180 if (unlikely(err))
1181 goto err_xdp_frags;
1027
1028 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1029 stats->xdp_packets++;
1030
1031 switch (act) {
1032 case XDP_PASS:
1033 metasize = xdp.data - xdp.data_meta;
1034
1035 /* recalculate offset to account for any header
1036 * adjustments and minus the metasize to copy the
1037 * metadata in page_to_skb(). Note other cases do not
1038 * build an skb and avoid using offset
1039 */
1040 offset = xdp.data - page_address(xdp_page) -
1041 vi->hdr_len - metasize;
1042
1043 /* recalculate len if xdp.data, xdp.data_end or
1044 * xdp.data_meta were adjusted
1045 */
1046 len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
1047
1048 /* recalculate headroom if xdp.data or xdp_data_meta
1049 * were adjusted, note that offset should always point
1050 * to the start of the reserved bytes for virtio_net
1051 * header which are followed by xdp.data, that means
1052 * that offset is equal to the headroom (when buf is
1053 * starting at the beginning of the page, otherwise
1054 * there is a base offset inside the page) but it's used
1055 * with a different starting point (buf start) than
1056 * xdp.data (buf start + vnet hdr size). If xdp.data or
1057 * data_meta were adjusted by the xdp prog then the
1058 * headroom size has changed and so has the offset, we
1059 * can use data_hard_start, which points at buf start +
1060 * vnet hdr size, to calculate the new headroom and use
1061 * it later to compute buf start in page_to_skb()
1062 */
1063 headroom = xdp.data - xdp.data_hard_start - metasize;
1064
1065 /* We can only create skb based on xdp_page. */
1066 if (unlikely(xdp_page != page)) {
1067 rcu_read_unlock();
1188 if (unlikely(xdp_page != page))
1068 put_page(page);
1189 put_page(page);
1069 head_skb = page_to_skb(vi, rq, xdp_page, offset,
1070 len, PAGE_SIZE, false,
1071 metasize,
1072 headroom);
1073 return head_skb;
1074 }
1075 break;
1190 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1191 rcu_read_unlock();
1192 return head_skb;
1076 case XDP_TX:
1077 stats->xdp_tx++;
1078 xdpf = xdp_convert_buff_to_frame(&xdp);
1079 if (unlikely(!xdpf)) {
1080 if (unlikely(xdp_page != page))
1081 put_page(xdp_page);
1082 goto err_xdp;
1197 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1198 goto err_xdp_frags;
1083 }
1084 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1085 if (unlikely(!err)) {
1086 xdp_return_frame_rx_napi(xdpf);
1087 } else if (unlikely(err < 0)) {
1088 trace_xdp_exception(vi->dev, xdp_prog, act);
1089 if (unlikely(xdp_page != page))
1090 put_page(xdp_page);
1091 goto err_xdp;
1205 goto err_xdp_frags;
1092 }
1093 *xdp_xmit |= VIRTIO_XDP_TX;
1094 if (unlikely(xdp_page != page))
1095 put_page(page);
1096 rcu_read_unlock();
1097 goto xdp_xmit;
1098 case XDP_REDIRECT:
1099 stats->xdp_redirects++;
1100 err = xdp_do_redirect(dev, &xdp, xdp_prog);
1101 if (err) {
1102 if (unlikely(xdp_page != page))
1103 put_page(xdp_page);
1104 goto err_xdp;
1105 }
1215 if (err)
1216 goto err_xdp_frags;
1106 *xdp_xmit |= VIRTIO_XDP_REDIR;
1107 if (unlikely(xdp_page != page))
1108 put_page(page);
1109 rcu_read_unlock();
1110 goto xdp_xmit;
1111 default:
1112 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
1113 fallthrough;
1114 case XDP_ABORTED:
1115 trace_xdp_exception(vi->dev, xdp_prog, act);
1116 fallthrough;
1117 case XDP_DROP:
1118 if (unlikely(xdp_page != page))
1119 __free_pages(xdp_page, 0);
1120 goto err_xdp;
1229 goto err_xdp_frags;
1121 }
1230 }
1231err_xdp_frags:
1232 if (unlikely(xdp_page != page))
1233 __free_pages(xdp_page, 0);
1234
1235 if (xdp_buff_has_frags(&xdp)) {
1236 shinfo = xdp_get_shared_info_from_buff(&xdp);
1237 for (i = 0; i < shinfo->nr_frags; i++) {
1238 xdp_page = skb_frag_page(&shinfo->frags[i]);
1239 put_page(xdp_page);
1240 }
1241 }
1242
1243 goto err_xdp;
1122 }
1123 rcu_read_unlock();
1124
1125skip_xdp:
1126 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
1127 metasize, headroom);
1248 head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
1128 curr_skb = head_skb;
1129
1130 if (unlikely(!curr_skb))
1131 goto err_skb;
1132 while (--num_buf) {
1133 int num_skb_frags;
1134
1135 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);

--- 5 unchanged lines hidden ---

1141 dev->stats.rx_length_errors++;
1142 goto err_buf;
1143 }
1144
1145 stats->bytes += len;
1146 page = virt_to_head_page(buf);
1147
1148 truesize = mergeable_ctx_to_truesize(ctx);
1149 if (unlikely(len > truesize)) {
1270 headroom = mergeable_ctx_to_headroom(ctx);
1271 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1272 room = SKB_DATA_ALIGN(headroom + tailroom);
1273 if (unlikely(len > truesize - room)) {
1150 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1274 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1151 dev->name, len, (unsigned long)ctx);
1275 dev->name, len, (unsigned long)(truesize - room));
1152 dev->stats.rx_length_errors++;
1153 goto err_skb;
1154 }
1155
1156 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1157 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1158 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1159

--- 86 unchanged lines hidden ---

1246{
1247 struct net_device *dev = vi->dev;
1248 struct sk_buff *skb;
1249 struct virtio_net_hdr_mrg_rxbuf *hdr;
1250
1251 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1252 pr_debug("%s: short packet %i\n", dev->name, len);
1253 dev->stats.rx_length_errors++;
1254 if (vi->mergeable_rx_bufs) {
1255 put_page(virt_to_head_page(buf));
1256 } else if (vi->big_packets) {
1257 give_pages(rq, buf);
1258 } else {
1259 put_page(virt_to_head_page(buf));
1260 }
1378 virtnet_rq_free_unused_buf(rq->vq, buf);
1261 return;
1262 }
1263
1264 if (vi->mergeable_rx_bufs)
1265 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1266 stats);
1267 else if (vi->big_packets)
1268 skb = receive_big(dev, vi, rq, buf, len, stats);

--- 152 unchanged lines hidden ---

1421 buf += headroom; /* advance address leaving hole at front of pkt */
1422 get_page(alloc_frag->page);
1423 alloc_frag->offset += len + room;
1424 hole = alloc_frag->size - alloc_frag->offset;
1425 if (hole < len + room) {
1426 /* To avoid internal fragmentation, if there is very likely not
1427 * enough space for another buffer, add the remaining space to
1428 * the current buffer.
1547 * XDP core assumes that frame_size of xdp_buff and the length
1548 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1429 */
1549 */
1430 len += hole;
1550 if (!headroom)
1551 len += hole;
1431 alloc_frag->offset += hole;
1432 }
1433
1434 sg_init_one(rq->sg, buf, len);
1435 ctx = mergeable_len_to_ctx(len, headroom);
1556 ctx = mergeable_len_to_ctx(len + room, headroom);
1436 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1437 if (err < 0)
1438 put_page(virt_to_head_page(buf));
1439
1440 return err;
1441}
1442
1443/*

--- 159 unchanged lines hidden ---

1603
1604 pr_debug("Sent skb %p\n", skb);
1605
1606 bytes += skb->len;
1607 napi_consume_skb(skb, in_napi);
1608 } else {
1609 struct xdp_frame *frame = ptr_to_xdp(ptr);
1610
1611 bytes += frame->len;
1732 bytes += xdp_get_frame_len(frame);
1612 xdp_return_frame(frame);
1613 }
1614 packets++;
1615 }
1616
1617 /* Avoid overhead when no packets have been processed
1618 * happens when called speculatively from start_xmit.
1619 */

--- 52 unchanged lines hidden ---

1672 struct send_queue *sq;
1673 unsigned int received;
1674 unsigned int xdp_xmit = 0;
1675
1676 virtnet_poll_cleantx(rq);
1677
1678 received = virtnet_receive(rq, budget, &xdp_xmit);
1679
1801 if (xdp_xmit & VIRTIO_XDP_REDIR)
1802 xdp_do_flush();
1803
1680 /* Out of packets? */
1681 if (received < budget)
1682 virtqueue_napi_complete(napi, rq->vq, received);
1683
1684 if (xdp_xmit & VIRTIO_XDP_REDIR)
1685 xdp_do_flush();
1686
1687 if (xdp_xmit & VIRTIO_XDP_TX) {
1688 sq = virtnet_xdp_get_sq(vi);
1689 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1690 u64_stats_update_begin(&sq->stats.syncp);
1691 sq->stats.kicks++;
1692 u64_stats_update_end(&sq->stats.syncp);
1693 }
1694 virtnet_xdp_put_sq(vi, sq);

--- 1380 unchanged lines hidden ---

3075 return 0;
3076
3077 return virtnet_set_guest_offloads(vi, offloads);
3078}
3079
3080static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3081 struct netlink_ext_ack *extack)
3082{
3083 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
3204 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3205 sizeof(struct skb_shared_info));
3206 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3084 struct virtnet_info *vi = netdev_priv(dev);
3085 struct bpf_prog *old_prog;
3086 u16 xdp_qp = 0, curr_qp;
3087 int i, err;
3088
3089 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3090 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3091 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||

--- 6 unchanged lines hidden ---

3098 return -EOPNOTSUPP;
3099 }
3100
3101 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3102 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3103 return -EINVAL;
3104 }
3105
3106 if (dev->mtu > max_sz) {
3107 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
3108 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
3229 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3230 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3231 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3109 return -EINVAL;
3110 }
3111
3112 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3113 if (prog)
3114 xdp_qp = nr_cpu_ids;
3115
3116 /* XDP requires extra queues for XDP_TX */

--- 568 unchanged lines hidden ---

3685 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3686 int mtu = virtio_cread16(vdev,
3687 offsetof(struct virtio_net_config,
3688 mtu));
3689 if (mtu < MIN_MTU)
3690 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3691 }
3692
3816 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
3817 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
3818 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
3819 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
3820 }
3821
3693 return 0;
3694}
3695
3696static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
3697{
3698 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3699 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3700 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||

--- 425 unchanged lines hidden ---