virtio_net.c (dbe4fec2447dd215964aad88b0e06f96c6958ee9) vs. virtio_net.c (00765f8ed74240419091a1708c195405b55fe243)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>

--- 775 unchanged lines hidden ---

784 sq->stats.xdp_tx_drops += n - nxmit;
785 sq->stats.kicks += kicks;
786 u64_stats_update_end(&sq->stats.syncp);
787
788 virtnet_xdp_put_sq(vi, sq);
789 return ret;
790}
791
792static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
793 struct net_device *dev,
794 unsigned int *xdp_xmit,
795 struct virtnet_rq_stats *stats)
796{
797 struct xdp_frame *xdpf;
798 int err;
799 u32 act;
800
801 act = bpf_prog_run_xdp(xdp_prog, xdp);
802 stats->xdp_packets++;
803
804 switch (act) {
805 case XDP_PASS:
806 return act;
807
808 case XDP_TX:
809 stats->xdp_tx++;
810 xdpf = xdp_convert_buff_to_frame(xdp);
811 if (unlikely(!xdpf)) {
812 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
813 return XDP_DROP;
814 }
815
816 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
817 if (unlikely(!err)) {
818 xdp_return_frame_rx_napi(xdpf);
819 } else if (unlikely(err < 0)) {
820 trace_xdp_exception(dev, xdp_prog, act);
821 return XDP_DROP;
822 }
823 *xdp_xmit |= VIRTIO_XDP_TX;
824 return act;
825
826 case XDP_REDIRECT:
827 stats->xdp_redirects++;
828 err = xdp_do_redirect(dev, xdp, xdp_prog);
829 if (err)
830 return XDP_DROP;
831
832 *xdp_xmit |= VIRTIO_XDP_REDIR;
833 return act;
834
835 default:
836 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
837 fallthrough;
838 case XDP_ABORTED:
839 trace_xdp_exception(dev, xdp_prog, act);
840 fallthrough;
841 case XDP_DROP:
842 return XDP_DROP;
843 }
844}
845
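virtnet_xdp_handler() above runs the attached BPF program and fully handles the XDP_TX and XDP_REDIRECT verdicts, including the transmit/redirect itself and the *xdp_xmit flag update, so a caller only has to act on the returned verdict. As an illustrative sketch only, not part of either commit, this is the caller pattern that the updated small-buffer and mergeable-buffer receive hunks shown further down follow:

/* Sketch, not from the diff: consuming the verdict of virtnet_xdp_handler(). */
	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* The program may have moved data/data_end/data_meta;
		 * recompute lengths and build the skb as usual.
		 */
		break;
	case XDP_TX:
	case XDP_REDIRECT:
		/* The helper already transmitted or redirected the frame
		 * and set VIRTIO_XDP_TX/VIRTIO_XDP_REDIR in *xdp_xmit;
		 * skip skb construction.
		 */
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		/* XDP_DROP, or an invalid action already warned about by
		 * the helper: take the receive path's error/drop exit.
		 */
		goto err_xdp;
	}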
792static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
793{
794 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
795}
796
797/* We copy the packet for XDP in the following cases:
798 *
799 * 1) Packet is scattered across multiple rx buffers.

--- 75 unchanged lines hidden ---

875 unsigned int xdp_headroom = (unsigned long)ctx;
876 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
877 unsigned int headroom = vi->hdr_len + header_offset;
878 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
879 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
880 struct page *page = virt_to_head_page(buf);
881 unsigned int delta = 0;
882 struct page *xdp_page;
846static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
847{
848 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
849}
850
851/* We copy the packet for XDP in the following cases:
852 *
853 * 1) Packet is scattered across multiple rx buffers.

--- 75 unchanged lines hidden ---

929 unsigned int xdp_headroom = (unsigned long)ctx;
930 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
931 unsigned int headroom = vi->hdr_len + header_offset;
932 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
933 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
934 struct page *page = virt_to_head_page(buf);
935 unsigned int delta = 0;
936 struct page *xdp_page;
883 int err;
884 unsigned int metasize = 0;
885
886 len -= vi->hdr_len;
887 stats->bytes += len;
888
889 if (unlikely(len > GOOD_PACKET_LEN)) {
890 pr_debug("%s: rx error: len %u exceeds max size %d\n",
891 dev->name, len, GOOD_PACKET_LEN);

--- 5 unchanged lines hidden ---

897 xdp_prog = NULL;
898 goto skip_xdp;
899 }
900
901 rcu_read_lock();
902 xdp_prog = rcu_dereference(rq->xdp_prog);
903 if (xdp_prog) {
904 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
937 unsigned int metasize = 0;
938
939 len -= vi->hdr_len;
940 stats->bytes += len;
941
942 if (unlikely(len > GOOD_PACKET_LEN)) {
943 pr_debug("%s: rx error: len %u exceeds max size %d\n",
944 dev->name, len, GOOD_PACKET_LEN);

--- 5 unchanged lines hidden ---

950 xdp_prog = NULL;
951 goto skip_xdp;
952 }
953
954 rcu_read_lock();
955 xdp_prog = rcu_dereference(rq->xdp_prog);
956 if (xdp_prog) {
957 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
905 struct xdp_frame *xdpf;
906 struct xdp_buff xdp;
907 void *orig_data;
908 u32 act;
909
910 if (unlikely(hdr->hdr.gso_type))
911 goto err_xdp;
912
913 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {

--- 16 unchanged lines hidden ---

930 put_page(page);
931 page = xdp_page;
932 }
933
934 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
935 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
936 xdp_headroom, len, true);
937 orig_data = xdp.data;
958 struct xdp_buff xdp;
959 void *orig_data;
960 u32 act;
961
962 if (unlikely(hdr->hdr.gso_type))
963 goto err_xdp;
964
965 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {

--- 16 unchanged lines hidden ---

982 put_page(page);
983 page = xdp_page;
984 }
985
986 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
987 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
988 xdp_headroom, len, true);
989 orig_data = xdp.data;
938 act = bpf_prog_run_xdp(xdp_prog, &xdp);
939 stats->xdp_packets++;
940
990
991 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
992
941 switch (act) {
942 case XDP_PASS:
943 /* Recalculate length in case bpf program changed it */
944 delta = orig_data - xdp.data;
945 len = xdp.data_end - xdp.data;
946 metasize = xdp.data - xdp.data_meta;
947 break;
948 case XDP_TX:
993 switch (act) {
994 case XDP_PASS:
995 /* Recalculate length in case bpf program changed it */
996 delta = orig_data - xdp.data;
997 len = xdp.data_end - xdp.data;
998 metasize = xdp.data - xdp.data_meta;
999 break;
1000 case XDP_TX:
949 stats->xdp_tx++;
950 xdpf = xdp_convert_buff_to_frame(&xdp);
951 if (unlikely(!xdpf))
952 goto err_xdp;
953 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
954 if (unlikely(!err)) {
955 xdp_return_frame_rx_napi(xdpf);
956 } else if (unlikely(err < 0)) {
957 trace_xdp_exception(vi->dev, xdp_prog, act);
958 goto err_xdp;
959 }
960 *xdp_xmit |= VIRTIO_XDP_TX;
961 rcu_read_unlock();
962 goto xdp_xmit;
963 case XDP_REDIRECT:
1001 case XDP_REDIRECT:
964 stats->xdp_redirects++;
965 err = xdp_do_redirect(dev, &xdp, xdp_prog);
966 if (err)
967 goto err_xdp;
968 *xdp_xmit |= VIRTIO_XDP_REDIR;
969 rcu_read_unlock();
970 goto xdp_xmit;
971 default:
1002 rcu_read_unlock();
1003 goto xdp_xmit;
1004 default:
972 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
973 fallthrough;
974 case XDP_ABORTED:
975 trace_xdp_exception(vi->dev, xdp_prog, act);
976 goto err_xdp;
1005 goto err_xdp;
977 case XDP_DROP:
978 goto err_xdp;
979 }
980 }
981 rcu_read_unlock();
982
983skip_xdp:
984 skb = build_skb(buf, buflen);
985 if (!skb)
986 goto err;

--- 290 unchanged lines hidden ---

1277 goto skip_xdp;
1278 }
1279
1280 rcu_read_lock();
1281 xdp_prog = rcu_dereference(rq->xdp_prog);
1282 if (xdp_prog) {
1283 unsigned int xdp_frags_truesz = 0;
1284 struct skb_shared_info *shinfo;
1006 }
1007 }
1008 rcu_read_unlock();
1009
1010skip_xdp:
1011 skb = build_skb(buf, buflen);
1012 if (!skb)
1013 goto err;

--- 290 unchanged lines hidden ---

1304 goto skip_xdp;
1305 }
1306
1307 rcu_read_lock();
1308 xdp_prog = rcu_dereference(rq->xdp_prog);
1309 if (xdp_prog) {
1310 unsigned int xdp_frags_truesz = 0;
1311 struct skb_shared_info *shinfo;
1285 struct xdp_frame *xdpf;
1286 struct page *xdp_page;
1287 struct xdp_buff xdp;
1288 void *data;
1289 u32 act;
1290 int i;
1291
1292 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
1293 &num_buf, &page, offset, &len, hdr);
1294 if (unlikely(!data))
1295 goto err_xdp;
1296
1297 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1298 &num_buf, &xdp_frags_truesz, stats);
1299 if (unlikely(err))
1300 goto err_xdp_frags;
1301
1312 struct page *xdp_page;
1313 struct xdp_buff xdp;
1314 void *data;
1315 u32 act;
1316 int i;
1317
1318 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
1319 &num_buf, &page, offset, &len, hdr);
1320 if (unlikely(!data))
1321 goto err_xdp;
1322
1323 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1324 &num_buf, &xdp_frags_truesz, stats);
1325 if (unlikely(err))
1326 goto err_xdp_frags;
1327
1302 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1303 stats->xdp_packets++;
1328 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1304
1305 switch (act) {
1306 case XDP_PASS:
1307 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1308 if (unlikely(!head_skb))
1309 goto err_xdp_frags;
1310
1311 rcu_read_unlock();
1312 return head_skb;
1313 case XDP_TX:
1329
1330 switch (act) {
1331 case XDP_PASS:
1332 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1333 if (unlikely(!head_skb))
1334 goto err_xdp_frags;
1335
1336 rcu_read_unlock();
1337 return head_skb;
1338 case XDP_TX:
1314 stats->xdp_tx++;
1315 xdpf = xdp_convert_buff_to_frame(&xdp);
1316 if (unlikely(!xdpf)) {
1317 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1318 goto err_xdp_frags;
1319 }
1320 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1321 if (unlikely(!err)) {
1322 xdp_return_frame_rx_napi(xdpf);
1323 } else if (unlikely(err < 0)) {
1324 trace_xdp_exception(vi->dev, xdp_prog, act);
1325 goto err_xdp_frags;
1326 }
1327 *xdp_xmit |= VIRTIO_XDP_TX;
1328 rcu_read_unlock();
1329 goto xdp_xmit;
1330 case XDP_REDIRECT:
1339 case XDP_REDIRECT:
1331 stats->xdp_redirects++;
1332 err = xdp_do_redirect(dev, &xdp, xdp_prog);
1333 if (err)
1334 goto err_xdp_frags;
1335 *xdp_xmit |= VIRTIO_XDP_REDIR;
1336 rcu_read_unlock();
1337 goto xdp_xmit;
1338 default:
1340 rcu_read_unlock();
1341 goto xdp_xmit;
1342 default:
1339 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
1340 fallthrough;
1341 case XDP_ABORTED:
1342 trace_xdp_exception(vi->dev, xdp_prog, act);
1343 fallthrough;
1344 case XDP_DROP:
1345 goto err_xdp_frags;
1343 break;
1346 }
1347err_xdp_frags:
1348 if (xdp_buff_has_frags(&xdp)) {
1349 shinfo = xdp_get_shared_info_from_buff(&xdp);
1350 for (i = 0; i < shinfo->nr_frags; i++) {
1351 xdp_page = skb_frag_page(&shinfo->frags[i]);
1352 put_page(xdp_page);
1353 }

--- 2974 unchanged lines hidden ---
1344 }
1345err_xdp_frags:
1346 if (xdp_buff_has_frags(&xdp)) {
1347 shinfo = xdp_get_shared_info_from_buff(&xdp);
1348 for (i = 0; i < shinfo->nr_frags; i++) {
1349 xdp_page = skb_frag_page(&shinfo->frags[i]);
1350 put_page(xdp_page);
1351 }

--- 2974 unchanged lines hidden ---