virtio_net.c: 4cb00b13c064088352a4f2ca4a8279010ad218a8 (old) vs. d8f2835a4746f26523cb512dc17e2b0a00dd31a9 (new); unmarked code is identical in both versions
// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

--- 1294 unchanged lines hidden ---

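	/* Added note (not in the source): this fragment is the tail of
	 * mergeable_xdp_get_buf(), identical in both commits.
	 */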
	put_page(*page);

	*page = xdp_page;

	return page_address(*page) + VIRTIO_XDP_HEADROOM;
}

--- new version (d8f2835a4746) ---

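/* Added summary (not part of either commit): receive_mergeable_xdp() runs
 * the attached XDP program on a mergeable-rxbuf packet. XDP_PASS builds an
 * sk_buff from the resulting xdp_buff; XDP_TX and XDP_REDIRECT consume the
 * packet, so NULL is returned; any other verdict frees the buffer chain and
 * counts the drop.
 */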
static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
					     struct virtnet_info *vi,
					     struct receive_queue *rq,
					     struct bpf_prog *xdp_prog,
					     void *buf,
					     void *ctx,
					     unsigned int len,
					     unsigned int *xdp_xmit,
					     struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	unsigned int xdp_frags_truesz = 0;
	struct sk_buff *head_skb;
	unsigned int frame_sz;
	struct xdp_buff xdp;
	void *data;
	u32 act;
	int err;

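	/* Added note (not in the source): mergeable_xdp_get_buf() returns a
	 * buffer the program can run on directly, linearizing the packet into
	 * a fresh page with VIRTIO_XDP_HEADROOM when the incoming buffer
	 * lacks headroom or the program cannot take multi-buffer frames.
	 */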
	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
				     offset, &len, hdr);
	if (unlikely(!data))
		goto err_xdp;

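	/* Added note (not in the source): wrap the head buffer plus the
	 * remaining num_buf - 1 mergeable buffers into a (possibly
	 * multi-frag) xdp_buff.
	 */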
	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
					 &num_buf, &xdp_frags_truesz, stats);
	if (unlikely(err))
		goto err_xdp;

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
		if (unlikely(!head_skb))
			break;
		return head_skb;

	case XDP_TX:
	case XDP_REDIRECT:
		return NULL;

	default:
		break;
	}

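	/* Added note (not in the source): dropped/aborted verdicts and a
	 * failed skb build land here; release the frag pages held by the
	 * xdp_buff before taking the shared error path.
	 */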
	put_xdp_frags(&xdp);

err_xdp:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

	stats->xdp_drops++;
	stats->drops++;
	return NULL;
}

--- both versions ---

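/* Added summary (not part of either commit): receive_mergeable() is the
 * receive path when the device merges a packet across several rx buffers
 * (hdr->num_buffers of them). It hands the packet to XDP when a program is
 * attached, otherwise it links the buffers into a multi-frag sk_buff.
 */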
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
--- old version (4cb00b13c064) ---
	unsigned int frame_sz;
	int err;
--- both versions ---

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	if (unlikely(len > truesize - room)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)(truesize - room));
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

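	/* Added note (not in the source): vi->xdp_enabled only changes when
	 * a program is attached or detached, so the common no-XDP case skips
	 * the RCU read section below.
	 */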
	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
--- old version (4cb00b13c064) ---
		unsigned int xdp_frags_truesz = 0;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
					     &num_buf, &page, offset, &len, hdr);
		if (unlikely(!data))
			goto err_xdp;

		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
						 &num_buf, &xdp_frags_truesz, stats);
		if (unlikely(err))
			goto err_xdp;

		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

		switch (act) {
		case XDP_PASS:
			head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
			if (unlikely(!head_skb))
				goto err_xdp_frags;

			rcu_read_unlock();
			return head_skb;
		case XDP_TX:
		case XDP_REDIRECT:
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			break;
		}
err_xdp_frags:
		put_xdp_frags(&xdp);
		goto err_xdp;
--- new version (d8f2835a4746) ---
		head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
						 len, xdp_xmit, stats);
		rcu_read_unlock();
		return head_skb;
--- both versions ---
	}
	rcu_read_unlock();

skip_xdp:
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))

--- 53 unchanged lines hidden ---

			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

--- old version (4cb00b13c064) ---
err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

--- new version (d8f2835a4746) ---
err_skb:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
	return NULL;
}

--- both versions ---

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{
	enum pkt_hash_types rss_hash_type;

--- 2873 unchanged lines hidden ---