// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>
#include <linux/filter.h>

/* RX data path */

static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
						u32 cqe_bcnt)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	skb_put_data(skb, data, cqe_bcnt);

	return skb;
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    struct mlx5e_mpw_info *wi,
						    u16 cqe_bcnt,
						    u32 head_offset,
						    u32 page_idx)
{
	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
	struct bpf_prog *prog;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	/* head_offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, in
	 * the current implementation, UMR pages are mapped to XSK frames, so
	 * head_offset should always be 0.
	 */
	WARN_ON_ONCE(head_offset);

	xdp->data_end = xdp->data + cqe_bcnt;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
	net_prefetch(xdp->data);

	/* Possible flows:
	 * - XDP_REDIRECT to XSKMAP:
	 *   The page is owned by the userspace from now.
	 * - XDP_TX and other XDP_REDIRECTs:
	 *   The page was returned by ZCA and recycled.
	 * - XDP_DROP:
	 *   Recycle the page.
	 * - XDP_PASS:
	 *   Allocate an SKB, copy the data and recycle the page.
	 *
	 * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its
	 * size is the same as the Driver RX Ring's size, and pages for WQEs are
	 * allocated first from the Reuse Ring, so it has enough space.
	 */

	prog = rcu_dereference(rq->xdp_prog);
	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
		return NULL; /* page/packet was consumed by XDP */
	}

	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
	 * frame. On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt)
{
	struct xdp_buff *xdp = wi->di->xsk;
	struct bpf_prog *prog;

	/* wi->offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, the
	 * XSK allocator allocates frames per packet, instead of pages, so
	 * wi->offset should always be 0.
	 */
	WARN_ON_ONCE(wi->offset);

	xdp->data_end = xdp->data + cqe_bcnt;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
	net_prefetch(xdp->data);

	prog = rcu_dereference(rq->xdp_prog);
	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
		return NULL; /* page/packet was consumed by XDP */

	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
	 * will be handled by mlx5e_put_rx_frag.
	 * On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}