// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>

/* RX data path */

static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
					       u32 cqe_bcnt)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	skb_put_data(skb, data, cqe_bcnt);

	return skb;
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    struct mlx5e_mpw_info *wi,
						    u16 cqe_bcnt,
						    u32 head_offset,
						    u32 page_idx)
{
	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
	/* mlx5e_xdp_handle() takes a u32 length, while cqe_bcnt is u16 on
	 * this path.
	 */
	u32 cqe_bcnt32 = cqe_bcnt;

	/* Check packet size. Note that LRO doesn't use the linear SKB. */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	/* head_offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, in
	 * the current implementation, UMR pages are mapped to XSK frames, so
	 * head_offset should always be 0.
	 */
	WARN_ON_ONCE(head_offset);

	/* Finish initializing the xdp_buff and make the frame readable on the
	 * CPU before running the XDP program.
	 */
	xdp->data_end = xdp->data + cqe_bcnt32;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
	net_prefetch(xdp->data);

	/* Possible flows:
	 * - XDP_REDIRECT to XSKMAP:
	 *   The page is owned by userspace from now on.
	 * - XDP_TX and other XDP_REDIRECTs:
	 *   The page was returned by ZCA and recycled.
	 * - XDP_DROP:
	 *   Recycle the page.
	 * - XDP_PASS:
	 *   Allocate an SKB, copy the data and recycle the page.
	 *
	 * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its
	 * size is the same as the Driver RX Ring's size, and pages for WQEs are
	 * allocated first from the Reuse Ring, so it has enough space.
	 */

	if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
		return NULL; /* page/packet was consumed by XDP */
	}

	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
	 * frame. On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5_cqe64 *cqe,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt)
{
	struct xdp_buff *xdp = wi->di->xsk;

	/* wi->offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, the
	 * XSK allocator allocates frames per packet, instead of pages, so
	 * wi->offset should always be 0.
	 */
	WARN_ON_ONCE(wi->offset);

	xdp->data_end = xdp->data + cqe_bcnt;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
	net_prefetch(xdp->data);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
		rq->stats->wqe_err++;
		return NULL;
	}

	if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
		return NULL; /* page/packet was consumed by XDP */

	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
	 * will be handled by mlx5e_put_rx_frag.
	 * On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
}
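
/* Below is a minimal, non-compiled sketch of how the two exported handlers
 * above are expected to be wired into an RQ, from the caller's side. It is an
 * illustration, not driver code: the helper name
 * mlx5e_rq_set_rx_handlers_sketch is hypothetical, and the field names and
 * non-XSK counterparts (mlx5e_skb_from_cqe_linear,
 * mlx5e_skb_from_cqe_mpwrq_linear) follow mlx5e conventions but should be
 * verified against en_main.c for the kernel version at hand.
 */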
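#if 0	/* illustrative sketch only, not built */
static void mlx5e_rq_set_rx_handlers_sketch(struct mlx5e_rq *rq, bool xsk)
{
	/* Legacy (cyclic) RQ: one completion describes one packet. */
	rq->wqe.skb_from_cqe = xsk ?
		mlx5e_xsk_skb_from_cqe_linear :
		mlx5e_skb_from_cqe_linear;

	/* Striding RQ: a completion describes a packet inside a multi-packet
	 * WQE, hence the extra head_offset/page_idx arguments.
	 */
	rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
		mlx5e_xsk_skb_from_cqe_mpwrq_linear :
		mlx5e_skb_from_cqe_mpwrq_linear;
}
#endif

/* Caller contract for both XSK handlers: a non-NULL return is an SKB holding
 * a copy of the frame, ready to be completed and passed up the stack; NULL
 * means the frame was either consumed by XDP (TX/REDIRECT/DROP) or dropped
 * due to an error (oversize packet, unexpected CQE opcode, SKB allocation
 * failure). In all NULL cases the caller only has to release the WQE.
 */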