// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>

/* RX data path */

static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
					       u32 cqe_bcnt)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	skb_put_data(skb, data, cqe_bcnt);

	return skb;
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    struct mlx5e_mpw_info *wi,
						    u16 cqe_bcnt,
						    u32 head_offset,
						    u32 page_idx)
{
	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
	u32 cqe_bcnt32 = cqe_bcnt;
	bool consumed;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	/* head_offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, in
	 * the current implementation, UMR pages are mapped to XSK frames, so
	 * head_offset should always be 0.
	 */
	WARN_ON_ONCE(head_offset);

	xdp->data_end = xdp->data + cqe_bcnt32;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp);
	prefetch(xdp->data);

	rcu_read_lock();
	consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
	rcu_read_unlock();

	/* Possible flows:
	 * - XDP_REDIRECT to XSKMAP:
	 *   The page is owned by the userspace from now.
	 * - XDP_TX and other XDP_REDIRECTs:
	 *   The page was returned by ZCA and recycled.
	 * - XDP_DROP:
	 *   Recycle the page.
	 * - XDP_PASS:
	 *   Allocate an SKB, copy the data and recycle the page.
	 *
	 * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its
	 * size is the same as the Driver RX Ring's size, and pages for WQEs are
	 * allocated first from the Reuse Ring, so it has enough space.
	 */

	if (likely(consumed)) {
		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
		return NULL; /* page/packet was consumed by XDP */
	}

	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
	 * frame. On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
}

struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5_cqe64 *cqe,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt)
{
	struct xdp_buff *xdp = wi->di->xsk;
	bool consumed;

	/* wi->offset is not used in this function, because xdp->data and the
	 * DMA address point directly to the necessary place. Furthermore, the
	 * XSK allocator allocates frames per packet, instead of pages, so
	 * wi->offset should always be 0.
	 */
	WARN_ON_ONCE(wi->offset);

	xdp->data_end = xdp->data + cqe_bcnt;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp);
	prefetch(xdp->data);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
		rq->stats->wqe_err++;
		return NULL;
	}

	rcu_read_lock();
	consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
	rcu_read_unlock();

	if (likely(consumed))
		return NULL; /* page/packet was consumed by XDP */

	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
	 * will be handled by mlx5e_put_rx_frag.
	 * On SKB allocation failure, NULL is returned.
	 */
	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
}
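
/* Illustrative sketch only (not part of this file): a minimal example of how
 * a CQE handler in the generic RX path might consume
 * mlx5e_xsk_skb_from_cqe_linear(). The real dispatch lives in en_rx.c; the
 * fragment lookup helper (example_get_frag) and this function itself are
 * hypothetical names introduced for illustration, so the block is compiled
 * out with #if 0.
 */
#if 0
static void example_xsk_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 ci = be16_to_cpu(cqe->wqe_counter);
	struct mlx5e_wqe_frag_info *wi = example_get_frag(rq, ci); /* hypothetical lookup */
	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
	struct sk_buff *skb;

	/* NULL means either XDP consumed the frame (XDP_TX, XDP_REDIRECT,
	 * XDP_DROP) or SKB allocation failed; in both cases there is nothing
	 * to hand to the stack.
	 */
	skb = mlx5e_xsk_skb_from_cqe_linear(rq, cqe, wi, cqe_bcnt);
	if (!skb)
		return;

	napi_gro_receive(rq->cq.napi, skb);
}
#endif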