/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_XSK_RX_H__
#define __MLX5_EN_XSK_RX_H__

#include "en.h"
#include <net/xdp_sock_drv.h>

/* The low bits of an MTT pointer tag are reserved for flags, so buffer
 * addresses programmed into the MTT must be aligned to this mask.
 */
#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL

/* RX data path */

/* skb builders for XSK RQs. The payload is copied out of the XSK buffer,
 * because the underlying UMEM frame is recycled back to the pool.
 */
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    struct mlx5e_mpw_info *wi,
						    u16 cqe_bcnt,
						    u32 head_offset,
						    u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt);

/* Allocate an XSK buffer for an RX descriptor, retrying until the frame's DMA
 * address is suitable for the RQ type.
 */
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
					    struct mlx5e_dma_info *dma_info)
{
retry:
	dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
	if (!dma_info->xsk)
		return -ENOMEM;

	/* Store the DMA address without headroom. In striding RQ case, we just
	 * provide pages for UMR, and headroom is counted at the setup stage
	 * when creating a WQE. In non-striding RQ case, headroom is accounted
	 * in mlx5e_alloc_rx_wqe.
	 */
	dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);

	/* MTT page mapping has alignment requirements. If they are not
	 * satisfied, leak the descriptor so that it won't be handed out again,
	 * and try to allocate a new one.
	 */
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
			xsk_buff_discard(dma_info->xsk);
			goto retry;
		}
	}

	return 0;
}

/* Translate an RX allocation failure into the XSK need_wakeup flag. When the
 * flag mechanism is disabled, the error is propagated to the caller, which
 * keeps polling; when it is enabled, the error is absorbed, and user space is
 * told (via the flag) whether the fill ring needs replenishing.
 */
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
	if (!xsk_uses_need_wakeup(rq->xsk_pool))
		return alloc_err;

	if (unlikely(alloc_err))
		xsk_set_rx_need_wakeup(rq->xsk_pool);
	else
		xsk_clear_rx_need_wakeup(rq->xsk_pool);

	return false;
}

#endif /* __MLX5_EN_XSK_RX_H__ */
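
/*
 * Illustrative sketch (not part of the upstream header): a minimal,
 * hypothetical RX refill helper showing how the two inline helpers above
 * compose. The function name, the `infos` array and the `wanted` count are
 * assumptions made for illustration; the real driver drives allocation from
 * its WQE posting path.
 *
 *	static bool example_xsk_refill(struct mlx5e_rq *rq,
 *				       struct mlx5e_dma_info *infos, int wanted)
 *	{
 *		bool alloc_err = false;
 *		int i;
 *
 *		for (i = 0; i < wanted; i++) {
 *			// On striding RQs, the helper retries internally until
 *			// the frame DMA address satisfies MLX5E_MTT_PTAG_MASK.
 *			if (mlx5e_xsk_page_alloc_pool(rq, &infos[i])) {
 *				alloc_err = true; // fill ring ran dry
 *				break;
 *			}
 *		}
 *
 *		// With need_wakeup enabled, a failure sets the flag so user
 *		// space knows to replenish the fill ring, and false is
 *		// returned; otherwise alloc_err is propagated unchanged.
 *		return mlx5e_xsk_update_rx_wakeup(rq, alloc_err);
 *	}
 */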