// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "tx.h"
#include "pool.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>

int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_channel *c;

	if (unlikely(!mlx5e_xdp_is_active(priv)))
		return -ENETDOWN;

	if (unlikely(qid >= params->num_channels))
		return -EINVAL;

	c = priv->channels.c[qid];

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		/* To avoid WQE overrun, don't post a NOP if async_icosq is not
		 * active and not polled by NAPI. Return 0, because the upcoming
		 * activate will trigger the IRQ for us.
		 */
		if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state)))
			return 0;

		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
			return 0;

		mlx5e_trigger_napi_icosq(c);
	}

	return 0;
}

/* When TX fails (because of the size of the packet), we need to get completions
 * in order, so post a NOP to get a CQE. Since AF_XDP doesn't distinguish
 * between successful TX and errors, handling in mlx5e_poll_xdpsq_cq is the
 * same.
 */
static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_info *xdpi)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
	struct mlx5e_tx_wqe *nopwqe;

	wi->num_wqebbs = 1;
	wi->num_pkts = 1;

	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
	sq->doorbell_cseg = &nopwqe->ctrl;
}

bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
	struct xsk_buff_pool *pool = sq->xsk_pool;
	struct mlx5e_xmit_data xdptxd;
	struct mlx5e_xdp_info xdpi;
	bool work_done = true;
	bool flush = false;

	xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;

	for (; budget; budget--) {
		int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,
						   mlx5e_xmit_xdp_frame_check_mpwqe,
						   mlx5e_xmit_xdp_frame_check,
						   sq);
		struct xdp_desc desc;
		bool ret;

		if (unlikely(check_result < 0)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &desc)) {
			/* TX will get stuck until something wakes it up by
			 * triggering NAPI. Currently it's expected that the
			 * application calls sendto() if there are consumed, but
			 * not completed frames.
			 */
			break;
		}

		xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
		xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
		xdptxd.len = desc.len;

		xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
				      check_result);
		if (unlikely(!ret)) {
			if (sq->mpwqe.wqe)
				mlx5e_xdp_mpwqe_complete(sq);

			mlx5e_xsk_tx_post_err(sq, &xdpi);
		} else {
			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		}

		flush = true;
	}

	if (flush) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);

		xsk_tx_release(pool);
	}

	return !(budget && work_done);
}
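
/* Illustrative userspace counterpart (a sketch, not part of this driver):
 * the comment in mlx5e_xsk_tx() above notes that the application is expected
 * to call sendto() when it has consumed-but-not-completed frames. With the
 * libxdp/libbpf xsk helpers, such a TX kick typically looks like the snippet
 * below; "xsk" and "tx_ring" are hypothetical application-side objects, not
 * names used by this driver. The sendto() on the AF_XDP socket reaches
 * ndo_xsk_wakeup, i.e. mlx5e_xsk_wakeup() above.
 *
 *	#include <sys/socket.h>
 *	#include <xdp/xsk.h>	// or <bpf/xsk.h> with older libbpf
 *
 *	static void kick_tx(struct xsk_socket *xsk, struct xsk_ring_prod *tx_ring)
 *	{
 *		// Only kick when the kernel asked for a wakeup (need_wakeup mode).
 *		if (xsk_ring_prod__needs_wakeup(tx_ring))
 *			sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *			       NULL, 0);
 *	}
 */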