// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
#include <net/xdp_sock_drv.h>

/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
 * stride size of striding RQ.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(mdev, params, xsk);
	}
}

static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   u16 q_counter,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
	mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}

static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int rq_xdp_ix;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = c->netdev;
	rq->priv         = c->priv;
	rq->tstamp       = c->tstamp;
	rq->clock        = &mdev->clock;
	rq->icosq        = &c->icosq;
	rq->ix           = c->ix;
	rq->channel      = c;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq        = &c->rq_xdpsq;
	rq->xsk_pool     = pool;
	rq->stats        = &c->priv->channel_stats[c->ix]->xskrq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	rq_xdp_ix        = c->ix;
	err = mlx5e_rq_set_handlers(rq, params, xsk);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
}

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk)
{
	int err;

	err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the buff pool is disabled, we could
	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
	 * the XDPSQ was used instead, we might run into trouble when the buff pool
	 * is disabled and then re-enabled, but the SQ continues receiving CQEs
	 * from the old buff pool.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	synchronize_net(); /* Sync with NAPI. */

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
	 * activating XSKRQ in the middle of recovery.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);

	/* TX queue is created active. */
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
	 * middle of recovery. Suspend the recovery to avoid it.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);
	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */

	/* TX queue is disabled on close. */
}