// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"

/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
 * change unexpectedly, and mlx5e has a minimum valid stride size for striding
 * RQ, keep this check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}

static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   u16 q_counter,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}

static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int rq_xdp_ix;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = c->netdev;
	rq->priv         = c->priv;
	rq->tstamp       = c->tstamp;
	rq->clock        = &mdev->clock;
	rq->icosq        = &c->icosq;
	rq->ix           = c->ix;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq        = &c->rq_xdpsq;
	rq->xsk_pool     = pool;
	rq->stats        = &c->priv->channel_stats[c->ix].xskrq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	rq_xdp_ix        = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = mlx5e_rq_set_handlers(rq, params, xsk);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
}

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk)
{
	int err;

	err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}
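
/* Open all resources needed by the XSK part of a channel: the RX CQ, the XSK
 * RQ, the TX CQ and the dedicated XSK send queue. They are created in that
 * order and torn down in reverse on failure; MLX5E_CHANNEL_STATE_XSK is set
 * only once everything has succeeded.
 */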
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the buff pool is disabled, we could
	 * close this SQ safely and stop receiving CQEs. Otherwise, e.g., if the
	 * XDPSQ was used instead, we might run into trouble when the buff pool
	 * is disabled and then re-enabled, but the SQ continues receiving CQEs
	 * from the old buff pool.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	spin_lock_bh(&c->async_icosq_lock);
	mlx5e_trigger_irq(&c->async_icosq);
	spin_unlock_bh(&c->async_icosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}
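
/* Point the XSK RQT of channel ix at a single RQN via a direct (non-RSS)
 * redirect. The helpers below use this to steer traffic either to the
 * channel's XSK RQ or to the drop RQ while an XSK queue is inactive.
 */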
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}

int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}