// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"

/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
 * change unexpectedly, and mlx5e has a minimum valid stride size for striding
 * RQ, keep this check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048

/* Check whether the given channel parameters and XSK (AF_XDP) parameters are
 * compatible with this device. Returns true when an XSK RQ can be opened:
 * the UMEM chunk size must be within [MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE],
 * the current MTU plus XSK headroom must fit into one chunk, and the RQ type
 * in use must support linear SKB mode for these parameters.
 */
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}

/* Fill in SQ parameters for the XSK internal control (ICO) SQ: common SQ
 * settings plus the requested log2 work-queue size.
 */
static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
				       u8 log_wq_size,
				       struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
}

/* Build the full set of channel parameters (RQ, XDP SQ, ICO SQ and their
 * CQs) for an XSK channel. The ICO SQ uses the minimum SQ size, since it is
 * only used for control traffic (see mlx5e_open_xsk).
 */
static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
}

/* Open the XSK (AF_XDP) queues on channel @c: RX CQ, RQ, TX CQ, a dedicated
 * XDP SQ and the ICO SQ with its CQ. On success sets MLX5E_CHANNEL_STATE_XSK
 * on the channel and returns 0; on failure unwinds everything opened so far
 * (each err_* label closes exactly the resources opened before the failing
 * step, in reverse order) and returns a negative errno (-EINVAL for invalid
 * XSK parameters, -ENOMEM if cparam allocation fails, or the error from the
 * failing open helper).
 */
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct dim_cq_moder icocq_moder = {}; /* no moderation for the ICO CQ */
	int err;

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	/* cparam is large; allocate it rather than putting it on the stack. */
	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv, params, xsk, cparam);

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the UMEM is disabled, we could
	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
	 * the XDPSQ was used instead, we might run into trouble when the UMEM
	 * is disabled and then reenabled, but the SQ continues receiving CQEs
	 * from the old UMEM.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
	if (unlikely(err))
		goto err_close_sq;

	/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
	 * triggered and NAPI to be called on the correct CPU.
	 */
	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
	if (unlikely(err))
		goto err_close_icocq;

	kvfree(cparam);

	spin_lock_init(&c->xskicosq_lock);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_icocq:
	mlx5e_close_cq(&c->xskicosq.cq);

err_close_sq:
	mlx5e_close_xdpsq(&c->xsksq);

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

/* Close the XSK queues of channel @c, mirroring mlx5e_open_xsk. The XSK state
 * bit is cleared first and NAPI is synchronized so that no poll routine still
 * sees the queues while they are being torn down; each queue is then closed
 * before its CQ.
 */
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	napi_synchronize(&c->napi);

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_icosq(&c->xskicosq);
	mlx5e_close_cq(&c->xskicosq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);
}

/* Enable the XSK RQ and kick NAPI (via a NOP on the ICO SQ) so that
 * processing starts on the channel's CPU.
 */
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */
	mlx5e_trigger_irq(&c->xskicosq);
}

/* Deactivate the XSK RQ. */
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}

/* Point the XSK RQT of channel @ix at RQ number @rqn using a non-RSS
 * (direct, single-entry) redirect.
 */
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

/* Steer the XSK RQT of channel @c to that channel's XSK RQ. */
int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

/* Steer the XSK RQT of channel @ix to the drop RQ, discarding traffic. */
int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}

/* Redirect the XSK RQTs of all channels with XSK enabled to their XSK RQs.
 * A no-op when no XSK sockets are open (xsk.refcnt == 0). On failure, rolls
 * back every channel redirected so far to the drop RQ (rollback errors are
 * ignored) and returns the original error.
 */
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

/* Redirect the XSK RQTs of all channels with XSK enabled to the drop RQ.
 * A no-op when no XSK sockets are open. Errors from the redirect are ignored
 * (best-effort teardown path).
 */
void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}