// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"

/* Check whether an AF_XDP socket (XSK) with the given parameters is
 * compatible with the device and the current channel parameters. Returns
 * true when the XSK frame layout allows linear RX on this device.
 */
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current
	 * mlx5e XDP implementation doesn't support multiple packets per page.
	 */
	if (xsk->chunk_size != PAGE_SIZE)
		return false;

	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
	if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}

/* Build the parameters of the XSK internal control operations SQ (ICOSQ):
 * the common SQ settings plus the requested log work queue size.
 */
static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
				       u8 log_wq_size,
				       struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
}

/* Fill cparam with the parameters of all queues needed for an XSK-enabled
 * channel: RQ, XDP SQ, ICOSQ and their CQs. The ICOSQ is kept at the minimum
 * size, as it is only used to post NOPs (see mlx5e_open_xsk below).
 */
static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
}

/* Open the XSK-specific queues of a channel: the XSK RQ, the XSK TX (XDP)
 * SQ, the ICOSQ and their CQs, each CQ opened before the queue that uses it.
 * On success the channel is marked with MLX5E_CHANNEL_STATE_XSK and 0 is
 * returned. On failure everything opened so far is torn down in reverse
 * order through the goto chain and a negative errno is returned.
 */
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param cparam = {};
	struct dim_cq_moder icocq_moder = {}; /* zeroed moderation for the ICOSQ CQ */
	int err;

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
	if (unlikely(err))
		return err;

	err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the UMEM is disabled, we could
	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
	 * the XDPSQ was used instead, we might run into trouble when the UMEM
	 * is disabled and then reenabled, but the SQ continues receiving CQEs
	 * from the old UMEM.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
	if (unlikely(err))
		goto err_close_sq;

	/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
	 * triggered and NAPI to be called on the correct CPU.
	 */
	err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
	if (unlikely(err))
		goto err_close_icocq;

	spin_lock_init(&c->xskicosq_lock);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_icocq:
	mlx5e_close_cq(&c->xskicosq.cq);

err_close_sq:
	mlx5e_close_xdpsq(&c->xsksq);

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

	return err;
}

/* Close all XSK queues of a channel in the reverse order of mlx5e_open_xsk.
 * The XSK state bit is cleared first and NAPI is synchronized so that no
 * poll routine touches the queues while they are being destroyed.
 */
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	napi_synchronize(&c->napi);

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_icosq(&c->xskicosq);
	mlx5e_close_cq(&c->xskicosq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);
}

/* Enable the XSK RQ and post a NOP on the ICOSQ to trigger an IRQ, so that
 * NAPI runs on the channel's CPU and starts processing descriptors.
 */
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	spin_lock(&c->xskicosq_lock);
	mlx5e_trigger_irq(&c->xskicosq);
	spin_unlock(&c->xskicosq_lock);
}

/* Counterpart of mlx5e_activate_xsk: deactivate the XSK RQ only. */
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}

/* Point the direct (non-RSS) RQT of the XSK TIR at index ix to the given
 * RQ number.
 */
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

/* Steer the channel's XSK RQT to its own XSK RQ. */
int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

/* Steer the XSK RQT of channel ix to the drop RQ instead of a real RQ. */
int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}

/* Redirect the XSK RQTs of all XSK-enabled channels to their XSK RQs.
 * No-op when no XSK sockets are attached (priv->xsk.refcnt == 0). On
 * failure, the channels redirected so far are rolled back to the drop RQ
 * and the error is returned.
 */
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	/* Roll back: restore drop steering for channels already redirected. */
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

/* Redirect the XSK RQTs of all XSK-enabled channels to the drop RQ. No-op
 * when no XSK sockets are attached.
 */
void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}