// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"

/* This matches XDP_UMEM_MIN_CHUNK_SIZE, but since that constant is private
 * and may change unexpectedly, and mlx5e has its own minimum valid stride
 * size for striding RQ, keep this check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* The current MTU and XSK headroom don't allow packets to fit into
	 * the frames.
	 */
	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}

static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	int err;

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv, params, xsk, cparam);

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the UMEM is disabled, this SQ
	 * can be closed safely and we stop receiving CQEs. Otherwise, e.g. if
	 * the XDPSQ were reused instead, we might run into trouble when the
	 * UMEM is disabled and then re-enabled: the SQ would keep receiving
	 * CQEs from the old UMEM.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	napi_synchronize(&c->napi);
	synchronize_rcu(); /* Sync with the XSK wakeup. */
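
	/* With the channel state bit cleared and NAPI and RCU synchronized,
	 * no wakeup or polling path can touch the queues anymore, so it is
	 * now safe to tear down the XSK queues and their CQs.
	 */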
	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	/* Kick NAPI via the async ICOSQ so that the XSK RQ gets refilled. */
	spin_lock(&c->async_icosq_lock);
	mlx5e_trigger_irq(&c->async_icosq);
	spin_unlock(&c->async_icosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}

static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}

int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	/* Roll back: point the already-redirected RQTs at the drop RQ. */
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}
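
/* The helpers above are meant to be composed by the XSK UMEM enable/disable
 * path. The sketch below is a minimal, hypothetical caller, NOT part of the
 * driver: it only illustrates the intended ordering of
 * open -> activate -> redirect, and the unwind on failure. The example_
 * prefix marks it as illustrative; a real caller must also hold the state
 * lock and handle channel lookup and parameter setup.
 */
static int __maybe_unused
example_enable_xsk_on_channel(struct mlx5e_priv *priv,
			      struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct xdp_umem *umem,
			      struct mlx5e_channel *c)
{
	int err;

	/* Validate the XSK parameters and create the XSK RQ, the XSK SQ and
	 * their CQs on this channel.
	 */
	err = mlx5e_open_xsk(priv, params, xsk, umem, c);
	if (err)
		return err;

	/* Enable the RQ and kick NAPI so that RX buffers get posted. */
	mlx5e_activate_xsk(c);

	/* Point this channel's XSK RQT at the new RQ so traffic flows to it. */
	err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
	if (err)
		goto err_deactivate;

	return 0;

err_deactivate:
	mlx5e_deactivate_xsk(c);
	mlx5e_close_xsk(c);
	return err;
}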