// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"

/* This matches XDP_UMEM_MIN_CHUNK_SIZE, but since that constant is private
 * and may change unexpectedly, and mlx5e has its own minimum valid stride
 * size for striding RQ, keep the check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* The current MTU and XSK headroom don't allow packets to fit in the
	 * frames.
	 */
	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}

static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv, params, xsk, cparam);

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp,
			    &ccp, &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
			    &ccp, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the buff pool is disabled, we can
	 * close this SQ safely and stop receiving CQEs. Otherwise, e.g. if the
	 * XDPSQ was used instead, we might run into trouble when the buff pool
	 * is disabled and then re-enabled, but the SQ continues receiving CQEs
	 * from the old buff pool.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
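
	/* After the synchronize_rcu() above, no NAPI poll can still observe
	 * MLX5E_CHANNEL_STATE_XSK set, so the XSK queues below can be torn
	 * down without racing with the datapath. Each queue is closed before
	 * its CQ.
	 */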
	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	/* Trigger an IRQ on the async ICOSQ to kick NAPI, so that it notices
	 * the newly enabled XSK RQ.
	 */
	spin_lock_bh(&c->async_icosq_lock);
	mlx5e_trigger_irq(&c->async_icosq);
	spin_unlock_bh(&c->async_icosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}

static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}

int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	/* Roll back: point the already redirected RQTs at the drop RQ again. */
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}
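
/* Usage sketch (illustrative only, not compiled): roughly how a caller such
 * as the XSK buff pool enable path might drive the functions above for one
 * channel c. It assumes priv, params, pool and c are in scope and that the
 * caller holds the required locks; xsk_pool_get_headroom() and
 * xsk_pool_get_chunk_size() are the standard helpers from
 * include/net/xdp_sock_drv.h for filling struct mlx5e_xsk_param from a pool.
 *
 *	struct mlx5e_xsk_param xsk = {
 *		.headroom   = xsk_pool_get_headroom(pool),
 *		.chunk_size = xsk_pool_get_chunk_size(pool),
 *	};
 *	int err;
 *
 *	err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
 *	if (err)
 *		return err;
 *	mlx5e_activate_xsk(c);
 *	err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
 *	if (err) {
 *		mlx5e_deactivate_xsk(c);
 *		mlx5e_close_xsk(c);
 *	}
 *	return err;
 */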