// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
#include <net/xdp_sock_drv.h>

static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
		mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
		return -EINVAL;
	}

	return 0;
}

/* The limit of 2048 can be altered, but it shouldn't exceed the minimal
 * stride size of striding RQ.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
		mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
			      MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
		return false;
	}

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
	}
}

static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   u16 q_counter,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
	mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}

static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int rq_xdp_ix;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = c->netdev;
	rq->priv         = c->priv;
	rq->tstamp       = c->tstamp;
	rq->clock        = &mdev->clock;
	rq->icosq        = &c->icosq;
	rq->ix           = c->ix;
	rq->channel      = c;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq        = &c->rq_xdpsq;
	rq->xsk_pool     = pool;
	rq->stats        = &c->priv->channel_stats[c->ix]->xskrq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	rq_xdp_ix = c->ix;
	err = mlx5e_rq_set_handlers(rq, params, xsk);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
}

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk)
{
	int err;

	err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the buff pool is disabled, we can
	 * close this SQ safely and stop receiving CQEs. Otherwise, e.g., if
	 * the XDPSQ were used instead, we might run into trouble when the buff
	 * pool is disabled and then re-enabled, but the SQ continues receiving
	 * CQEs from the old buff pool.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	synchronize_net(); /* Sync with NAPI. */

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
	 * activating XSKRQ in the middle of recovery.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);

	/* TX queue is created active. */
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
	 * middle of recovery. Suspend the recovery to avoid it.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);
	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */

	/* TX queue is disabled on close. */
}