// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"

/* This matches XDP_UMEM_MIN_CHUNK_SIZE, but since that constant is private
 * and may change unexpectedly, and since mlx5e has a minimum valid stride
 * size for striding RQ, keep this check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}
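
/* Worked example (illustrative only, not part of the driver): on a system
 * with PAGE_SIZE == 4096, the size window above accepts chunk sizes in
 * [2048, 4096] and rejects everything else before the WQ-type-specific
 * linearity checks run. A hypothetical helper mirroring just that window:
 */
#if 0
static bool example_chunk_size_in_window(u32 chunk_size)
{
	/* Only the size window; the real check also validates that a linear
	 * SKB (or a valid MPWQE layout) is possible for the chosen WQ type.
	 */
	return chunk_size <= PAGE_SIZE &&
	       chunk_size >= MLX5E_MIN_XSK_CHUNK_SIZE;
}
#endif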

static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   u16 q_counter,
				   struct mlx5e_channel_param *cparam)
{
	mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
	mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}

static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int rq_xdp_ix;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = c->netdev;
	rq->priv = c->priv;
	rq->tstamp = c->tstamp;
	rq->clock = &mdev->clock;
	rq->icosq = &c->icosq;
	rq->ix = c->ix;
	rq->channel = c;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq = &c->rq_xdpsq;
	rq->xsk_pool = pool;
	rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);

	/* XSK RQs get their own xdp_rxq_info index range, placed after the
	 * indices used by the regular RQs.
	 */
	rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = mlx5e_rq_set_handlers(rq, params, xsk);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
}

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
			     struct mlx5e_xsk_param *xsk)
{
	int err;

	err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);

	/* Each queue is opened only after its CQ, so completions always have
	 * a valid target; the error labels below unwind in reverse order.
	 */
	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the buffer pool is disabled, this
	 * SQ can be closed safely and stop receiving CQEs. Otherwise, e.g., if
	 * the XDPSQ were reused instead, we might run into trouble when the
	 * buffer pool is disabled and then re-enabled, but the SQ keeps
	 * receiving CQEs from the old buffer pool.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	kvfree(cparam);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}

void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
}

void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
	 * activating XSKRQ in the middle of recovery.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);

	/* TX queue is created active. */
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	/* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
	 * middle of recovery. Suspend the recovery to avoid it.
	 */
	mlx5e_reporter_icosq_suspend_recovery(c);
	clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	mlx5e_reporter_icosq_resume_recovery(c);
	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */

	/* TX queue is disabled on close. */
}
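
/* Lifecycle sketch (illustrative only, not part of the driver): the expected
 * call order when an AF_XDP buffer pool is attached to and detached from a
 * channel. The real callers live in the driver's XSK pool enable/disable
 * path; the example_* wrappers below are hypothetical.
 */
#if 0
static int example_attach_pool(struct mlx5e_priv *priv, struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
			       struct mlx5e_channel *c)
{
	int err;

	err = mlx5e_open_xsk(priv, params, xsk, pool, c); /* validates params internally */
	if (err)
		return err;

	mlx5e_activate_xsk(c); /* enable XSKRQ; XSKSQ is created active */
	return 0;
}

static void example_detach_pool(struct mlx5e_channel *c)
{
	mlx5e_deactivate_xsk(c); /* disable XSKRQ and sync with NAPI */
	mlx5e_close_xsk(c);      /* tear down RQ/SQ and their CQs */
}
#endif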