/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/irq.h>
#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"

static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();

	return cpumask_test_cpu(current_cpu, c->aff_mask);
}

static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_SQ_STATE_DIM, &sq->state)))
		return;

	dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
		return;

	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&rq->dim, dim_sample);
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
		.num_wqebbs = 1,
	};

	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
	bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);
	bool busy_xsk = false, xsk_rx_alloc_err;

	/* If SQ is empty, there are no TX completions to trigger NAPI, so set
	 * need_wakeup. Do it before queuing packets for TX to avoid race
	 * condition with userspace.
	 */
	if (need_wakeup && xsksq->pc == xsksq->cc)
		xsk_set_tx_need_wakeup(xsksq->xsk_pool);
	busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
	/* If we queued some packets for TX, no need for wakeup anymore. */
	if (need_wakeup && xsksq->pc != xsksq->cc)
		xsk_clear_tx_need_wakeup(xsksq->xsk_pool);

	/* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it
	 * before refilling to avoid race condition with userspace.
	 */
	if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq))
		xsk_set_rx_need_wakeup(xskrq->xsk_pool);
	xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
					   mlx5e_post_rx_mpwqes,
					   mlx5e_post_rx_wqes,
					   xskrq);
	/* Ask for wakeup if WQ is not full after refill. */
	if (!need_wakeup)
		busy_xsk |= xsk_rx_alloc_err;
	else if (xsk_rx_alloc_err)
		xsk_set_rx_need_wakeup(xskrq->xsk_pool);
	else
		xsk_clear_rx_need_wakeup(xskrq->xsk_pool);

	return busy_xsk;
}
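
/* Note on the need_wakeup protocol above: the flag is set only while a
 * queue cannot make progress on its own (empty XSK SQ, empty RX WQ, or
 * a failed RX refill), telling userspace to kick the kernel through
 * sendmsg()/poll(); it is cleared again as soon as the driver has work
 * queued, so wakeup syscalls are only issued when actually required.
 */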

int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_xdpsq *xsksq = &c->xsksq;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_rq *xskrq = &c->xskrq;
	struct mlx5e_rq *rq = &c->rq;
	bool aff_change = false;
	bool busy_xsk = false;
	bool busy = false;
	int work_done = 0;
	u16 qos_sqs_size;
	bool xsk_open;
	int i;

	rcu_read_lock();

	qos_sqs = rcu_dereference(c->qos_sqs);

	xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

	if (unlikely(qos_sqs)) {
		smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
		qos_sqs_size = READ_ONCE(c->qos_sqs_size);

		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq)
				busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
		}
	}

	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);

	if (c->xdp)
		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);

	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
		if (xsk_open)
			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);

		if (likely(budget - work_done))
			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);

		busy |= work_done == budget;
	}

	mlx5e_poll_ico_cq(&c->icosq.cq);
	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
		/* Don't clear the flag if nothing was polled to prevent
		 * queueing more WQEs and overflowing the async ICOSQ.
		 */
		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);

	/* Keep after async ICOSQ CQ poll */
	if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
		busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);

	busy |= INDIRECT_CALL_2(rq->post_wqes,
				mlx5e_post_rx_mpwqes,
				mlx5e_post_rx_wqes,
				rq);
	if (xsk_open) {
		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
	}

	busy |= busy_xsk;

	if (busy) {
		if (likely(mlx5e_channel_no_affinity_change(c))) {
			work_done = budget;
			goto out;
		}
		ch_stats->aff_change++;
		aff_change = true;
		if (budget && work_done == budget)
			work_done--;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	for (i = 0; i < c->num_tc; i++) {
		mlx5e_handle_tx_dim(&c->sq[i]);
		mlx5e_cq_arm(&c->sq[i].cq);
	}
	if (unlikely(qos_sqs)) {
		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq) {
				mlx5e_handle_tx_dim(sq);
				mlx5e_cq_arm(&sq->cq);
			}
		}
	}

	mlx5e_handle_rx_dim(rq);

	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
	mlx5e_cq_arm(&c->async_icosq.cq);
	mlx5e_cq_arm(&c->xdpsq.cq);

	if (xsk_open) {
		mlx5e_handle_rx_dim(xskrq);
		mlx5e_cq_arm(&xsksq->cq);
		mlx5e_cq_arm(&xskrq->cq);
	}

	if (unlikely(aff_change && busy_xsk)) {
		mlx5e_trigger_irq(&c->icosq);
		ch_stats->force_irq++;
	}

out:
	rcu_read_unlock();

	return work_done;
}
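
/* EQ callbacks below: mlx5e_completion_event() is invoked from mlx5 EQ
 * handling for each completion event on the CQ and defers the real work
 * to the channel's NAPI context via napi_schedule(); event_ctr also
 * feeds the DIM samples taken in mlx5e_handle_{tx,rx}_dim() above.
 */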

void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	napi_schedule(cq->napi);
	cq->event_ctr++;
	cq->ch_stats->events++;
}

void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
	struct net_device *netdev = cq->netdev;

	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
		   __func__, mcq->cqn, event);
}