1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "health.h"
5 #include "en/ptp.h"
6 #include "en/devlink.h"
7 #include "lib/tout.h"
8
/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h.
 * Indexed by the state bit number; used when reporting SQ SW state via fmsg.
 */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
21
mlx5e_wait_for_sq_flush(struct mlx5e_txqsq * sq)22 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
23 {
24 struct mlx5_core_dev *dev = sq->mdev;
25 unsigned long exp_time;
26
27 exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
28
29 while (time_before(jiffies, exp_time)) {
30 if (sq->cc == sq->pc)
31 return 0;
32
33 msleep(20);
34 }
35
36 netdev_err(sq->netdev,
37 "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
38 sq->sqn, sq->cc, sq->pc);
39
40 return -ETIMEDOUT;
41 }
42
mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq * sq)43 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
44 {
45 WARN_ONCE(sq->cc != sq->pc,
46 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
47 sq->sqn, sq->cc, sq->pc);
48 sq->cc = 0;
49 sq->dma_fifo_cc = 0;
50 sq->pc = 0;
51 }
52
mlx5e_health_sq_put_sw_state(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq)53 static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
54 {
55 int err;
56 int i;
57
58 BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
59 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
60 err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
61 if (err)
62 return err;
63
64 for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
65 err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
66 test_bit(i, &sq->state));
67 if (err)
68 return err;
69 }
70
71 return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
72 }
73
/* Recover an SQ that raised an error CQE: stop the TXQ, wait for NAPI to
 * drain it, move the HW SQ back to ready, reset SW counters and reactivate.
 * ctx is the struct mlx5e_txqsq that reported the error CQE.
 * Returns 0 on success (or nothing to do), negative errno on failure.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	/* Recovery no longer pending — someone else already handled it. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	/* Only an SQ in HW error state needs the reset-to-ready flow. */
	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	/* Stop the stack from queuing new packets on this TXQ. */
	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. SQ can safely reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	mlx5e_activate_txqsq(sq);
	/* Kick NAPI so any pending work is processed promptly. */
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	/* Always drop the RECOVERING bit, even on failure paths. */
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}
128
/* Context handed from mlx5e_reporter_tx_timeout() to the recover/dump
 * callbacks via struct mlx5e_err_ctx.
 */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq; /* the SQ that hit the TX timeout */
	/* filled by the recover callback: 0 = this SQ recovered,
	 * 1 = all channels reopened, negative errno = recovery failed
	 */
	signed int status;
};
133
/* Recover from a TX timeout: first try recovering the channel's EQ; if that
 * fails, fall back to safely reopening all channels under the state lock.
 * ctx is a struct mlx5e_tx_timeout_ctx; its status field reports the outcome
 * (see the struct definition). Returns 0 on success, negative errno on failure.
 */
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx;
	struct mlx5e_priv *priv;
	struct mlx5_eq_comp *eq;
	struct mlx5e_txqsq *sq;
	int err;

	to_ctx = ctx;
	sq = to_ctx->sq;
	eq = sq->cq.mcq.eq;
	priv = sq->priv;
	err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
	if (!err) {
		to_ctx->status = 0; /* this sq recovered */
		return err;
	}

	/* EQ recovery failed; reopen all channels under the state lock. */
	mutex_lock(&priv->state_lock);
	err = mlx5e_safe_reopen_channels(priv);
	mutex_unlock(&priv->state_lock);
	if (!err) {
		to_ctx->status = 1; /* all channels recovered */
		return err;
	}

	to_ctx->status = err;
	/* Recovery failed entirely — take this SQ out of service. */
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_err(priv->netdev,
		   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
		   err);

	return err;
}
168
/* Recover an unhealthy PTP SQ by closing and re-opening the PTP channel
 * while all priv channels are deactivated. ctx is the affected
 * struct mlx5e_ptpsq. Returns 0 on success or when no recovery is pending.
 */
static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;
	struct mlx5e_channels *chs;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int carrier_ok;
	int err;

	/* Nothing to do unless recovery was requested on this SQ. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
		return 0;

	priv = ptpsq->txqsq.priv;

	mutex_lock(&priv->state_lock);
	chs = &priv->channels;
	netdev = priv->netdev;

	/* Take the carrier down while channels are recycled and remember
	 * whether it must be restored afterwards.
	 */
	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	mlx5e_deactivate_priv_channels(priv);

	/* Recreate the PTP channel from scratch.
	 * NOTE(review): if mlx5e_ptp_open() fails, channels are still
	 * reactivated below and err is returned — presumably chs->ptp is left
	 * in a consistent state by the helper; confirm against en/ptp.c.
	 */
	mlx5e_ptp_close(chs->ptp);
	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);

	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
205
/* Dispatch to the recover callback captured when the error was reported.
 * The state lock cannot be grabbed within this function: doing so can cause
 * a deadlock or a use-after-free (callbacks may take it themselves).
 */
static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	return err_ctx->recover(err_ctx->ctx);
}
213
/* devlink health recover callback. With a specific error context, recover
 * only the affected object; otherwise perform a full channels recovery.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_recover_from_ctx(err_ctx);

	return mlx5e_health_recover_channels(priv);
}
224
225 static int
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq,int tc)226 mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
227 struct mlx5e_txqsq *sq, int tc)
228 {
229 bool stopped = netif_xmit_stopped(sq->txq);
230 struct mlx5e_priv *priv = sq->priv;
231 u8 state;
232 int err;
233
234 err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
235 if (err)
236 return err;
237
238 err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
239 if (err)
240 return err;
241
242 err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
243 if (err)
244 return err;
245
246 err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
247 if (err)
248 return err;
249
250 err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
251 if (err)
252 return err;
253
254 err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
255 if (err)
256 return err;
257
258 err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
259 if (err)
260 return err;
261
262 err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
263 if (err)
264 return err;
265
266 err = mlx5e_health_sq_put_sw_state(fmsg, sq);
267 if (err)
268 return err;
269
270 err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
271 if (err)
272 return err;
273
274 return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
275 }
276
277 static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq,int tc)278 mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
279 struct mlx5e_txqsq *sq, int tc)
280 {
281 int err;
282
283 err = devlink_fmsg_obj_nest_start(fmsg);
284 if (err)
285 return err;
286
287 err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
288 if (err)
289 return err;
290
291 err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
292 if (err)
293 return err;
294
295 err = devlink_fmsg_obj_nest_end(fmsg);
296 if (err)
297 return err;
298
299 return 0;
300 }
301
302 static int
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg * fmsg,struct mlx5e_ptpsq * ptpsq,int tc)303 mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
304 struct mlx5e_ptpsq *ptpsq, int tc)
305 {
306 int err;
307
308 err = devlink_fmsg_obj_nest_start(fmsg);
309 if (err)
310 return err;
311
312 err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
313 if (err)
314 return err;
315
316 err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
317 if (err)
318 return err;
319
320 err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
321 if (err)
322 return err;
323
324 err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
325 if (err)
326 return err;
327
328 err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
329 if (err)
330 return err;
331
332 err = devlink_fmsg_obj_nest_end(fmsg);
333 if (err)
334 return err;
335
336 return 0;
337 }
338
339 static int
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * txqsq)340 mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
341 struct mlx5e_txqsq *txqsq)
342 {
343 u32 sq_stride, sq_sz;
344 bool real_time;
345 int err;
346
347 err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
348 if (err)
349 return err;
350
351 real_time = mlx5_is_real_time_sq(txqsq->mdev);
352 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
353 sq_stride = MLX5_SEND_WQE_BB;
354
355 err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
356 if (err)
357 return err;
358
359 err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
360 if (err)
361 return err;
362
363 err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
364 if (err)
365 return err;
366
367 err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
368 if (err)
369 return err;
370
371 return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
372 }
373
374 static int
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg * fmsg,struct mlx5e_ptpsq * ptpsq)375 mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
376 struct mlx5e_ptpsq *ptpsq)
377 {
378 int err;
379
380 err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
381 if (err)
382 return err;
383
384 err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
385 if (err)
386 return err;
387
388 return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
389 }
390
/* Emit the "Common Config" diagnose section: generic SQ parameters taken
 * from the first txqsq, plus a "PTP" sub-section when a PTP TX channel is
 * active. Returns 0 or the first fmsg/helper error.
 */
static int
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
					 struct devlink_fmsg *fmsg)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
	struct mlx5e_ptpsq *generic_ptpsq;
	int err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
	if (err)
		return err;

	/* Skip the PTP sub-section when no PTP TX channel is active. */
	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
		goto out;

	generic_ptpsq = &ptp_ch->ptpsq[0];

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
	if (err)
		return err;

	/* Close the "PTP" nest. */
	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

out:
	/* Close the "Common Config" nest. */
	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
433
/* devlink health diagnose callback: report common config plus per-SQ
 * diagnostics for every (channel, tc) TX queue and, when active, the PTP SQs.
 */
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
				      struct devlink_fmsg *fmsg,
				      struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

	int i, tc, err = 0;

	/* state_lock protects priv->channels against concurrent reconfig. */
	mutex_lock(&priv->state_lock);

	/* Nothing to diagnose while the netdev is closed. */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
	if (err)
		goto unlock;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		goto unlock;

	/* One diagnostics object per (channel, tc) SQ. */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
			if (err)
				goto unlock;
		}
	}

	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
		goto close_sqs_nest;

	/* PTP SQs are appended to the same "SQs" array, tagged "ptp". */
	for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
		err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
								    &ptp_ch->ptpsq[tc],
								    tc);
		if (err)
			goto unlock;
	}

close_sqs_nest:
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		goto unlock;

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
488
/* Dump HW resources related to one SQ (ctx): the SX slice, the SQ's full
 * QPC and its send buffer, each as a named fmsg object. Quietly succeeds
 * when the netdev is closed.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	if (err)
		return err;

	/* Reuse the same key, now targeting this SQ's full QP context. */
	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	if (err)
		return err;

	/* Same key again, now targeting the SQ's send buffer. */
	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	/* Close the outer "SQ" nest. */
	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
549
mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg,void * ctx)550 static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
551 void *ctx)
552 {
553 struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
554
555 return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
556 }
557
mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg,void * ctx)558 static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
559 struct devlink_fmsg *fmsg,
560 void *ctx)
561 {
562 struct mlx5e_ptpsq *ptpsq = ctx;
563
564 return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
565 }
566
/* Dump callback used when no specific error context exists: dump the SX
 * slice once, then every (channel, tc) SQ and — if a PTP TX channel is
 * active — every PTP SQ, all inside one "SQs" fmsg array.
 */
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
					  struct devlink_fmsg *fmsg)
{
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
	struct mlx5_rsc_key key = {};
	int i, tc, err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		return err;

	/* One queue dump per (channel, tc) SQ. */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
			if (err)
				return err;
		}
	}

	/* PTP SQs are appended to the same array when the PTP channel is up. */
	if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
			if (err)
				return err;
		}
	}

	return devlink_fmsg_arr_pair_nest_end(fmsg);
}
619
/* Dispatch to the dump callback captured when the error was reported. */
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}
626
/* devlink health dump callback. With a specific error context, dump only
 * what that error captured; otherwise dump all SQs.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg);

	return mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}
637
mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq * sq)638 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
639 {
640 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
641 struct mlx5e_priv *priv = sq->priv;
642 struct mlx5e_err_ctx err_ctx = {};
643
644 err_ctx.ctx = sq;
645 err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
646 err_ctx.dump = mlx5e_tx_reporter_dump_sq;
647 snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
648
649 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
650 }
651
mlx5e_reporter_tx_timeout(struct mlx5e_txqsq * sq)652 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
653 {
654 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
655 struct mlx5e_tx_timeout_ctx to_ctx = {};
656 struct mlx5e_priv *priv = sq->priv;
657 struct mlx5e_err_ctx err_ctx = {};
658
659 to_ctx.sq = sq;
660 err_ctx.ctx = &to_ctx;
661 err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
662 err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
663 snprintf(err_str, sizeof(err_str),
664 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
665 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
666 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
667
668 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
669 return to_ctx.status;
670 }
671
mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq * ptpsq)672 void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
673 {
674 struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
675 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
676 struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
677 struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
678 struct mlx5e_priv *priv = txqsq->priv;
679 struct mlx5e_err_ctx err_ctx = {};
680
681 err_ctx.ctx = ptpsq;
682 err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
683 err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
684 snprintf(err_str, sizeof(err_str),
685 "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
686 txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
687
688 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
689 }
690
/* devlink health reporter callbacks for the "tx" reporter. */
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
	.name = "tx",
	.recover = mlx5e_tx_reporter_recover,
	.diagnose = mlx5e_tx_reporter_diagnose,
	.dump = mlx5e_tx_reporter_dump,
};
697
698 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
699
mlx5e_reporter_tx_create(struct mlx5e_priv * priv)700 void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
701 {
702 struct devlink_health_reporter *reporter;
703
704 reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
705 &mlx5_tx_reporter_ops,
706 MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
707 if (IS_ERR(reporter)) {
708 netdev_warn(priv->netdev,
709 "Failed to create tx reporter, err = %ld\n",
710 PTR_ERR(reporter));
711 return;
712 }
713 priv->tx_reporter = reporter;
714 }
715
mlx5e_reporter_tx_destroy(struct mlx5e_priv * priv)716 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
717 {
718 if (!priv->tx_reporter)
719 return;
720
721 devlink_health_reporter_destroy(priv->tx_reporter);
722 priv->tx_reporter = NULL;
723 }
724