1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "health.h"
5 #include "en/ptp.h"
6 #include "en/devlink.h"
7 #include "lib/tout.h"
8 
/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h.
 * Consistency is asserted at compile time by the BUILD_BUG_ON_MSG() in
 * mlx5e_health_sq_put_sw_state(). Indexed by SQ state bit number; each
 * entry is the key name emitted into the devlink diagnose output.
 */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
21 
22 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
23 {
24 	struct mlx5_core_dev *dev = sq->mdev;
25 	unsigned long exp_time;
26 
27 	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
28 
29 	while (time_before(jiffies, exp_time)) {
30 		if (sq->cc == sq->pc)
31 			return 0;
32 
33 		msleep(20);
34 	}
35 
36 	netdev_err(sq->netdev,
37 		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
38 		   sq->sqn, sq->cc, sq->pc);
39 
40 	return -ETIMEDOUT;
41 }
42 
43 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
44 {
45 	WARN_ONCE(sq->cc != sq->pc,
46 		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
47 		  sq->sqn, sq->cc, sq->pc);
48 	sq->cc = 0;
49 	sq->dma_fifo_cc = 0;
50 	sq->pc = 0;
51 }
52 
53 static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
54 {
55 	int err;
56 	int i;
57 
58 	BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
59 			 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
60 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
61 	if (err)
62 		return err;
63 
64 	for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
65 		err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
66 						test_bit(i, &sq->state));
67 		if (err)
68 			return err;
69 	}
70 
71 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
72 }
73 
/* devlink health recover callback for an SQ that raised an error CQE.
 *
 * If FW still reports the SQ in error state: stop the txq, wait for all
 * in-flight WQEs to complete, move the SQ back to ready state, zero its
 * counters and re-activate it. Returns 0 on success (or when there is
 * nothing to do), negative errno otherwise. On any failure the
 * RECOVERING bit is cleared so a later attempt can be made.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	/* Recovery already completed (or was never requested) elsewhere. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	/* HW already left the error state; nothing to reset. */
	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. It is now safe to reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	mlx5e_activate_txqsq(sq);
	/* Kick NAPI on the owning channel (or directly on the CQ's NAPI
	 * context for channel-less SQs, e.g. PTP).
	 */
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}
128 
/* Context passed from mlx5e_reporter_tx_timeout() through the health
 * report to the timeout recover/dump callbacks.
 */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq;	/* the SQ that hit the TX timeout */
	signed int status;	/* recovery outcome returned to the caller:
				 * 0 = this SQ recovered, 1 = all channels
				 * reopened, negative errno = failure
				 */
};
133 
/* devlink health recover callback for a TX timeout.
 *
 * Escalating recovery: first try the lightweight path of recovering the
 * channel's completion EQ; only if that fails, reopen all channels.
 * The outcome is reported back through to_ctx->status (see
 * struct mlx5e_tx_timeout_ctx); the return value is the error code of
 * the last attempted step.
 */
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx;
	struct mlx5e_priv *priv;
	struct mlx5_eq_comp *eq;
	struct mlx5e_txqsq *sq;
	int err;

	to_ctx = ctx;
	sq = to_ctx->sq;
	eq = sq->cq.mcq.eq;
	priv = sq->priv;
	err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
	if (!err) {
		to_ctx->status = 0; /* this sq recovered */
		return err;
	}

	err = mlx5e_safe_reopen_channels(priv);
	if (!err) {
		to_ctx->status = 1; /* all channels recovered */
		return err;
	}

	/* Both recovery paths failed; disable the SQ and report the error. */
	to_ctx->status = err;
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_err(priv->netdev,
		   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
		   err);

	return err;
}
166 
/* state lock cannot be grabbed within this function.
 * It can cause a deadlock or a read-after-free.
 */
static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	/* Dispatch to the recover handler recorded when the error was reported. */
	return err_ctx->recover(err_ctx->ctx);
}
174 
/* devlink health .recover op: with a specific error context, recover just
 * that object; without one, attempt a full channels recovery.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_recover_from_ctx(err_ctx);

	return mlx5e_health_recover_channels(priv);
}
185 
/* Emit the diagnose fields shared by regular and PTP SQs: identity (tc,
 * txq index, sqn), HW/SW state, counters, plus the CQ and EQ diagnostics.
 * The caller is responsible for the enclosing fmsg object nest.
 */
static int
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
						  struct mlx5e_txqsq *sq, int tc)
{
	bool stopped = netif_xmit_stopped(sq->txq);
	struct mlx5e_priv *priv = sq->priv;
	u8 state;
	int err;

	/* Query the SQC state from FW before emitting anything. */
	err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
	if (err)
		return err;

	err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
	if (err)
		return err;

	err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
	if (err)
		return err;

	err = mlx5e_health_sq_put_sw_state(fmsg, sq);
	if (err)
		return err;

	err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
	if (err)
		return err;

	return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
}
237 
238 static int
239 mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
240 					struct mlx5e_txqsq *sq, int tc)
241 {
242 	int err;
243 
244 	err = devlink_fmsg_obj_nest_start(fmsg);
245 	if (err)
246 		return err;
247 
248 	err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
249 	if (err)
250 		return err;
251 
252 	err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
253 	if (err)
254 		return err;
255 
256 	err = devlink_fmsg_obj_nest_end(fmsg);
257 	if (err)
258 		return err;
259 
260 	return 0;
261 }
262 
263 static int
264 mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
265 					      struct mlx5e_ptpsq *ptpsq, int tc)
266 {
267 	int err;
268 
269 	err = devlink_fmsg_obj_nest_start(fmsg);
270 	if (err)
271 		return err;
272 
273 	err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
274 	if (err)
275 		return err;
276 
277 	err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
278 	if (err)
279 		return err;
280 
281 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
282 	if (err)
283 		return err;
284 
285 	err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
286 	if (err)
287 		return err;
288 
289 	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
290 	if (err)
291 		return err;
292 
293 	err = devlink_fmsg_obj_nest_end(fmsg);
294 	if (err)
295 		return err;
296 
297 	return 0;
298 }
299 
300 static int
301 mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
302 					 struct mlx5e_txqsq *txqsq)
303 {
304 	u32 sq_stride, sq_sz;
305 	bool real_time;
306 	int err;
307 
308 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
309 	if (err)
310 		return err;
311 
312 	real_time =  mlx5_is_real_time_sq(txqsq->mdev);
313 	sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
314 	sq_stride = MLX5_SEND_WQE_BB;
315 
316 	err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
317 	if (err)
318 		return err;
319 
320 	err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
321 	if (err)
322 		return err;
323 
324 	err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
325 	if (err)
326 		return err;
327 
328 	err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
329 	if (err)
330 		return err;
331 
332 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
333 }
334 
335 static int
336 mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
337 					      struct mlx5e_ptpsq *ptpsq)
338 {
339 	int err;
340 
341 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
342 	if (err)
343 		return err;
344 
345 	err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
346 	if (err)
347 		return err;
348 
349 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
350 }
351 
352 static int
353 mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
354 					 struct devlink_fmsg *fmsg)
355 {
356 	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
357 	struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
358 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
359 	struct mlx5e_ptpsq *generic_ptpsq;
360 	int err;
361 
362 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
363 	if (err)
364 		return err;
365 
366 	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
367 	if (err)
368 		return err;
369 
370 	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
371 		goto out;
372 
373 	generic_ptpsq = &ptp_ch->ptpsq[0];
374 
375 	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
376 	if (err)
377 		return err;
378 
379 	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
380 	if (err)
381 		return err;
382 
383 	err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
384 	if (err)
385 		return err;
386 
387 	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
388 	if (err)
389 		return err;
390 
391 out:
392 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
393 }
394 
/* devlink health .diagnose op: under the state lock, emit the common
 * config section followed by a per-SQ array covering every (channel, tc)
 * SQ and, when active, the PTP SQs. Returns 0 when the interface is not
 * opened (nothing to report).
 */
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
				      struct devlink_fmsg *fmsg,
				      struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

	int i, tc, err = 0;

	/* Hold the state lock so channels are not torn down while walking. */
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
	if (err)
		goto unlock;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		goto unlock;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
			if (err)
				goto unlock;
		}
	}

	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
		goto close_sqs_nest;

	for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
		err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
								    &ptp_ch->ptpsq[tc],
								    tc);
		if (err)
			goto unlock;
	}

close_sqs_nest:
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		goto unlock;	/* falls through to unlock either way */

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
449 
/* Dump a single SQ into the fmsg via the FW resource dump interface:
 * the SX slice, the SQ's full QPC, and its send buffer. ctx is the
 * struct mlx5e_txqsq to dump. Returns 0 (no-op) when the interface is
 * not opened.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	if (err)
		return err;

	/* key fields accumulate: size/rsc set above are overridden below. */
	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
510 
/* Dump callback for a TX timeout report: unwrap the timeout context and
 * dump the offending SQ.
 */
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
					  void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}
518 
/* Dump the SX slice once, then every SQ of every (channel, tc) pair, and
 * finally the PTP SQs when PTP TX is active. Used when a dump is
 * requested with no specific error context. Returns 0 (no-op) when the
 * interface is not opened.
 */
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
					  struct devlink_fmsg *fmsg)
{
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
	struct mlx5_rsc_key key = {};
	int i, tc, err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		return err;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
			if (err)
				return err;
		}
	}

	if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
			if (err)
				return err;
		}
	}

	return devlink_fmsg_arr_pair_nest_end(fmsg);
}
571 
/* Dispatch to the dump handler recorded when the error was reported. */
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}
578 
/* devlink health .dump op: with a specific error context, dump just that
 * object; without one, dump all SQs.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg);

	return mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}
589 
590 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
591 {
592 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
593 	struct mlx5e_priv *priv = sq->priv;
594 	struct mlx5e_err_ctx err_ctx = {};
595 
596 	err_ctx.ctx = sq;
597 	err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
598 	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
599 	snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
600 
601 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
602 }
603 
604 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
605 {
606 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
607 	struct mlx5e_tx_timeout_ctx to_ctx = {};
608 	struct mlx5e_priv *priv = sq->priv;
609 	struct mlx5e_err_ctx err_ctx = {};
610 
611 	to_ctx.sq = sq;
612 	err_ctx.ctx = &to_ctx;
613 	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
614 	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
615 	snprintf(err_str, sizeof(err_str),
616 		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
617 		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
618 		 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
619 
620 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
621 	return to_ctx.status;
622 }
623 
624 static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
625 		.name = "tx",
626 		.recover = mlx5e_tx_reporter_recover,
627 		.diagnose = mlx5e_tx_reporter_diagnose,
628 		.dump = mlx5e_tx_reporter_dump,
629 };
630 
631 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
632 
633 void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
634 {
635 	struct devlink_health_reporter *reporter;
636 
637 	reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
638 						       &mlx5_tx_reporter_ops,
639 						       MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
640 	if (IS_ERR(reporter)) {
641 		netdev_warn(priv->netdev,
642 			    "Failed to create tx reporter, err = %ld\n",
643 			    PTR_ERR(reporter));
644 		return;
645 	}
646 	priv->tx_reporter = reporter;
647 }
648 
649 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
650 {
651 	if (!priv->tx_reporter)
652 		return;
653 
654 	devlink_health_reporter_destroy(priv->tx_reporter);
655 	priv->tx_reporter = NULL;
656 }
657