/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */

#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
#include "lib/tout.h"

/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};

/* Poll (in 20ms steps) until the SQ is drained, i.e. its consumer counter
 * (cc) has caught up with its producer counter (pc). The deadline comes from
 * the device's FLUSH_ON_ERROR timeout. Returns 0 when drained, -ETIMEDOUT
 * (after logging) otherwise.
 */
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
	struct mlx5_core_dev *dev = sq->mdev;
	unsigned long exp_time;

	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));

	while (time_before(jiffies, exp_time)) {
		if (sq->cc == sq->pc)
			return 0;

		msleep(20);
	}

	netdev_err(sq->netdev,
		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
		   sq->sqn, sq->cc, sq->pc);

	return -ETIMEDOUT;
}

/* Reset the SQ's SW counters back to the initial state. Callers are expected
 * to have fully drained the SQ first (cc == pc); a mismatch indicates lost
 * completions and is reported once via WARN_ONCE, but the reset proceeds
 * regardless.
 */
static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
{
	WARN_ONCE(sq->cc != sq->pc,
		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
		  sq->sqn, sq->cc, sq->pc);
	sq->cc = 0;
	sq->dma_fifo_cc = 0;
	sq->pc = 0;
}

/* Emit the SQ's SW state bitmask into the devlink fmsg as a "SW State"
 * object: one u32 pair per MLX5E_SQ_STATE_* bit, named after
 * sq_sw_state_type_name[] and valued by test_bit() on sq->state.
 */
static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
{
	int err;
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
			 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
		err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
						test_bit(i, &sq->state));
		if (err)
			return err;
	}

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return devlink_fmsg_obj_nest_end(fmsg);
}

/* Recovery callback for an error CQE on a TXQ SQ (ctx is the mlx5e_txqsq).
 * Only acts if the SQ is marked RECOVERING and the HW SQ state is actually
 * MLX5_SQC_STATE_ERR; otherwise returns early. Recovery sequence: stop the
 * stack queue, wait for the SQ to drain, move the HW SQ back to ready,
 * reset SW counters, then re-activate the queue and kick NAPI so pending
 * work resumes. The RECOVERING bit is cleared on every exit path.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. It is now safe to reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	mlx5e_activate_txqsq(sq);
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}

/* Context handed to the timeout recovery callback.
 * @sq:     the SQ that hit the TX timeout.
 * @status: recovery outcome — 0: this SQ recovered (EQ recovery sufficed),
 *          1: all channels were reopened, negative errno: recovery failed.
 */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq;
	signed int status;
};

/* Recovery callback for a TX timeout (ctx is a mlx5e_tx_timeout_ctx).
 * First tries the lightweight path — recovering the SQ's completion EQ.
 * If that fails, falls back to reopening all channels. On total failure,
 * clears the SQ's ENABLED bit and records the error in to_ctx->status.
 */
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx;
	struct mlx5e_priv *priv;
	struct mlx5_eq_comp *eq;
	struct mlx5e_txqsq *sq;
	int err;

	to_ctx = ctx;
	sq = to_ctx->sq;
	eq = sq->cq.mcq.eq;
	priv = sq->priv;
	err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
	if (!err) {
		to_ctx->status = 0; /* this sq recovered */
		return err;
	}

	err = mlx5e_safe_reopen_channels(priv);
	if (!err) {
		to_ctx->status = 1; /* all channels recovered */
		return err;
	}

	to_ctx->status = err;
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_err(priv->netdev,
		   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
		   err);

	return err;
}

/* state lock cannot be grabbed within this function.
 * It can cause a dead lock or a read-after-free.
 */
static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	return err_ctx->recover(err_ctx->ctx);
}

/* devlink health .recover op: with a context, dispatch to the specific
 * recovery routine stored in it; without one (user-triggered recover),
 * fall back to recovering all channels.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
			 mlx5e_health_recover_channels(priv);
}

/* Emit the diagnose fields shared by regular and PTP SQs: tc, txq index,
 * sqn, queried HW state, netdev-stopped flag, cc/pc counters, SW state
 * bits, and the SQ's CQ and EQ diagnostics.
 */
static int
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
						  struct mlx5e_txqsq *sq, int tc)
{
	bool stopped = netif_xmit_stopped(sq->txq);
	struct mlx5e_priv *priv = sq->priv;
	u8 state;
	int err;

	err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
	if (err)
		return err;

	err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
	if (err)
		return err;

	err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
	if (err)
		return err;

	err = mlx5e_health_sq_put_sw_state(fmsg, sq);
	if (err)
		return err;

	err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
	if (err)
		return err;

	return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
}

/* Emit one diagnose object for a regular channel SQ: the channel index
 * followed by the common SQ fields.
 */
static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
					struct mlx5e_txqsq *sq, int tc)
{
	int err;

	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
	if (err)
		return err;

	err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
	if (err)
		return err;

	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;

	return 0;
}

/* Emit one diagnose object for a PTP SQ: labelled channel "ptp", the common
 * SQ fields, plus a "Port TS" section with the port-timestamping CQ.
 */
static int
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq, int tc)
{
	int err;

	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
	if (err)
		return err;

	err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	if (err)
		return err;

	err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;

	return 0;
}

/* Emit the config common to all SQs of a kind (regular or PTP) into an "SQ"
 * section: WQE stride size, WQ size, timestamp format (real-time vs
 * free-running counter), and the common CQ config.
 */
static int
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
					 struct mlx5e_txqsq *txqsq)
{
	u32 sq_stride, sq_sz;
	bool real_time;
	int err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	if (err)
		return err;

	real_time = mlx5_is_real_time_sq(txqsq->mdev);
	sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
	sq_stride = MLX5_SEND_WQE_BB;

	err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
	if (err)
		return err;

	err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
	if (err)
		return err;

	err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

/* Emit the generic "Port TS" config section for a PTP SQ: the common
 * config of its timestamping CQ.
 */
static int
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq)
{
	int err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	if (err)
		return err;

	err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

/* Emit the "Common Config" diagnose section, using txq2sq[0] as a
 * representative SQ (and ptpsq[0] for PTP, when the PTP channel exists and
 * has TX enabled — otherwise the PTP subsection is skipped).
 */
static int
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
					 struct devlink_fmsg *fmsg)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
	struct mlx5e_ptpsq *generic_ptpsq;
	int err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
	if (err)
		return err;

	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
		goto out;

	generic_ptpsq = &ptp_ch->ptpsq[0];

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
	if (err)
		return err;

	err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

out:
	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

/* devlink health .diagnose op. Under the state lock (channels must not
 * change underneath us), emits the common config followed by an "SQs" array
 * with one entry per (channel, tc) SQ, plus the PTP SQs when active. Does
 * nothing if the netdev is not opened.
 */
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
				      struct devlink_fmsg *fmsg,
				      struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

	int i, tc, err = 0;

	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
	if (err)
		goto unlock;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		goto unlock;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
			if (err)
				goto unlock;
		}
	}

	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
		goto close_sqs_nest;

	for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
		err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
								    &ptp_ch->ptpsq[tc],
								    tc);
		if (err)
			goto unlock;
	}

close_sqs_nest:
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		goto unlock;

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

/* Dump callback for a single SQ (ctx is the mlx5e_txqsq): an "SX Slice"
 * resource dump, the SQ's full QPC, and its send buffer. No-op (returns 0)
 * when the netdev is not opened. Note the same mlx5_rsc_key is reused and
 * partially re-filled between dump sections.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

/* Dump callback for a TX timeout: unwrap the timeout ctx and dump its SQ. */
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
					  void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}

/* Dump all SQs (used when the dump is user-triggered, with no error ctx):
 * an "SX Slice" section followed by an "SQs" array covering every
 * (channel, tc) SQ and, when PTP TX is active, the PTP SQs. No-op when the
 * netdev is not opened.
 */
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
					  struct devlink_fmsg *fmsg)
{
	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
	struct mlx5_rsc_key key = {};
	int i, tc, err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
	if (err)
		return err;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &c->sq[tc];

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
			if (err)
				return err;
		}
	}

	if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
			struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;

			err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
			if (err)
				return err;
		}
	}

	return devlink_fmsg_arr_pair_nest_end(fmsg);
}

/* Dispatch to the dump routine stored in the error context. */
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}

/* devlink health .dump op: with a context, dump only what the context's
 * routine targets; without one, dump all SQs.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
			 mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}

/* Report an error CQE on @sq to the devlink health infrastructure, wiring
 * up the err-CQE recover and single-SQ dump callbacks.
 */
void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{
	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
	struct mlx5e_priv *priv = sq->priv;
	struct mlx5e_err_ctx err_ctx = {};

	err_ctx.ctx = sq;
	err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
	snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);

	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}

/* Report a TX timeout on @sq to the devlink health infrastructure and run
 * recovery synchronously. Returns the recovery status recorded in the
 * timeout ctx (see struct mlx5e_tx_timeout_ctx).
 */
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{
	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
	struct mlx5e_tx_timeout_ctx to_ctx = {};
	struct mlx5e_priv *priv = sq->priv;
	struct mlx5e_err_ctx err_ctx = {};

	to_ctx.sq = sq;
	err_ctx.ctx = &to_ctx;
	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
	snprintf(err_str, sizeof(err_str),
		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
		 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));

	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
	return to_ctx.status;
}

static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
		.name = "tx",
		.recover = mlx5e_tx_reporter_recover,
		.diagnose = mlx5e_tx_reporter_diagnose,
		.dump = mlx5e_tx_reporter_dump,
};

/* Grace period (msecs) between auto-recoveries before devlink declares the
 * reporter unhealthy.
 */
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500

/* Create the per-port "tx" devlink health reporter. Failure is non-fatal:
 * a warning is logged and priv->tx_reporter is left NULL.
 */
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
	struct devlink_health_reporter *reporter;

	reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
						       &mlx5_tx_reporter_ops,
						       MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
	if (IS_ERR(reporter)) {
		netdev_warn(priv->netdev,
			    "Failed to create tx reporter, err = %ld\n",
			    PTR_ERR(reporter));
		return;
	}
	priv->tx_reporter = reporter;
}

/* Destroy the tx reporter if it was created; idempotent via the NULL check
 * and reset of priv->tx_reporter.
 */
void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
{
	if (!priv->tx_reporter)
		return;

	devlink_health_reporter_destroy(priv->tx_reporter);
	priv->tx_reporter = NULL;
}