/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include "qede.h"

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name)	_QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name)	_QEDE_STAT(stat_name, false)

#define QEDE_RQSTAT_OFFSET(stat_name) \
	 (offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
		   qede_rqstats_arr[(sindex)].offset)))
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	bool pf_only;
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_127_byte_packets),
	QEDE_PF_STAT(rx_255_byte_packets),
	QEDE_PF_STAT(rx_511_byte_packets),
	QEDE_PF_STAT(rx_1023_byte_packets),
	QEDE_PF_STAT(rx_1518_byte_packets),
	QEDE_PF_STAT(rx_1522_byte_packets),
	QEDE_PF_STAT(rx_2047_byte_packets),
	QEDE_PF_STAT(rx_4095_byte_packets),
	QEDE_PF_STAT(rx_9216_byte_packets),
	QEDE_PF_STAT(rx_16383_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),

	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_STAT(tx_lpi_entry_count),
	QEDE_PF_STAT(tx_total_collisions),
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_STAT(tx_err_drop_pkts),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_STATS_DATA(dev, index) \
	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
		    + qede_stats_arr[(index)].offset)))

#define QEDE_NUM_STATS	ARRAY_SIZE(qede_stats_arr)

static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	int i, j, k;

	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_stats_arr[i].string);
		j++;
	}

	for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_rqstats_arr[k].string);
}

static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}

static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int sidx, cnt = 0;
	int qid;

	qede_fill_by_demand_stats(edev);

	mutex_lock(&edev->qede_lock);

	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);

	for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
		buf[cnt] = 0;
		for (qid = 0; qid < edev->num_rss; qid++)
			buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
		cnt++;
	}

	mutex_unlock(&edev->qede_lock);
}

static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		return num_stats + QEDE_NUM_RQSTATS;

	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}

static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	cmd->supported = current_link.supported_caps;
	cmd->advertising = current_link.advertised_caps;
	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
		ethtool_cmd_speed_set(cmd, current_link.speed);
		cmd->duplex = current_link.duplex;
	} else {
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	}
	cmd->port = current_link.port;
	cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						AUTONEG_DISABLE;
	cmd->lp_advertising = current_link.lp_caps;

	return 0;
}

static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;
	u32 speed;

	if (!edev->dev_info.common.is_mf_default) {
		DP_INFO(edev,
			"Link parameters can not be changed in non-default mode\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	memset(&params, 0, sizeof(params));
	edev->ops->common->get_link(edev->cdev, &current_link);

	speed = ethtool_cmd_speed(cmd);
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		params.autoneg = true;
		params.forced_speed = 0;
		params.adv_speeds = cmd->advertising;
	} else { /* forced speed */
		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
		params.autoneg = false;
		params.forced_speed = speed;
		switch (speed) {
		case SPEED_10000:
			if (!(current_link.supported_caps &
			      SUPPORTED_10000baseKR_Full)) {
				DP_INFO(edev, "10G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = SUPPORTED_10000baseKR_Full;
			break;
		case SPEED_40000:
			if (!(current_link.supported_caps &
			      SUPPORTED_40000baseLR4_Full)) {
				DP_INFO(edev, "40G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = SUPPORTED_40000baseLR4_Full;
			break;
		default:
			DP_INFO(edev, "Unsupported speed %u\n", speed);
			return -EINVAL;
		}
	}

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);

	strlcpy(info->driver, "qede", sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
	    sizeof(info->fw_version)) {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s storm %s", mfw, storm);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %s", mfw, storm);
	}

	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}

static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
	       edev->dp_module;
}

static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}
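
/* Restart link negotiation by taking the physical link down and back up. */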
static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}

static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}

static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}

static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);

	return 0;
}

static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}

static int qede_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;
	struct qed_link_output current_link;

	if (!edev->dev_info.common.is_mf_default) {
		DP_INFO(edev,
			"Pause parameters can not be updated in non-default mode\n");
		return -EOPNOTSUPP;
	}

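	/* Read back the current link config; autoneg-based pause can only be
	 * requested if the port actually supports autonegotiation.
	 */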
	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (epause->autoneg) {
		if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
			DP_INFO(edev, "autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (epause->rx_pause)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (epause->tx_pause)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
	edev->ndev->mtu = args->mtu;
}

/* Netdevice NDOs */
#define ETH_MAX_JUMBO_PACKET_SIZE	9600
#define ETH_MIN_PACKET_SIZE		60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	union qede_reload_args args;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		DP_ERR(edev, "Can't support requested MTU size\n");
		return -EINVAL;
	}

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the mtu field and re-start the interface if needed */
	args.mtu = new_mtu;

	if (netif_running(edev->ndev))
		qede_reload(edev, &qede_update_mtu, &args);

	qede_update_mtu(edev, &args);

	return 0;
}

static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_RSS_CNT(edev);
}

static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	/* We don't support separate rx / tx, nor `other' channels.
	 */
	if (channels->rx_count || channels->tx_count ||
	    channels->other_count || (channels->combined_count == 0) ||
	    (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if (channels->combined_count == QEDE_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible between the hwfns */
	if (channels->combined_count % edev->dev_info.common.num_hwfns) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_rss = channels->combined_count;
	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);

	return 0;
}

static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

static const struct ethtool_ops qede_ethtool_ops = {
	.get_settings = qede_get_settings,
	.set_settings = qede_set_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_sset_count = qede_get_sset_count,

	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
};

void qede_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &qede_ethtool_ops;
}