/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include "qede.h"

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)

#define QEDE_RQSTAT_OFFSET(stat_name) \
	 (offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}

#define QEDE_SELFTEST_POLL_COUNT 100

static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
		   qede_rqstats_arr[(sindex)].offset)))
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	bool pf_only;
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_65_to_127_byte_packets),
	QEDE_PF_STAT(rx_128_to_255_byte_packets),
	QEDE_PF_STAT(rx_256_to_511_byte_packets),
	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
	QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),

	/* 'crtl' is a typo inherited from the field name in struct
	 * qede_stats; since the macro stringifies the field, the name here
	 * must match the struct for the offsetof() to compile.
	 */
	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_STAT(tx_lpi_entry_count),
	QEDE_PF_STAT(tx_total_collisions),
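
	/* Discard counters: the brb_* entries count frames dropped in the
	 * device's internal buffer block (BRB), while the *_filter_discards
	 * entries count frames rejected by MAC/mftag filtering.
	 */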
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_STAT(tx_err_drop_pkts),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_STATS_DATA(dev, index) \
	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
		   + qede_stats_arr[(index)].offset)))

#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)

enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_LEN,
};

static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"Coupled-Function",
};

enum qede_ethtool_tests {
	QEDE_ETHTOOL_INT_LOOPBACK,
	QEDE_ETHTOOL_INTERRUPT_TEST,
	QEDE_ETHTOOL_MEMORY_TEST,
	QEDE_ETHTOOL_REGISTER_TEST,
	QEDE_ETHTOOL_CLOCK_TEST,
	QEDE_ETHTOOL_TEST_MAX
};

static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Internal loopback (offline)",
	"Interrupt (online)\t",
	"Memory (online)\t\t",
	"Register (online)\t",
	"Clock (online)\t\t",
};

static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	int i, j, k;

	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_stats_arr[i].string);
		j++;
	}

	for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_rqstats_arr[k].string);
}

static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, qede_private_arr,
		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
		break;
	case ETH_SS_TEST:
		memcpy(buf, qede_tests_str_arr,
		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}

static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int sidx, cnt = 0;
	int qid;

	qede_fill_by_demand_stats(edev);

	mutex_lock(&edev->qede_lock);

	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
		if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
			continue;
		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
	}

	for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
		buf[cnt] = 0;
		for (qid = 0; qid < edev->num_rss; qid++)
			buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
		cnt++;
	}

	mutex_unlock(&edev->qede_lock);
}

static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		if (IS_VF(edev)) {
			int i;

			for (i = 0; i < QEDE_NUM_STATS; i++)
				if (qede_stats_arr[i].pf_only)
					num_stats--;
		}
		return num_stats + QEDE_NUM_RQSTATS;
	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		return QEDE_ETHTOOL_TEST_MAX;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}

static u32 qede_get_priv_flags(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}

static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	cmd->supported = current_link.supported_caps;
	cmd->advertising = current_link.advertised_caps;
	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
		ethtool_cmd_speed_set(cmd, current_link.speed);
		cmd->duplex = current_link.duplex;
	} else {
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	}
	cmd->port = current_link.port;
	cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						AUTONEG_DISABLE;
	cmd->lp_advertising = current_link.lp_caps;

	return 0;
}

static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;
	u32 speed;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	memset(&params, 0, sizeof(params));
	edev->ops->common->get_link(edev->cdev, &current_link);

	speed = ethtool_cmd_speed(cmd);
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		params.autoneg = true;
		params.forced_speed = 0;
		params.adv_speeds = cmd->advertising;
	} else { /* forced speed */
		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
		params.autoneg = false;
		params.forced_speed = speed;
		switch (speed) {
		case SPEED_10000:
			if (!(current_link.supported_caps &
			      SUPPORTED_10000baseKR_Full)) {
				DP_INFO(edev, "10G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = SUPPORTED_10000baseKR_Full;
			break;
		case SPEED_40000:
			if (!(current_link.supported_caps &
			      SUPPORTED_40000baseLR4_Full)) {
				DP_INFO(edev, "40G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = SUPPORTED_40000baseLR4_Full;
			break;
		default:
			DP_INFO(edev, "Unsupported speed %u\n", speed);
			return -EINVAL;
		}
	}

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);

	strlcpy(info->driver, "qede", sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
	    sizeof(info->fw_version)) {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s storm %s", mfw, storm);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %s", mfw, storm);
	}

	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}

static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
	       edev->dp_module;
}

static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}

static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}

static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}

static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}

static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);

	return 0;
}

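/* Pause configuration is not cached by the driver; it is read from and
 * pushed to the qed core's link state through the common get_link()/
 * set_link() ops used below.
 */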
static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}

static int qede_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;
	struct qed_link_output current_link;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Pause settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (epause->autoneg) {
		if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
			DP_INFO(edev, "autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (epause->rx_pause)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (epause->tx_pause)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
	edev->ndev->mtu = args->mtu;
}

/* Netdevice NDOs */
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define ETH_MIN_PACKET_SIZE 60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	union qede_reload_args args;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		DP_ERR(edev, "Can't support requested MTU size\n");
		return -EINVAL;
	}

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the mtu field and re-start the interface if needed */
	args.mtu = new_mtu;

	if (netif_running(edev->ndev))
		qede_reload(edev, &qede_update_mtu, &args);

	qede_update_mtu(edev, &args);

	return 0;
}

static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_RSS_CNT(edev);
}

static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	/* We don't support separate rx / tx, nor `other' channels.
	 */
	if (channels->rx_count || channels->tx_count ||
	    channels->other_count || (channels->combined_count == 0) ||
	    (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if (channels->combined_count == QEDE_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible between the hwfns */
	if (channels->combined_count % edev->dev_info.common.num_hwfns) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_rss = channels->combined_count;
	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);

	return 0;
}

static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rules __always_unused)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = edev->num_rss;
		return 0;
	case ETHTOOL_GRXFH:
		return qede_get_rss_flags(edev, info);
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params vport_update_params;
	u8 set_caps = 0, clr_caps = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
					   ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
				    set_caps;
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}

static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return qede_set_rss_flags(edev, info);
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}

static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_params.rss_key);
}

static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		indir[i] = edev->rss_params.rss_ind_table[i];

	if (key)
		memcpy(key, edev->rss_params.rss_key,
		       qede_get_rxfh_key_size(dev));

	return 0;
}

static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!indir && !key)
		return 0;

	if (indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_params.rss_ind_table[i] = indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (key) {
		memcpy(&edev->rss_params.rss_key, key,
		       qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}

/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_rss(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}

/* This function disables the NAPI and the interrupt generation on the device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}

static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx, val;

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* Update the first BD with the actual number of BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod++;
	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons++;
	txq->sw_tx_ring[idx].skb = NULL;

	return 0;
}

static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	u8 *data_ptr;
	int i;

	/* The packet is expected to be received on rx-queue 0 even though
	 * RSS is enabled, since queue 0 is configured as the default queue
	 * and the loopback traffic is not IP.
	 */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_has_rx_work(rxq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_has_rx_work(rxq)) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * CQE/BD before reading hw_comp_cons. If the CQE is read before it is
	 * written by FW, then FW writes CQE and SB, and then the CPU reads the
	 * hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

	/* Get the data from the SW ring */
	sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	data_ptr = (u8 *)(page_address(sw_rx_data->data) +
			  fp_cqe->placement_offset + sw_rx_data->page_offset);
	for (i = ETH_HLEN; i < len; i++)
		if (data_ptr[i] != (unsigned char)(i & 0xff)) {
			DP_NOTICE(edev, "Loopback test failed\n");
			qede_recycle_rx_bd_ring(rxq, edev, 1);
			return -1;
		}

	qede_recycle_rx_bd_ring(rxq, edev, 1);

	return 0;
}

static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* Prepare the loopback packet */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
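	/* Build the loopback frame: destination and source MAC are both our
	 * own address, the rest of the Ethernet header is 0x77 filler, and
	 * the payload is an incrementing byte pattern that the receive side
	 * verifies byte-by-byte.
	 */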
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}

static void qede_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Self-test command parameters: offline = %d, external_lb = %d\n",
		   (etest->flags & ETH_TEST_FL_OFFLINE),
		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);

	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (qede_selftest_run_loopback(edev,
					       QED_LINK_LOOPBACK_INT_PHY)) {
			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static const struct ethtool_ops qede_ethtool_ops = {
	.get_settings = qede_get_settings,
	.set_settings = qede_set_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
};

/* VF ethtool ops: the PF set minus operations a VF may not perform
 * (link and pause control, LED blinking, self-test).
 */
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_settings = qede_get_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
};

void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}