/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include "qede.h"

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name)	_QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name)	_QEDE_STAT(stat_name, false)

#define QEDE_RQSTAT_OFFSET(stat_name) \
	 (offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}

#define QEDE_SELFTEST_POLL_COUNT 100

static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
		   qede_rqstats_arr[(sindex)].offset)))
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	bool pf_only;
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_65_to_127_byte_packets),
	QEDE_PF_STAT(rx_128_to_255_byte_packets),
	QEDE_PF_STAT(rx_256_to_511_byte_packets),
	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
	QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),

	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_STAT(tx_lpi_entry_count),
	QEDE_PF_STAT(tx_total_collisions),
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_STAT(tx_err_drop_pkts),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_STATS_DATA(dev, index) \
	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
		   + qede_stats_arr[(index)].offset)))

#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
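/* Reading note (illustrative, not part of the driver logic): the accessor
 * macros locate a counter purely by the offset recorded in the tables above.
 * QEDE_STATS_DATA(edev, i) expands to a u64 load from
 *	(char *)edev + offsetof(struct qede_dev, stats) + qede_stats_arr[i].offset
 * so, given the array layout above, QEDE_STATS_DATA(edev, 0) reads
 * edev->stats.rx_ucast_bytes; QEDE_RQSTATS_DATA() does the same relative to
 * a per-queue struct qede_rx_queue.
 */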
enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_LEN,
};

static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"Coupled-Function",
};

enum qede_ethtool_tests {
	QEDE_ETHTOOL_INT_LOOPBACK,
	QEDE_ETHTOOL_INTERRUPT_TEST,
	QEDE_ETHTOOL_MEMORY_TEST,
	QEDE_ETHTOOL_REGISTER_TEST,
	QEDE_ETHTOOL_CLOCK_TEST,
	QEDE_ETHTOOL_TEST_MAX
};

static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Internal loopback (offline)",
	"Interrupt (online)\t",
	"Memory (online)\t\t",
	"Register (online)\t",
	"Clock (online)\t\t",
};

static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	int i, j, k;

	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_stats_arr[i].string);
		j++;
	}

	for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
		strcpy(buf + j * ETH_GSTRING_LEN,
		       qede_rqstats_arr[k].string);
}

static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, qede_private_arr,
		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
		break;
	case ETH_SS_TEST:
		memcpy(buf, qede_tests_str_arr,
		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}

static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int sidx, cnt = 0;
	int qid;

	qede_fill_by_demand_stats(edev);

	mutex_lock(&edev->qede_lock);

	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
		if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
			continue;
		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
	}

	for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
		buf[cnt] = 0;
		for (qid = 0; qid < edev->num_rss; qid++)
			buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
		cnt++;
	}

	mutex_unlock(&edev->qede_lock);
}

static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		if (IS_VF(edev)) {
			int i;

			for (i = 0; i < QEDE_NUM_STATS; i++)
				if (qede_stats_arr[i].pf_only)
					num_stats--;
		}
		return num_stats + QEDE_NUM_RQSTATS;
	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		if (!IS_VF(edev))
			return QEDE_ETHTOOL_TEST_MAX;
		else
			return 0;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}
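/* Illustrative only (interface name hypothetical): the string sets above back
 * the standard ethtool queries, e.g.
 *	ethtool -S eth0			-> ETH_SS_STATS count, strings and values
 *	ethtool --show-priv-flags eth0	-> ETH_SS_PRIV_FLAGS
 *	ethtool -t eth0			-> ETH_SS_TEST (PF only; VFs report 0 tests)
 */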
"Unsupported stringset 0x%08x\n", stringset); 240 return -EINVAL; 241 } 242 } 243 244 static u32 qede_get_priv_flags(struct net_device *dev) 245 { 246 struct qede_dev *edev = netdev_priv(dev); 247 248 return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; 249 } 250 251 static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 252 { 253 struct qede_dev *edev = netdev_priv(dev); 254 struct qed_link_output current_link; 255 256 memset(¤t_link, 0, sizeof(current_link)); 257 edev->ops->common->get_link(edev->cdev, ¤t_link); 258 259 cmd->supported = current_link.supported_caps; 260 cmd->advertising = current_link.advertised_caps; 261 if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { 262 ethtool_cmd_speed_set(cmd, current_link.speed); 263 cmd->duplex = current_link.duplex; 264 } else { 265 cmd->duplex = DUPLEX_UNKNOWN; 266 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 267 } 268 cmd->port = current_link.port; 269 cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : 270 AUTONEG_DISABLE; 271 cmd->lp_advertising = current_link.lp_caps; 272 273 return 0; 274 } 275 276 static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 277 { 278 struct qede_dev *edev = netdev_priv(dev); 279 struct qed_link_output current_link; 280 struct qed_link_params params; 281 u32 speed; 282 283 if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { 284 DP_INFO(edev, 285 "Link settings are not allowed to be changed\n"); 286 return -EOPNOTSUPP; 287 } 288 289 memset(¤t_link, 0, sizeof(current_link)); 290 memset(¶ms, 0, sizeof(params)); 291 edev->ops->common->get_link(edev->cdev, ¤t_link); 292 293 speed = ethtool_cmd_speed(cmd); 294 params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; 295 params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; 296 if (cmd->autoneg == AUTONEG_ENABLE) { 297 params.autoneg = true; 298 params.forced_speed = 0; 299 params.adv_speeds = cmd->advertising; 300 } else { /* forced speed */ 301 params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; 302 params.autoneg = false; 303 params.forced_speed = speed; 304 switch (speed) { 305 case SPEED_10000: 306 if (!(current_link.supported_caps & 307 SUPPORTED_10000baseKR_Full)) { 308 DP_INFO(edev, "10G speed not supported\n"); 309 return -EINVAL; 310 } 311 params.adv_speeds = SUPPORTED_10000baseKR_Full; 312 break; 313 case SPEED_40000: 314 if (!(current_link.supported_caps & 315 SUPPORTED_40000baseLR4_Full)) { 316 DP_INFO(edev, "40G speed not supported\n"); 317 return -EINVAL; 318 } 319 params.adv_speeds = SUPPORTED_40000baseLR4_Full; 320 break; 321 default: 322 DP_INFO(edev, "Unsupported speed %u\n", speed); 323 return -EINVAL; 324 } 325 } 326 327 params.link_up = true; 328 edev->ops->common->set_link(edev->cdev, ¶ms); 329 330 return 0; 331 } 332 333 static void qede_get_drvinfo(struct net_device *ndev, 334 struct ethtool_drvinfo *info) 335 { 336 char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; 337 struct qede_dev *edev = netdev_priv(ndev); 338 339 strlcpy(info->driver, "qede", sizeof(info->driver)); 340 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 341 342 snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", 343 edev->dev_info.common.fw_major, 344 edev->dev_info.common.fw_minor, 345 edev->dev_info.common.fw_rev, 346 edev->dev_info.common.fw_eng); 347 348 snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", 349 (edev->dev_info.common.mfw_rev >> 24) & 0xFF, 350 (edev->dev_info.common.mfw_rev >> 16) & 0xFF, 351 
static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);

	strlcpy(info->driver, "qede", sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
	    sizeof(info->fw_version)) {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s storm %s", mfw, storm);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %s", mfw, storm);
	}

	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}

static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
	       edev->dp_module;
}

static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}

static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}

static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}

static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}

static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);

	return 0;
}
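/* Illustrative only (interface name and sizes hypothetical):
 *	ethtool -G eth0 rx 2048 tx 2048
 * lands here; out-of-range values are rejected against
 * NUM_{RX,TX}_BDS_{MIN,MAX}, and an in-range change triggers a full reload of
 * a running interface.
 */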
static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}

static int qede_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;
	struct qed_link_output current_link;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Pause settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (epause->autoneg) {
		if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
			DP_INFO(edev, "autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (epause->rx_pause)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (epause->tx_pause)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
	edev->ndev->mtu = args->mtu;
}

/* Netdevice NDOs */
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define ETH_MIN_PACKET_SIZE 60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	union qede_reload_args args;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		DP_ERR(edev, "Can't support requested MTU size\n");
		return -EINVAL;
	}

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the mtu field and re-start the interface if needed */
	args.mtu = new_mtu;

	if (netif_running(edev->ndev))
		qede_reload(edev, &qede_update_mtu, &args);

	qede_update_mtu(edev, &args);

	return 0;
}

static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_RSS_CNT(edev);
}

static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	/* We don't support separate rx / tx, nor `other' channels. */
	if (channels->rx_count || channels->tx_count ||
	    channels->other_count || (channels->combined_count == 0) ||
	    (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if (channels->combined_count == QEDE_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible between the hwfns */
	if (channels->combined_count % edev->dev_info.common.num_hwfns) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_rss = channels->combined_count;
	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);

	return 0;
}
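/* Illustrative only (interface name and count hypothetical):
 *	ethtool -L eth0 combined 8
 * is the only accepted shape; on a device with two hwfns the requested count
 * must also be even to satisfy the divisibility check above.
 */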
static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rules __always_unused)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = edev->num_rss;
		return 0;
	case ETHTOOL_GRXFH:
		return qede_get_rss_flags(edev, info);
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params vport_update_params;
	u8 set_caps = 0, clr_caps = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
					   ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
				    set_caps;
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}
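/* Illustrative only (interface name hypothetical): the UDP toggles above map
 * to ethtool's rx-flow-hash interface, e.g.
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn	-> 4-tuple (sets QED_RSS_IPV4_UDP)
 *	ethtool -N eth0 rx-flow-hash udp4 sd	-> 2-tuple (clears it)
 * TCP is fixed at 4-tuple and plain IPv4/IPv6 at 2-tuple.
 */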
static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return qede_set_rss_flags(edev, info);
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}

static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_params.rss_key);
}

static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		indir[i] = edev->rss_params.rss_ind_table[i];

	if (key)
		memcpy(key, edev->rss_params.rss_key,
		       qede_get_rxfh_key_size(dev));

	return 0;
}

static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!indir && !key)
		return 0;

	if (indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_params.rss_ind_table[i] = indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (key) {
		memcpy(&edev->rss_params.rss_key, key,
		       qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}
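/* Illustrative only (interface name hypothetical): these RXFH ops back
 *	ethtool -x eth0		-> get_rxfh (indirection table + hash key)
 *	ethtool -X eth0 equal 4	-> set_rxfh
 * Only the Toeplitz hash (ETH_RSS_HASH_TOP) is accepted.
 */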
/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_rss(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}

/* This function disables the NAPI and the interrupt generation on the device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}

static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx, val;

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod++;
	/* 'next page' entries are counted in the producer value */
	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
	txq->tx_db.data.bd_prod = val;

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons++;
	txq->sw_tx_ring[idx].skb = NULL;

	return 0;
}
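/* Both selftest helpers poll for completion (QEDE_SELFTEST_POLL_COUNT
 * iterations of usleep_range(100, 200), i.e. roughly 10-20ms total) instead
 * of relying on interrupts, since NAPI is disabled for the duration of the
 * loopback test.
 */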
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	u8 *data_ptr;
	int i;

	/* The packet is expected to arrive on rx-queue 0 even though RSS is
	 * enabled: queue 0 is configured as the default queue, and the
	 * loopback traffic is not IP.
	 */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_has_rx_work(rxq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_has_rx_work(rxq)) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * the CQE/BD before reading hw_comp_cons. If the CQE were read before
	 * FW writes the CQE and SB, the CPU would operate on a stale CQE.
	 */
	rmb();

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

	/* Get the data from the SW ring */
	sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	data_ptr = (u8 *)(page_address(sw_rx_data->data) +
			  fp_cqe->placement_offset + sw_rx_data->page_offset);
	for (i = ETH_HLEN; i < len; i++)
		if (data_ptr[i] != (unsigned char)(i & 0xff)) {
			DP_NOTICE(edev, "Loopback test failed\n");
			qede_recycle_rx_bd_ring(rxq, edev, 1);
			return -1;
		}

	qede_recycle_rx_bd_ring(rxq, edev, 1);

	return 0;
}

static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* Prepare the loopback packet */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}
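/* Dispatches the ethtool self-test. The internal-PHY loopback runs only when
 * userspace requests an offline test (e.g. "ethtool -t <dev> offline", device
 * name supplied by the user); the interrupt, memory, register and clock tests
 * always run. A non-zero entry in buf[] marks the matching
 * qede_ethtool_tests entry as failed.
 */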
static void qede_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Self-test command parameters: offline = %d, external_lb = %d\n",
		   (etest->flags & ETH_TEST_FL_OFFLINE),
		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);

	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (qede_selftest_run_loopback(edev,
					       QED_LINK_LOOPBACK_INT_PHY)) {
			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static const struct ethtool_ops qede_ethtool_ops = {
	.get_settings = qede_get_settings,
	.set_settings = qede_set_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
};
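/* VFs get a reduced op set: no link/pause configuration, LED blinking or
 * self-tests, matching the PF-only handling in the stats and sset-count
 * callbacks above.
 */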
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_settings = qede_get_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
};

void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}