1 /* QLogic qede NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
31 */ 32 #include <linux/version.h> 33 #include <linux/types.h> 34 #include <linux/netdevice.h> 35 #include <linux/etherdevice.h> 36 #include <linux/ethtool.h> 37 #include <linux/string.h> 38 #include <linux/pci.h> 39 #include <linux/capability.h> 40 #include <linux/vmalloc.h> 41 #include "qede.h" 42 #include "qede_ptp.h" 43 44 #define QEDE_RQSTAT_OFFSET(stat_name) \ 45 (offsetof(struct qede_rx_queue, stat_name)) 46 #define QEDE_RQSTAT_STRING(stat_name) (#stat_name) 47 #define QEDE_RQSTAT(stat_name) \ 48 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} 49 50 #define QEDE_SELFTEST_POLL_COUNT 100 51 52 static const struct { 53 u64 offset; 54 char string[ETH_GSTRING_LEN]; 55 } qede_rqstats_arr[] = { 56 QEDE_RQSTAT(rcv_pkts), 57 QEDE_RQSTAT(rx_hw_errors), 58 QEDE_RQSTAT(rx_alloc_errors), 59 QEDE_RQSTAT(rx_ip_frags), 60 QEDE_RQSTAT(xdp_no_pass), 61 }; 62 63 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) 64 #define QEDE_TQSTAT_OFFSET(stat_name) \ 65 (offsetof(struct qede_tx_queue, stat_name)) 66 #define QEDE_TQSTAT_STRING(stat_name) (#stat_name) 67 #define QEDE_TQSTAT(stat_name) \ 68 {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)} 69 #define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr) 70 static const struct { 71 u64 offset; 72 char string[ETH_GSTRING_LEN]; 73 } qede_tqstats_arr[] = { 74 QEDE_TQSTAT(xmit_pkts), 75 QEDE_TQSTAT(stopped_cnt), 76 }; 77 78 #define QEDE_STAT_OFFSET(stat_name, type, base) \ 79 (offsetof(type, stat_name) + (base)) 80 #define QEDE_STAT_STRING(stat_name) (#stat_name) 81 #define _QEDE_STAT(stat_name, type, base, attr) \ 82 {QEDE_STAT_OFFSET(stat_name, type, base), \ 83 QEDE_STAT_STRING(stat_name), \ 84 attr} 85 #define QEDE_STAT(stat_name) \ 86 _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0) 87 #define QEDE_PF_STAT(stat_name) \ 88 _QEDE_STAT(stat_name, struct qede_stats_common, 0, \ 89 BIT(QEDE_STAT_PF_ONLY)) 90 #define QEDE_PF_BB_STAT(stat_name) \ 91 _QEDE_STAT(stat_name, struct qede_stats_bb, \ 92 
offsetof(struct qede_stats, bb), \ 93 BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY)) 94 #define QEDE_PF_AH_STAT(stat_name) \ 95 _QEDE_STAT(stat_name, struct qede_stats_ah, \ 96 offsetof(struct qede_stats, ah), \ 97 BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY)) 98 static const struct { 99 u64 offset; 100 char string[ETH_GSTRING_LEN]; 101 unsigned long attr; 102 #define QEDE_STAT_PF_ONLY 0 103 #define QEDE_STAT_BB_ONLY 1 104 #define QEDE_STAT_AH_ONLY 2 105 } qede_stats_arr[] = { 106 QEDE_STAT(rx_ucast_bytes), 107 QEDE_STAT(rx_mcast_bytes), 108 QEDE_STAT(rx_bcast_bytes), 109 QEDE_STAT(rx_ucast_pkts), 110 QEDE_STAT(rx_mcast_pkts), 111 QEDE_STAT(rx_bcast_pkts), 112 113 QEDE_STAT(tx_ucast_bytes), 114 QEDE_STAT(tx_mcast_bytes), 115 QEDE_STAT(tx_bcast_bytes), 116 QEDE_STAT(tx_ucast_pkts), 117 QEDE_STAT(tx_mcast_pkts), 118 QEDE_STAT(tx_bcast_pkts), 119 120 QEDE_PF_STAT(rx_64_byte_packets), 121 QEDE_PF_STAT(rx_65_to_127_byte_packets), 122 QEDE_PF_STAT(rx_128_to_255_byte_packets), 123 QEDE_PF_STAT(rx_256_to_511_byte_packets), 124 QEDE_PF_STAT(rx_512_to_1023_byte_packets), 125 QEDE_PF_STAT(rx_1024_to_1518_byte_packets), 126 QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets), 127 QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets), 128 QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets), 129 QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets), 130 QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets), 131 QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets), 132 QEDE_PF_STAT(tx_64_byte_packets), 133 QEDE_PF_STAT(tx_65_to_127_byte_packets), 134 QEDE_PF_STAT(tx_128_to_255_byte_packets), 135 QEDE_PF_STAT(tx_256_to_511_byte_packets), 136 QEDE_PF_STAT(tx_512_to_1023_byte_packets), 137 QEDE_PF_STAT(tx_1024_to_1518_byte_packets), 138 QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets), 139 QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets), 140 QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets), 141 QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets), 142 QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets), 143 QEDE_PF_STAT(rx_mac_crtl_frames), 
144 QEDE_PF_STAT(tx_mac_ctrl_frames), 145 QEDE_PF_STAT(rx_pause_frames), 146 QEDE_PF_STAT(tx_pause_frames), 147 QEDE_PF_STAT(rx_pfc_frames), 148 QEDE_PF_STAT(tx_pfc_frames), 149 150 QEDE_PF_STAT(rx_crc_errors), 151 QEDE_PF_STAT(rx_align_errors), 152 QEDE_PF_STAT(rx_carrier_errors), 153 QEDE_PF_STAT(rx_oversize_packets), 154 QEDE_PF_STAT(rx_jabbers), 155 QEDE_PF_STAT(rx_undersize_packets), 156 QEDE_PF_STAT(rx_fragments), 157 QEDE_PF_BB_STAT(tx_lpi_entry_count), 158 QEDE_PF_BB_STAT(tx_total_collisions), 159 QEDE_PF_STAT(brb_truncates), 160 QEDE_PF_STAT(brb_discards), 161 QEDE_STAT(no_buff_discards), 162 QEDE_PF_STAT(mftag_filter_discards), 163 QEDE_PF_STAT(mac_filter_discards), 164 QEDE_PF_STAT(gft_filter_drop), 165 QEDE_STAT(tx_err_drop_pkts), 166 QEDE_STAT(ttl0_discard), 167 QEDE_STAT(packet_too_big_discard), 168 169 QEDE_STAT(coalesced_pkts), 170 QEDE_STAT(coalesced_events), 171 QEDE_STAT(coalesced_aborts_num), 172 QEDE_STAT(non_coalesced_pkts), 173 QEDE_STAT(coalesced_bytes), 174 175 QEDE_STAT(link_change_count), 176 }; 177 178 #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) 179 #define QEDE_STAT_IS_PF_ONLY(i) \ 180 test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr) 181 #define QEDE_STAT_IS_BB_ONLY(i) \ 182 test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr) 183 #define QEDE_STAT_IS_AH_ONLY(i) \ 184 test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr) 185 186 enum { 187 QEDE_PRI_FLAG_CMT, 188 QEDE_PRI_FLAG_LEN, 189 }; 190 191 static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { 192 "Coupled-Function", 193 }; 194 195 enum qede_ethtool_tests { 196 QEDE_ETHTOOL_INT_LOOPBACK, 197 QEDE_ETHTOOL_INTERRUPT_TEST, 198 QEDE_ETHTOOL_MEMORY_TEST, 199 QEDE_ETHTOOL_REGISTER_TEST, 200 QEDE_ETHTOOL_CLOCK_TEST, 201 QEDE_ETHTOOL_NVRAM_TEST, 202 QEDE_ETHTOOL_TEST_MAX 203 }; 204 205 static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { 206 "Internal loopback (offline)", 207 "Interrupt (online)\t", 208 "Memory (online)\t\t", 209 
	"Register (online)\t",
	"Clock (online)\t\t",
	"Nvram (online)\t\t",
};

/* Emit one "<queue-idx>: <stat-name>" string per Tx-queue statistic into
 * *buf, advancing *buf by ETH_GSTRING_LEN per entry.  XDP queues are
 * labelled "[XDP]" and indexed via QEDE_TXQ_XDP_TO_IDX().
 */
static void qede_get_strings_stats_txq(struct qede_dev *edev,
				       struct qede_tx_queue *txq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		if (txq->is_xdp)
			sprintf(*buf, "%d [XDP]: %s",
				QEDE_TXQ_XDP_TO_IDX(edev, txq),
				qede_tqstats_arr[i].string);
		else
			sprintf(*buf, "%d: %s", txq->index,
				qede_tqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}

/* Emit one "<rxq-id>: <stat-name>" string per Rx-queue statistic into
 * *buf, advancing *buf by ETH_GSTRING_LEN per entry.
 */
static void qede_get_strings_stats_rxq(struct qede_dev *edev,
				       struct qede_rx_queue *rxq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		sprintf(*buf, "%d: %s", rxq->rxq_id,
			qede_rqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}

/* A stat is irrelevant (and must be skipped consistently by both the
 * string, value and count callbacks) when it is PF-only on a VF, or
 * specific to the other HW family (BB vs. AH).
 */
static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
{
	return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
	       (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
	       (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
}

/* Fill the ETH_SS_STATS string table: per-queue strings first (in the
 * same Rx/XDP/Tx order used by qede_get_ethtool_stats()), then the
 * device-wide stats, skipping irrelevant ones.
 */
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	struct qede_fastpath *fp;
	int i;

	/* Account for queue statistics */
	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_strings_stats_txq(edev, fp->txq, &buf);
	}

	/* Account for non-queue statistics */
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
		strcpy(buf, qede_stats_arr[i].string);
		buf += ETH_GSTRING_LEN;
	}
}

/* ethtool .get_strings callback - dispatch per string-set. */
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, qede_private_arr,
		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
		break;
	case ETH_SS_TEST:
		memcpy(buf, qede_tests_str_arr,
		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}

/* Copy each Tx-queue stat out of the queue struct (located by the byte
 * offset recorded in qede_tqstats_arr) into *buf, advancing *buf.
 */
static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		**buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
		(*buf)++;
	}
}

/* Copy each Rx-queue stat out of the queue struct (located by the byte
 * offset recorded in qede_rqstats_arr) into *buf, advancing *buf.
 */
static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
		(*buf)++;
	}
}

/* ethtool .get_ethtool_stats callback.  Values are written in the exact
 * order the strings were emitted by qede_get_strings_stats().
 */
static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	int i;

	qede_fill_by_demand_stats(edev);

	/* Need to protect the access to the fastpath array */
	__qede_lock(edev);

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_ethtool_stats_rxq(fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_ethtool_stats_txq(fp->txq, &buf);
	}

	/* Device-wide stats, read from edev->stats by recorded offset;
	 * irrelevant stats are skipped with the same predicate used for
	 * the strings so the two arrays stay aligned.
	 */
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
		*buf = *((u64 *)(((void *)&edev->stats) +
				 qede_stats_arr[i].offset));

		buf++;
	}

	__qede_unlock(edev);
}

/* ethtool .get_sset_count callback - must agree with the counts produced
 * by qede_get_strings()/qede_get_ethtool_stats().
 */
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats =
QEDE_NUM_STATS, i; 361 362 switch (stringset) { 363 case ETH_SS_STATS: 364 for (i = 0; i < QEDE_NUM_STATS; i++) 365 if (qede_is_irrelevant_stat(edev, i)) 366 num_stats--; 367 368 /* Account for the Regular Tx statistics */ 369 num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; 370 371 /* Account for the Regular Rx statistics */ 372 num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS; 373 374 /* Account for XDP statistics [if needed] */ 375 if (edev->xdp_prog) 376 num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS; 377 return num_stats; 378 379 case ETH_SS_PRIV_FLAGS: 380 return QEDE_PRI_FLAG_LEN; 381 case ETH_SS_TEST: 382 if (!IS_VF(edev)) 383 return QEDE_ETHTOOL_TEST_MAX; 384 else 385 return 0; 386 default: 387 DP_VERBOSE(edev, QED_MSG_DEBUG, 388 "Unsupported stringset 0x%08x\n", stringset); 389 return -EINVAL; 390 } 391 } 392 393 static u32 qede_get_priv_flags(struct net_device *dev) 394 { 395 struct qede_dev *edev = netdev_priv(dev); 396 397 return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; 398 } 399 400 struct qede_link_mode_mapping { 401 u32 qed_link_mode; 402 u32 ethtool_link_mode; 403 }; 404 405 static const struct qede_link_mode_mapping qed_lm_map[] = { 406 {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, 407 {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, 408 {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, 409 {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, 410 {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, 411 {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, 412 {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, 413 {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, 414 {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, 415 {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, 416 {QED_LM_100000baseKR4_Full_BIT, 417 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, 418 }; 419 420 #define 
QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ 421 { \ 422 int i; \ 423 \ 424 for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ 425 if ((caps) & (qed_lm_map[i].qed_link_mode)) \ 426 __set_bit(qed_lm_map[i].ethtool_link_mode,\ 427 lk_ksettings->link_modes.name); \ 428 } \ 429 } 430 431 #define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ 432 { \ 433 int i; \ 434 \ 435 for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ 436 if (test_bit(qed_lm_map[i].ethtool_link_mode, \ 437 lk_ksettings->link_modes.name)) \ 438 caps |= qed_lm_map[i].qed_link_mode; \ 439 } \ 440 } 441 442 static int qede_get_link_ksettings(struct net_device *dev, 443 struct ethtool_link_ksettings *cmd) 444 { 445 struct ethtool_link_settings *base = &cmd->base; 446 struct qede_dev *edev = netdev_priv(dev); 447 struct qed_link_output current_link; 448 449 __qede_lock(edev); 450 451 memset(¤t_link, 0, sizeof(current_link)); 452 edev->ops->common->get_link(edev->cdev, ¤t_link); 453 454 ethtool_link_ksettings_zero_link_mode(cmd, supported); 455 QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) 456 457 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 458 QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) 459 460 ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); 461 QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising) 462 463 if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { 464 base->speed = current_link.speed; 465 base->duplex = current_link.duplex; 466 } else { 467 base->speed = SPEED_UNKNOWN; 468 base->duplex = DUPLEX_UNKNOWN; 469 } 470 471 __qede_unlock(edev); 472 473 base->port = current_link.port; 474 base->autoneg = (current_link.autoneg) ? 
AUTONEG_ENABLE : 475 AUTONEG_DISABLE; 476 477 return 0; 478 } 479 480 static int qede_set_link_ksettings(struct net_device *dev, 481 const struct ethtool_link_ksettings *cmd) 482 { 483 const struct ethtool_link_settings *base = &cmd->base; 484 struct qede_dev *edev = netdev_priv(dev); 485 struct qed_link_output current_link; 486 struct qed_link_params params; 487 488 if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { 489 DP_INFO(edev, "Link settings are not allowed to be changed\n"); 490 return -EOPNOTSUPP; 491 } 492 memset(¤t_link, 0, sizeof(current_link)); 493 memset(¶ms, 0, sizeof(params)); 494 edev->ops->common->get_link(edev->cdev, ¤t_link); 495 496 params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; 497 params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; 498 if (base->autoneg == AUTONEG_ENABLE) { 499 if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { 500 DP_INFO(edev, "Auto negotiation is not supported\n"); 501 return -EOPNOTSUPP; 502 } 503 504 params.autoneg = true; 505 params.forced_speed = 0; 506 QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) 507 } else { /* forced speed */ 508 params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; 509 params.autoneg = false; 510 params.forced_speed = base->speed; 511 switch (base->speed) { 512 case SPEED_1000: 513 if (!(current_link.supported_caps & 514 QED_LM_1000baseT_Full_BIT)) { 515 DP_INFO(edev, "1G speed not supported\n"); 516 return -EINVAL; 517 } 518 params.adv_speeds = QED_LM_1000baseT_Full_BIT; 519 break; 520 case SPEED_10000: 521 if (!(current_link.supported_caps & 522 QED_LM_10000baseKR_Full_BIT)) { 523 DP_INFO(edev, "10G speed not supported\n"); 524 return -EINVAL; 525 } 526 params.adv_speeds = QED_LM_10000baseKR_Full_BIT; 527 break; 528 case SPEED_25000: 529 if (!(current_link.supported_caps & 530 QED_LM_25000baseKR_Full_BIT)) { 531 DP_INFO(edev, "25G speed not supported\n"); 532 return -EINVAL; 533 } 534 params.adv_speeds = 
QED_LM_25000baseKR_Full_BIT; 535 break; 536 case SPEED_40000: 537 if (!(current_link.supported_caps & 538 QED_LM_40000baseLR4_Full_BIT)) { 539 DP_INFO(edev, "40G speed not supported\n"); 540 return -EINVAL; 541 } 542 params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; 543 break; 544 case SPEED_50000: 545 if (!(current_link.supported_caps & 546 QED_LM_50000baseKR2_Full_BIT)) { 547 DP_INFO(edev, "50G speed not supported\n"); 548 return -EINVAL; 549 } 550 params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; 551 break; 552 case SPEED_100000: 553 if (!(current_link.supported_caps & 554 QED_LM_100000baseKR4_Full_BIT)) { 555 DP_INFO(edev, "100G speed not supported\n"); 556 return -EINVAL; 557 } 558 params.adv_speeds = QED_LM_100000baseKR4_Full_BIT; 559 break; 560 default: 561 DP_INFO(edev, "Unsupported speed %u\n", base->speed); 562 return -EINVAL; 563 } 564 } 565 566 params.link_up = true; 567 edev->ops->common->set_link(edev->cdev, ¶ms); 568 569 return 0; 570 } 571 572 static void qede_get_drvinfo(struct net_device *ndev, 573 struct ethtool_drvinfo *info) 574 { 575 char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; 576 struct qede_dev *edev = netdev_priv(ndev); 577 578 strlcpy(info->driver, "qede", sizeof(info->driver)); 579 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 580 581 snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", 582 edev->dev_info.common.fw_major, 583 edev->dev_info.common.fw_minor, 584 edev->dev_info.common.fw_rev, 585 edev->dev_info.common.fw_eng); 586 587 snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", 588 (edev->dev_info.common.mfw_rev >> 24) & 0xFF, 589 (edev->dev_info.common.mfw_rev >> 16) & 0xFF, 590 (edev->dev_info.common.mfw_rev >> 8) & 0xFF, 591 edev->dev_info.common.mfw_rev & 0xFF); 592 593 if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) < 594 sizeof(info->fw_version)) { 595 snprintf(info->fw_version, sizeof(info->fw_version), 596 "mfw %s storm %s", mfw, storm); 597 } else { 598 snprintf(info->fw_version, 
sizeof(info->fw_version), 599 "%s %s", mfw, storm); 600 } 601 602 strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); 603 } 604 605 static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 606 { 607 struct qede_dev *edev = netdev_priv(ndev); 608 609 if (edev->dev_info.common.wol_support) { 610 wol->supported = WAKE_MAGIC; 611 wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0; 612 } 613 } 614 615 static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 616 { 617 struct qede_dev *edev = netdev_priv(ndev); 618 bool wol_requested; 619 int rc; 620 621 if (wol->wolopts & ~WAKE_MAGIC) { 622 DP_INFO(edev, 623 "Can't support WoL options other than magic-packet\n"); 624 return -EINVAL; 625 } 626 627 wol_requested = !!(wol->wolopts & WAKE_MAGIC); 628 if (wol_requested == edev->wol_enabled) 629 return 0; 630 631 /* Need to actually change configuration */ 632 if (!edev->dev_info.common.wol_support) { 633 DP_INFO(edev, "Device doesn't support WoL\n"); 634 return -EINVAL; 635 } 636 637 rc = edev->ops->common->update_wol(edev->cdev, wol_requested); 638 if (!rc) 639 edev->wol_enabled = wol_requested; 640 641 return rc; 642 } 643 644 static u32 qede_get_msglevel(struct net_device *ndev) 645 { 646 struct qede_dev *edev = netdev_priv(ndev); 647 648 return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module; 649 } 650 651 static void qede_set_msglevel(struct net_device *ndev, u32 level) 652 { 653 struct qede_dev *edev = netdev_priv(ndev); 654 u32 dp_module = 0; 655 u8 dp_level = 0; 656 657 qede_config_debug(level, &dp_module, &dp_level); 658 659 edev->dp_level = dp_level; 660 edev->dp_module = dp_module; 661 edev->ops->common->update_msglvl(edev->cdev, 662 dp_module, dp_level); 663 } 664 665 static int qede_nway_reset(struct net_device *dev) 666 { 667 struct qede_dev *edev = netdev_priv(dev); 668 struct qed_link_output current_link; 669 struct qed_link_params link_params; 670 671 if (!edev->ops || 
!edev->ops->common->can_link_change(edev->cdev)) { 672 DP_INFO(edev, "Link settings are not allowed to be changed\n"); 673 return -EOPNOTSUPP; 674 } 675 676 if (!netif_running(dev)) 677 return 0; 678 679 memset(¤t_link, 0, sizeof(current_link)); 680 edev->ops->common->get_link(edev->cdev, ¤t_link); 681 if (!current_link.link_up) 682 return 0; 683 684 /* Toggle the link */ 685 memset(&link_params, 0, sizeof(link_params)); 686 link_params.link_up = false; 687 edev->ops->common->set_link(edev->cdev, &link_params); 688 link_params.link_up = true; 689 edev->ops->common->set_link(edev->cdev, &link_params); 690 691 return 0; 692 } 693 694 static u32 qede_get_link(struct net_device *dev) 695 { 696 struct qede_dev *edev = netdev_priv(dev); 697 struct qed_link_output current_link; 698 699 memset(¤t_link, 0, sizeof(current_link)); 700 edev->ops->common->get_link(edev->cdev, ¤t_link); 701 702 return current_link.link_up; 703 } 704 705 static int qede_flash_device(struct net_device *dev, 706 struct ethtool_flash *flash) 707 { 708 struct qede_dev *edev = netdev_priv(dev); 709 710 return edev->ops->common->nvm_flash(edev->cdev, flash->data); 711 } 712 713 static int qede_get_coalesce(struct net_device *dev, 714 struct ethtool_coalesce *coal) 715 { 716 void *rx_handle = NULL, *tx_handle = NULL; 717 struct qede_dev *edev = netdev_priv(dev); 718 u16 rx_coal, tx_coal, i, rc = 0; 719 struct qede_fastpath *fp; 720 721 rx_coal = QED_DEFAULT_RX_USECS; 722 tx_coal = QED_DEFAULT_TX_USECS; 723 724 memset(coal, 0, sizeof(struct ethtool_coalesce)); 725 726 __qede_lock(edev); 727 if (edev->state == QEDE_STATE_OPEN) { 728 for_each_queue(i) { 729 fp = &edev->fp_array[i]; 730 731 if (fp->type & QEDE_FASTPATH_RX) { 732 rx_handle = fp->rxq->handle; 733 break; 734 } 735 } 736 737 rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); 738 if (rc) { 739 DP_INFO(edev, "Read Rx coalesce error\n"); 740 goto out; 741 } 742 743 for_each_queue(i) { 744 fp = &edev->fp_array[i]; 745 if (fp->type & 
QEDE_FASTPATH_TX) { 746 tx_handle = fp->txq->handle; 747 break; 748 } 749 } 750 751 rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); 752 if (rc) 753 DP_INFO(edev, "Read Tx coalesce error\n"); 754 } 755 756 out: 757 __qede_unlock(edev); 758 759 coal->rx_coalesce_usecs = rx_coal; 760 coal->tx_coalesce_usecs = tx_coal; 761 762 return rc; 763 } 764 765 static int qede_set_coalesce(struct net_device *dev, 766 struct ethtool_coalesce *coal) 767 { 768 struct qede_dev *edev = netdev_priv(dev); 769 struct qede_fastpath *fp; 770 int i, rc = 0; 771 u16 rxc, txc; 772 773 if (!netif_running(dev)) { 774 DP_INFO(edev, "Interface is down\n"); 775 return -EINVAL; 776 } 777 778 if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || 779 coal->tx_coalesce_usecs > QED_COALESCE_MAX) { 780 DP_INFO(edev, 781 "Can't support requested %s coalesce value [max supported value %d]\n", 782 coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" : 783 "tx", QED_COALESCE_MAX); 784 return -EINVAL; 785 } 786 787 rxc = (u16)coal->rx_coalesce_usecs; 788 txc = (u16)coal->tx_coalesce_usecs; 789 for_each_queue(i) { 790 fp = &edev->fp_array[i]; 791 792 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { 793 rc = edev->ops->common->set_coalesce(edev->cdev, 794 rxc, 0, 795 fp->rxq->handle); 796 if (rc) { 797 DP_INFO(edev, 798 "Set RX coalesce error, rc = %d\n", rc); 799 return rc; 800 } 801 } 802 803 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { 804 rc = edev->ops->common->set_coalesce(edev->cdev, 805 0, txc, 806 fp->txq->handle); 807 if (rc) { 808 DP_INFO(edev, 809 "Set TX coalesce error, rc = %d\n", rc); 810 return rc; 811 } 812 } 813 } 814 815 return rc; 816 } 817 818 static void qede_get_ringparam(struct net_device *dev, 819 struct ethtool_ringparam *ering) 820 { 821 struct qede_dev *edev = netdev_priv(dev); 822 823 ering->rx_max_pending = NUM_RX_BDS_MAX; 824 ering->rx_pending = edev->q_num_rx_buffers; 825 ering->tx_max_pending = NUM_TX_BDS_MAX; 826 ering->tx_pending = edev->q_num_tx_buffers; 827 } 
828 829 static int qede_set_ringparam(struct net_device *dev, 830 struct ethtool_ringparam *ering) 831 { 832 struct qede_dev *edev = netdev_priv(dev); 833 834 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 835 "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n", 836 ering->rx_pending, ering->tx_pending); 837 838 /* Validate legality of configuration */ 839 if (ering->rx_pending > NUM_RX_BDS_MAX || 840 ering->rx_pending < NUM_RX_BDS_MIN || 841 ering->tx_pending > NUM_TX_BDS_MAX || 842 ering->tx_pending < NUM_TX_BDS_MIN) { 843 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 844 "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n", 845 NUM_RX_BDS_MIN, NUM_RX_BDS_MAX, 846 NUM_TX_BDS_MIN, NUM_TX_BDS_MAX); 847 return -EINVAL; 848 } 849 850 /* Change ring size and re-load */ 851 edev->q_num_rx_buffers = ering->rx_pending; 852 edev->q_num_tx_buffers = ering->tx_pending; 853 854 qede_reload(edev, NULL, false); 855 856 return 0; 857 } 858 859 static void qede_get_pauseparam(struct net_device *dev, 860 struct ethtool_pauseparam *epause) 861 { 862 struct qede_dev *edev = netdev_priv(dev); 863 struct qed_link_output current_link; 864 865 memset(¤t_link, 0, sizeof(current_link)); 866 edev->ops->common->get_link(edev->cdev, ¤t_link); 867 868 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 869 epause->autoneg = true; 870 if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 871 epause->rx_pause = true; 872 if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 873 epause->tx_pause = true; 874 875 DP_VERBOSE(edev, QED_MSG_DEBUG, 876 "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n", 877 epause->cmd, epause->autoneg, epause->rx_pause, 878 epause->tx_pause); 879 } 880 881 static int qede_set_pauseparam(struct net_device *dev, 882 struct ethtool_pauseparam *epause) 883 { 884 struct qede_dev *edev = netdev_priv(dev); 885 struct qed_link_params params; 886 struct 
qed_link_output current_link; 887 888 if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { 889 DP_INFO(edev, 890 "Pause settings are not allowed to be changed\n"); 891 return -EOPNOTSUPP; 892 } 893 894 memset(¤t_link, 0, sizeof(current_link)); 895 edev->ops->common->get_link(edev->cdev, ¤t_link); 896 897 memset(¶ms, 0, sizeof(params)); 898 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 899 if (epause->autoneg) { 900 if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { 901 DP_INFO(edev, "autoneg not supported\n"); 902 return -EINVAL; 903 } 904 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 905 } 906 if (epause->rx_pause) 907 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 908 if (epause->tx_pause) 909 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 910 911 params.link_up = true; 912 edev->ops->common->set_link(edev->cdev, ¶ms); 913 914 return 0; 915 } 916 917 static void qede_get_regs(struct net_device *ndev, 918 struct ethtool_regs *regs, void *buffer) 919 { 920 struct qede_dev *edev = netdev_priv(ndev); 921 922 regs->version = 0; 923 memset(buffer, 0, regs->len); 924 925 if (edev->ops && edev->ops->common) 926 edev->ops->common->dbg_all_data(edev->cdev, buffer); 927 } 928 929 static int qede_get_regs_len(struct net_device *ndev) 930 { 931 struct qede_dev *edev = netdev_priv(ndev); 932 933 if (edev->ops && edev->ops->common) 934 return edev->ops->common->dbg_all_data_size(edev->cdev); 935 else 936 return -EINVAL; 937 } 938 939 static void qede_update_mtu(struct qede_dev *edev, 940 struct qede_reload_args *args) 941 { 942 edev->ndev->mtu = args->u.mtu; 943 } 944 945 /* Netdevice NDOs */ 946 int qede_change_mtu(struct net_device *ndev, int new_mtu) 947 { 948 struct qede_dev *edev = netdev_priv(ndev); 949 struct qede_reload_args args; 950 951 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 952 "Configuring MTU size of %d\n", new_mtu); 953 954 if (new_mtu > PAGE_SIZE) 955 ndev->features &= ~NETIF_F_GRO_HW; 956 957 /* Set 
the mtu field and re-start the interface if needed */ 958 args.u.mtu = new_mtu; 959 args.func = &qede_update_mtu; 960 qede_reload(edev, &args, false); 961 962 edev->ops->common->update_mtu(edev->cdev, new_mtu); 963 964 return 0; 965 } 966 967 static void qede_get_channels(struct net_device *dev, 968 struct ethtool_channels *channels) 969 { 970 struct qede_dev *edev = netdev_priv(dev); 971 972 channels->max_combined = QEDE_MAX_RSS_CNT(edev); 973 channels->max_rx = QEDE_MAX_RSS_CNT(edev); 974 channels->max_tx = QEDE_MAX_RSS_CNT(edev); 975 channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - 976 edev->fp_num_rx; 977 channels->tx_count = edev->fp_num_tx; 978 channels->rx_count = edev->fp_num_rx; 979 } 980 981 static int qede_set_channels(struct net_device *dev, 982 struct ethtool_channels *channels) 983 { 984 struct qede_dev *edev = netdev_priv(dev); 985 u32 count; 986 987 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 988 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", 989 channels->rx_count, channels->tx_count, 990 channels->other_count, channels->combined_count); 991 992 count = channels->rx_count + channels->tx_count + 993 channels->combined_count; 994 995 /* We don't support `other' channels */ 996 if (channels->other_count) { 997 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 998 "command parameters not supported\n"); 999 return -EINVAL; 1000 } 1001 1002 if (!(channels->combined_count || (channels->rx_count && 1003 channels->tx_count))) { 1004 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 1005 "need to request at least one transmit and one receive channel\n"); 1006 return -EINVAL; 1007 } 1008 1009 if (count > QEDE_MAX_RSS_CNT(edev)) { 1010 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 1011 "requested channels = %d max supported channels = %d\n", 1012 count, QEDE_MAX_RSS_CNT(edev)); 1013 return -EINVAL; 1014 } 1015 1016 /* Check if there was a change in the active parameters */ 1017 if 
((count == QEDE_QUEUE_CNT(edev)) && 1018 (channels->tx_count == edev->fp_num_tx) && 1019 (channels->rx_count == edev->fp_num_rx)) { 1020 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 1021 "No change in active parameters\n"); 1022 return 0; 1023 } 1024 1025 /* We need the number of queues to be divisible between the hwfns */ 1026 if ((count % edev->dev_info.common.num_hwfns) || 1027 (channels->tx_count % edev->dev_info.common.num_hwfns) || 1028 (channels->rx_count % edev->dev_info.common.num_hwfns)) { 1029 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), 1030 "Number of channels must be divisible by %04x\n", 1031 edev->dev_info.common.num_hwfns); 1032 return -EINVAL; 1033 } 1034 1035 /* Set number of queues and reload if necessary */ 1036 edev->req_queues = count; 1037 edev->req_num_tx = channels->tx_count; 1038 edev->req_num_rx = channels->rx_count; 1039 /* Reset the indirection table if rx queue count is updated */ 1040 if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { 1041 edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; 1042 memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table)); 1043 } 1044 1045 qede_reload(edev, NULL, false); 1046 1047 return 0; 1048 } 1049 1050 static int qede_get_ts_info(struct net_device *dev, 1051 struct ethtool_ts_info *info) 1052 { 1053 struct qede_dev *edev = netdev_priv(dev); 1054 1055 return qede_ptp_get_ts_info(edev, info); 1056 } 1057 1058 static int qede_set_phys_id(struct net_device *dev, 1059 enum ethtool_phys_id_state state) 1060 { 1061 struct qede_dev *edev = netdev_priv(dev); 1062 u8 led_state = 0; 1063 1064 switch (state) { 1065 case ETHTOOL_ID_ACTIVE: 1066 return 1; /* cycle on/off once per second */ 1067 1068 case ETHTOOL_ID_ON: 1069 led_state = QED_LED_MODE_ON; 1070 break; 1071 1072 case ETHTOOL_ID_OFF: 1073 led_state = QED_LED_MODE_OFF; 1074 break; 1075 1076 case ETHTOOL_ID_INACTIVE: 1077 led_state = QED_LED_MODE_RESTORE; 1078 break; 1079 } 1080 1081 
	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

/* ethtool -n ... rx-flow-hash: report which header fields feed the RSS
 * hash for the given flow type.
 */
static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	/* IP source/destination addresses are always part of the hash */
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* TCP always hashes the L4 ports as well (4-tuple) */
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		/* UDP L4 ports are hashed only when the capability is set */
		if (edev->rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* 2-tuple (addresses only) for plain IP flows */
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

/* Dispatch ETHTOOL_GRX* queries: ring count, hash fields and
 * classification (aRFS) rules.
 */
static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = QEDE_RSS_COUNT(edev);
		break;
	case ETHTOOL_GRXFH:
		rc = qede_get_rss_flags(edev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		/* Current rule count and maximal table capacity */
		info->rule_cnt = qede_get_arfs_filter_count(edev);
		info->data = QEDE_RFS_MAX_FLTR;
		break;
	case ETHTOOL_GRXCLSRULE:
		rc = qede_get_cls_rule_entry(edev, info);
		break;
	case ETHTOOL_GRXCLSRLALL:
		rc = qede_get_cls_rule_all(edev, info, rule_locs);
		break;
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}

/* ethtool -N ... rx-flow-hash: configure which header fields feed the RSS
 * hash for a flow type, re-applying the vport configuration if the device
 * is currently up.
 */
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params *vport_update_params;
	u8 set_caps = 0, clr_caps = 0;
	int rc = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change
	 * in the rss capability */
	if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		vport_update_params = vzalloc(sizeof(*vport_update_params));
		if (!vport_update_params) {
			__qede_unlock(edev);
			return -ENOMEM;
		}
		qede_fill_rss_params(edev, &vport_update_params->rss_params,
				     &vport_update_params->update_rss_flg);
		rc = edev->ops->vport_update(edev->cdev, vport_update_params);
		vfree(vport_update_params);
	}
	__qede_unlock(edev);

	return rc;
}

/* Dispatch ETHTOOL_SRX* commands: hash fields and classification rules */
static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = qede_set_rss_flags(edev, info);
		break;
	case ETHTOOL_SRXCLSRLINS:
		rc = qede_add_cls_rule(edev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		rc = qede_del_cls_rule(edev, info);
		break;
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}

/* Number of entries in the RSS indirection table */
static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}

/* Size in bytes of the RSS hash key */
static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_key);
}

/* ethtool -x: report the RSS indirection table, hash key and hash function */
static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	/* Toeplitz is the only hash function used by this driver */
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		indir[i] = edev->rss_ind_table[i];

	if (key)
		memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));

	return 0;
}

/* ethtool -X: update the RSS indirection table and/or hash key, then
 * re-apply the vport configuration if the device is up.
 */
static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct qed_update_vport_params *vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i, rc = 0;

	if (edev->dev_info.common.num_hwfns > 1) {
		DP_INFO(edev,
			"RSS configuration is not supported for 100G devices\n");
		return -EOPNOTSUPP;
	}

	/* Only the Toeplitz hash function is supported */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!indir && !key)
		return 0;

	if (indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_ind_table[i] = indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (key) {
		memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	/* Re-configure the vport only when the device is open */
	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		vport_update_params = vzalloc(sizeof(*vport_update_params));
		if (!vport_update_params) {
			__qede_unlock(edev);
			return -ENOMEM;
		}
		qede_fill_rss_params(edev, &vport_update_params->rss_params,
				     &vport_update_params->update_rss_flg);
		rc = edev->ops->vport_update(edev->cdev, vport_update_params);
		vfree(vport_update_params);
	}
	__qede_unlock(edev);

	return rc;
}

/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_queue(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}

/* This function disables the NAPI and the interrupt generation on the
 * device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}

/* Loopback selftest Tx path: post @skb as a single-BD packet on the first
 * available Tx queue, ring the doorbell and poll for its completion.
 * Returns 0 on success, -ENOMEM on a DMA mapping failure, -1 otherwise.
 */
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = NULL;
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx;
	u16 val;

	/* Use the first fastpath that carries a Tx queue */
	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			txq = edev->fp_array[i].txq;
			break;
		}
	}

	if (!txq) {
		DP_NOTICE(edev, "Tx path is not available\n");
		return -1;
	}

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;
	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
	val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	first_bd->data.bitfields |= cpu_to_le16(val);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
	/* 'next page' entries are counted in the producer value */
	val = qed_chain_get_prod_idx(&txq->tx_pbl);
	txq->tx_db.data.bd_prod = cpu_to_le16(val);

	/* wmb makes sure that the BDs data is updated before
	 * updating the producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	/* Busy-poll (with short sleeps) for the transmit completion */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	/* Completion arrived - unmap the buffer and release the SW ring slot */
	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
	txq->sw_tx_ring.skbs[idx].skb = NULL;

	return 0;
}

/* Loopback selftest Rx path: poll the first Rx queue for the looped-back
 * packet and verify that its addresses and payload pattern match what
 * qede_selftest_run_loopback() transmitted. Returns 0 on success, -1 on
 * a content mismatch or timeout.
 */
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct qede_rx_queue *rxq = NULL;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	int i, iter, rc = 0;
	u8 *data_ptr;

	/* Use the first fastpath that carries an Rx queue */
	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			rxq = edev->fp_array[i].rxq;
			break;
		}
	}

	if (!rxq) {
		DP_NOTICE(edev, "Rx path is not available\n");
		return -1;
	}

	/* The packet is expected to receive on rx-queue 0 even though RSS is
	 * enabled. This is because the queue 0 is configured as the default
	 * queue and that the loopback traffic is not IP.
	 */
	for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
		if (!qede_has_rx_work(rxq)) {
			usleep_range(100, 200);
			continue;
		}

		hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

		/* Memory barrier to prevent the CPU from doing speculative
		 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
		 * read before it is written by FW, then FW writes CQE and SB,
		 * and then the CPU reads the hw_comp_cons, it will use an old
		 * CQE.
		 */
		rmb();

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->len_on_first_bd);
		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
				  fp_cqe->placement_offset +
				  sw_rx_data->page_offset +
				  rxq->rx_headroom);
		/* Our loopback packet carries the device MAC as both
		 * destination and source address.
		 */
		if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
		    ether_addr_equal(data_ptr + ETH_ALEN,
				     edev->ndev->dev_addr)) {
			/* Verify the incrementing-byte payload pattern */
			for (i = ETH_HLEN; i < len; i++)
				if (data_ptr[i] != (unsigned char)(i & 0xff)) {
					rc = -1;
					break;
				}

			qede_recycle_rx_bd_ring(rxq, 1);
			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
			break;
		}

		DP_INFO(edev, "Not the transmitted packet\n");
		qede_recycle_rx_bd_ring(rxq, 1);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
	}

	if (iter == QEDE_SELFTEST_POLL_COUNT) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	qede_update_rx_prod(edev, rxq);

	return rc;
}

/* Run a loopback test: bring the link up in the requested loopback mode,
 * transmit one pattern packet, verify it is received back intact, then
 * restore normal link mode and restart NAPI/interrupts.
 */
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* prepare the loopback packet */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	/* Device MAC as both destination and source, followed by an
	 * incrementing byte pattern the Rx side can verify.
	 */
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}

/* ethtool -t: run the device self-tests; failures are reported both via
 * @buf entries and ETH_TEST_FL_FAILED.
 */
static void
qede_self_test(struct net_device *dev, 1615 struct ethtool_test *etest, u64 *buf) 1616 { 1617 struct qede_dev *edev = netdev_priv(dev); 1618 1619 DP_VERBOSE(edev, QED_MSG_DEBUG, 1620 "Self-test command parameters: offline = %d, external_lb = %d\n", 1621 (etest->flags & ETH_TEST_FL_OFFLINE), 1622 (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2); 1623 1624 memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX); 1625 1626 if (etest->flags & ETH_TEST_FL_OFFLINE) { 1627 if (qede_selftest_run_loopback(edev, 1628 QED_LINK_LOOPBACK_INT_PHY)) { 1629 buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1; 1630 etest->flags |= ETH_TEST_FL_FAILED; 1631 } 1632 } 1633 1634 if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) { 1635 buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1; 1636 etest->flags |= ETH_TEST_FL_FAILED; 1637 } 1638 1639 if (edev->ops->common->selftest->selftest_memory(edev->cdev)) { 1640 buf[QEDE_ETHTOOL_MEMORY_TEST] = 1; 1641 etest->flags |= ETH_TEST_FL_FAILED; 1642 } 1643 1644 if (edev->ops->common->selftest->selftest_register(edev->cdev)) { 1645 buf[QEDE_ETHTOOL_REGISTER_TEST] = 1; 1646 etest->flags |= ETH_TEST_FL_FAILED; 1647 } 1648 1649 if (edev->ops->common->selftest->selftest_clock(edev->cdev)) { 1650 buf[QEDE_ETHTOOL_CLOCK_TEST] = 1; 1651 etest->flags |= ETH_TEST_FL_FAILED; 1652 } 1653 1654 if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) { 1655 buf[QEDE_ETHTOOL_NVRAM_TEST] = 1; 1656 etest->flags |= ETH_TEST_FL_FAILED; 1657 } 1658 } 1659 1660 static int qede_set_tunable(struct net_device *dev, 1661 const struct ethtool_tunable *tuna, 1662 const void *data) 1663 { 1664 struct qede_dev *edev = netdev_priv(dev); 1665 u32 val; 1666 1667 switch (tuna->id) { 1668 case ETHTOOL_RX_COPYBREAK: 1669 val = *(u32 *)data; 1670 if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) { 1671 DP_VERBOSE(edev, QED_MSG_DEBUG, 1672 "Invalid rx copy break value, range is [%u, %u]", 1673 QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE); 1674 return -EINVAL; 1675 } 1676 1677 edev->rx_copybreak = *(u32 
*)data; 1678 break; 1679 default: 1680 return -EOPNOTSUPP; 1681 } 1682 1683 return 0; 1684 } 1685 1686 static int qede_get_tunable(struct net_device *dev, 1687 const struct ethtool_tunable *tuna, void *data) 1688 { 1689 struct qede_dev *edev = netdev_priv(dev); 1690 1691 switch (tuna->id) { 1692 case ETHTOOL_RX_COPYBREAK: 1693 *(u32 *)data = edev->rx_copybreak; 1694 break; 1695 default: 1696 return -EOPNOTSUPP; 1697 } 1698 1699 return 0; 1700 } 1701 1702 static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) 1703 { 1704 struct qede_dev *edev = netdev_priv(dev); 1705 struct qed_link_output current_link; 1706 1707 memset(¤t_link, 0, sizeof(current_link)); 1708 edev->ops->common->get_link(edev->cdev, ¤t_link); 1709 1710 if (!current_link.eee_supported) { 1711 DP_INFO(edev, "EEE is not supported\n"); 1712 return -EOPNOTSUPP; 1713 } 1714 1715 if (current_link.eee.adv_caps & QED_EEE_1G_ADV) 1716 edata->advertised = ADVERTISED_1000baseT_Full; 1717 if (current_link.eee.adv_caps & QED_EEE_10G_ADV) 1718 edata->advertised |= ADVERTISED_10000baseT_Full; 1719 if (current_link.sup_caps & QED_EEE_1G_ADV) 1720 edata->supported = ADVERTISED_1000baseT_Full; 1721 if (current_link.sup_caps & QED_EEE_10G_ADV) 1722 edata->supported |= ADVERTISED_10000baseT_Full; 1723 if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) 1724 edata->lp_advertised = ADVERTISED_1000baseT_Full; 1725 if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) 1726 edata->lp_advertised |= ADVERTISED_10000baseT_Full; 1727 1728 edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; 1729 edata->eee_enabled = current_link.eee.enable; 1730 edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable; 1731 edata->eee_active = current_link.eee_active; 1732 1733 return 0; 1734 } 1735 1736 static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) 1737 { 1738 struct qede_dev *edev = netdev_priv(dev); 1739 struct qed_link_output current_link; 1740 struct qed_link_params params; 1741 1742 if 
(!edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (!current_link.eee_supported) {
		DP_INFO(edev, "EEE is not supported\n");
		return -EOPNOTSUPP;
	}

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;

	/* Only 1G/10G advertisement is supported: at least one of the two
	 * must be requested, and nothing outside them may be set.
	 */
	if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
				   ADVERTISED_10000baseT_Full)) ||
	    ((edata->advertised & (ADVERTISED_1000baseT_Full |
				   ADVERTISED_10000baseT_Full)) !=
	     edata->advertised)) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Invalid advertised capabilities %d\n",
			   edata->advertised);
		return -EINVAL;
	}

	if (edata->advertised & ADVERTISED_1000baseT_Full)
		params.eee.adv_caps = QED_EEE_1G_ADV;
	if (edata->advertised & ADVERTISED_10000baseT_Full)
		params.eee.adv_caps |= QED_EEE_10G_ADV;
	params.eee.enable = edata->eee_enabled;
	params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
	params.eee.tx_lpi_timer = edata->tx_lpi_timer;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

/* Full ethtool operation set, used for PF netdevices */
static const struct ethtool_ops qede_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.set_link_ksettings = qede_set_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_regs_len = qede_get_regs_len,
	.get_regs = qede_get_regs,
	.get_wol = qede_get_wol,
	.set_wol = qede_set_wol,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_coalesce = qede_get_coalesce,
	.set_coalesce = qede_set_coalesce,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_ts_info = qede_get_ts_info,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
	.get_eee = qede_get_eee,
	.set_eee = qede_set_eee,

	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
	.flash_device = qede_flash_device,
};

/* Reduced operation set for VF netdevices - no link control, WoL, regs,
 * LED, self-test, timestamping or EEE.
 */
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_coalesce = qede_get_coalesce,
	.set_coalesce = qede_set_coalesce,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};

/* Install the ethtool operations matching the function type (PF or VF) */
void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}