/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include "qede.h"

#define QEDE_RQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}

#define QEDE_SELFTEST_POLL_COUNT 100

static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rcv_pkts),
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
	QEDE_RQSTAT(rx_ip_frags),
	QEDE_RQSTAT(xdp_no_pass),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_TQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_TQSTAT(stat_name) \
	{QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_tqstats_arr[] = {
	QEDE_TQSTAT(xmit_pkts),
	QEDE_TQSTAT(stopped_cnt),
};

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	{QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name)	_QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name)	_QEDE_STAT(stat_name, false)
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	bool pf_only;
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_65_to_127_byte_packets),
	QEDE_PF_STAT(rx_128_to_255_byte_packets),
	QEDE_PF_STAT(rx_256_to_511_byte_packets),
	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
	QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),

	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_STAT(tx_lpi_entry_count),
	QEDE_PF_STAT(tx_total_collisions),
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_STAT(tx_err_drop_pkts),
	QEDE_STAT(ttl0_discard),
	QEDE_STAT(packet_too_big_discard),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
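
/* The tables above pair an offsetof() byte offset into the backing
 * structure with the name ethtool should print for it, so adding a stat
 * means adding exactly one table entry.  A minimal sketch of how an entry
 * is consumed (mirroring the stat readers further down; the loop index is
 * illustrative only):
 *
 *	u64 val = *(u64 *)((void *)&edev->stats + qede_stats_arr[i].offset);
 */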

enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_LEN,
};

static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"Coupled-Function",
};

enum qede_ethtool_tests {
	QEDE_ETHTOOL_INT_LOOPBACK,
	QEDE_ETHTOOL_INTERRUPT_TEST,
	QEDE_ETHTOOL_MEMORY_TEST,
	QEDE_ETHTOOL_REGISTER_TEST,
	QEDE_ETHTOOL_CLOCK_TEST,
	QEDE_ETHTOOL_NVRAM_TEST,
	QEDE_ETHTOOL_TEST_MAX
};

static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Internal loopback (offline)",
	"Interrupt (online)\t",
	"Memory (online)\t\t",
	"Register (online)\t",
	"Clock (online)\t\t",
	"Nvram (online)\t\t",
};

static void qede_get_strings_stats_txq(struct qede_dev *edev,
				       struct qede_tx_queue *txq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		if (txq->is_xdp)
			sprintf(*buf, "%d [XDP]: %s",
				QEDE_TXQ_XDP_TO_IDX(edev, txq),
				qede_tqstats_arr[i].string);
		else
			sprintf(*buf, "%d: %s", txq->index,
				qede_tqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}

static void qede_get_strings_stats_rxq(struct qede_dev *edev,
				       struct qede_rx_queue *rxq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		sprintf(*buf, "%d: %s", rxq->rxq_id,
			qede_rqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}

static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	struct qede_fastpath *fp;
	int i;

	/* Account for queue statistics */
	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_strings_stats_txq(edev, fp->txq, &buf);
	}

	/* Account for non-queue statistics */
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		strcpy(buf, qede_stats_arr[i].string);
		buf += ETH_GSTRING_LEN;
	}
}

static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, qede_private_arr,
		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
		break;
	case ETH_SS_TEST:
		memcpy(buf, qede_tests_str_arr,
		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}
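
/* The buffer filled above is what `ethtool -S <iface>` displays: per-queue
 * counters prefixed by their queue index ("0: rcv_pkts", or "1 [XDP]:
 * xmit_pkts" for an XDP forwarding queue), followed by the flat device-wide
 * counters.  PF-only counters are skipped entirely for VFs, matching the
 * count returned by qede_get_sset_count() below.
 */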

static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		**buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
		(*buf)++;
	}
}

static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
		(*buf)++;
	}
}

static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	int i;

	qede_fill_by_demand_stats(edev);

	/* Need to protect the access to the fastpath array */
	__qede_lock(edev);

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_ethtool_stats_rxq(fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_ethtool_stats_txq(fp->txq, &buf);
	}

	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		*buf = *((u64 *)(((void *)&edev->stats) +
				 qede_stats_arr[i].offset));

		buf++;
	}

	__qede_unlock(edev);
}

static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		if (IS_VF(edev)) {
			int i;

			for (i = 0; i < QEDE_NUM_STATS; i++)
				if (qede_stats_arr[i].pf_only)
					num_stats--;
		}

		/* Account for the Regular Tx statistics */
		num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;

		/* Account for the Regular Rx statistics */
		num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;

		/* Account for XDP statistics [if needed] */
		if (edev->xdp_prog)
			num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
		return num_stats;

	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		if (!IS_VF(edev))
			return QEDE_ETHTOOL_TEST_MAX;
		else
			return 0;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}

static u32 qede_get_priv_flags(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}

struct qede_link_mode_mapping {
	u32 qed_link_mode;
	u32 ethtool_link_mode;
};

static const struct qede_link_mode_mapping qed_lm_map[] = {
	{QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
	{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
	{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
	{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
	{QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
	{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
	{QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{QED_LM_100000baseKR4_Full_BIT,
	 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
};
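
/* qed_lm_map translates between the qed-internal QED_LM_* capability bits
 * and the kernel's ETHTOOL_LINK_MODE_* bit numbers.  The two macros that
 * follow walk the table in either direction over a ksettings link-mode
 * bitmap, e.g. (mirroring the calls in qede_get_link_ksettings()):
 *
 *	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 *	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
 */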

#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name)	\
{								\
	int i;							\
								\
	for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {		\
		if ((caps) & (qed_lm_map[i].qed_link_mode))	\
			__set_bit(qed_lm_map[i].ethtool_link_mode,\
				  lk_ksettings->link_modes.name); \
	}							\
}

#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name)	\
{								\
	int i;							\
								\
	for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {		\
		if (test_bit(qed_lm_map[i].ethtool_link_mode,	\
			     lk_ksettings->link_modes.name))	\
			caps |= qed_lm_map[i].qed_link_mode;	\
	}							\
}

static int qede_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_settings *base = &cmd->base;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	__qede_lock(edev);

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)

	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)

	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
		base->speed = current_link.speed;
		base->duplex = current_link.duplex;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	__qede_unlock(edev);

	base->port = current_link.port;
	base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						 AUTONEG_DISABLE;

	return 0;
}
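
/* This is the read side of a plain `ethtool <iface>` query: the
 * supported/advertised/partner masks always come from the management FW
 * via get_link(), while speed and duplex are reported only when the
 * interface is open and link is up.
 */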

static int qede_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	const struct ethtool_link_settings *base = &cmd->base;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}
	memset(&current_link, 0, sizeof(current_link));
	memset(&params, 0, sizeof(params));
	edev->ops->common->get_link(edev->cdev, &current_link);

	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
	if (base->autoneg == AUTONEG_ENABLE) {
		params.autoneg = true;
		params.forced_speed = 0;
		QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
	} else {		/* forced speed */
		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
		params.autoneg = false;
		params.forced_speed = base->speed;
		switch (base->speed) {
		case SPEED_10000:
			if (!(current_link.supported_caps &
			      QED_LM_10000baseKR_Full_BIT)) {
				DP_INFO(edev, "10G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
			break;
		case SPEED_25000:
			if (!(current_link.supported_caps &
			      QED_LM_25000baseKR_Full_BIT)) {
				DP_INFO(edev, "25G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
			break;
		case SPEED_40000:
			if (!(current_link.supported_caps &
			      QED_LM_40000baseLR4_Full_BIT)) {
				DP_INFO(edev, "40G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
			break;
		case SPEED_50000:
			if (!(current_link.supported_caps &
			      QED_LM_50000baseKR2_Full_BIT)) {
				DP_INFO(edev, "50G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
			break;
		case SPEED_100000:
			if (!(current_link.supported_caps &
			      QED_LM_100000baseKR4_Full_BIT)) {
				DP_INFO(edev, "100G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
			break;
		default:
			DP_INFO(edev, "Unsupported speed %u\n", base->speed);
			return -EINVAL;
		}
	}

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);

	strlcpy(info->driver, "qede", sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
	    sizeof(info->fw_version)) {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s storm %s", mfw, storm);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %s", mfw, storm);
	}

	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
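
/* Example of the two paths through qede_set_link_ksettings() above; the
 * interface name is illustrative only:
 *
 *	# ethtool -s eth0 autoneg on			(advertised mask)
 *	# ethtool -s eth0 autoneg off speed 25000	(forced speed)
 *
 * A forced speed is accepted only when the matching QED_LM_* bit is set in
 * the device's supported capabilities.
 */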

static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (edev->dev_info.common.wol_support) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
	}
}

static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct qede_dev *edev = netdev_priv(ndev);
	bool wol_requested;
	int rc;

	if (wol->wolopts & ~WAKE_MAGIC) {
		DP_INFO(edev,
			"Can't support WoL options other than magic-packet\n");
		return -EINVAL;
	}

	wol_requested = !!(wol->wolopts & WAKE_MAGIC);
	if (wol_requested == edev->wol_enabled)
		return 0;

	/* Need to actually change configuration */
	if (!edev->dev_info.common.wol_support) {
		DP_INFO(edev, "Device doesn't support WoL\n");
		return -EINVAL;
	}

	rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
	if (!rc)
		edev->wol_enabled = wol_requested;

	return rc;
}

static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}

static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}

static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}

static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}

static int qede_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);
	u16 rxc, txc;

	memset(coal, 0, sizeof(struct ethtool_coalesce));
	edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);

	coal->rx_coalesce_usecs = rxc;
	coal->tx_coalesce_usecs = txc;

	return 0;
}
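
/* Coalescing is exposed in microseconds (`ethtool -c/-C`).  The set path
 * below applies a single rx/tx pair to every status block, so per-queue
 * values cannot be configured through this interface.  Example values and
 * interface name are illustrative:
 *
 *	# ethtool -C eth0 rx-usecs 24 tx-usecs 48
 */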

static int qede_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i, rc = 0;
	u16 rxc, txc;
	u8 sb_id;

	if (!netif_running(dev)) {
		DP_INFO(edev, "Interface is down\n");
		return -EINVAL;
	}

	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
		DP_INFO(edev,
			"Can't support requested %s coalesce value [max supported value %d]\n",
			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
								   : "tx",
			QED_COALESCE_MAX);
		return -EINVAL;
	}

	rxc = (u16)coal->rx_coalesce_usecs;
	txc = (u16)coal->tx_coalesce_usecs;
	for_each_queue(i) {
		sb_id = edev->fp_array[i].sb_info->igu_sb_id;
		rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
						     (u8)i, sb_id);
		if (rc) {
			DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
			return rc;
		}
	}

	return rc;
}

static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}

static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	qede_reload(edev, NULL, false);

	return 0;
}
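
/* Ring sizes map to `ethtool -g/-G` and take effect through a full reload;
 * the values must lie within [NUM_RX_BDS_MIN, NUM_RX_BDS_MAX] and
 * [NUM_TX_BDS_MIN, NUM_TX_BDS_MAX] respectively.  Example (interface name
 * and sizes illustrative):
 *
 *	# ethtool -G eth0 rx 4096 tx 4096
 */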

static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}

static int qede_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;
	struct qed_link_output current_link;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Pause settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (epause->autoneg) {
		if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
			DP_INFO(edev, "autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (epause->rx_pause)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (epause->tx_pause)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}

static void qede_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *buffer)
{
	struct qede_dev *edev = netdev_priv(ndev);

	regs->version = 0;
	memset(buffer, 0, regs->len);

	if (edev->ops && edev->ops->common)
		edev->ops->common->dbg_all_data(edev->cdev, buffer);
}

static int qede_get_regs_len(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (edev->ops && edev->ops->common)
		return edev->ops->common->dbg_all_data_size(edev->cdev);
	else
		return -EINVAL;
}

static void qede_update_mtu(struct qede_dev *edev,
			    struct qede_reload_args *args)
{
	edev->ndev->mtu = args->u.mtu;
}

/* Netdevice NDOs */
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct qede_reload_args args;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the mtu field and re-start the interface if needed */
	args.u.mtu = new_mtu;
	args.func = &qede_update_mtu;
	qede_reload(edev, &args, false);

	edev->ops->common->update_mtu(edev->cdev, new_mtu);

	return 0;
}

static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->max_rx = QEDE_MAX_RSS_CNT(edev);
	channels->max_tx = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
				   edev->fp_num_rx;
	channels->tx_count = edev->fp_num_tx;
	channels->rx_count = edev->fp_num_rx;
}

static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 count;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	count = channels->rx_count + channels->tx_count +
		channels->combined_count;

	/* We don't support `other' channels */
	if (channels->other_count) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	if (!(channels->combined_count || (channels->rx_count &&
					   channels->tx_count))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "need to request at least one transmit and one receive channel\n");
		return -EINVAL;
	}

	if (count > QEDE_MAX_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "requested channels = %d max supported channels = %d\n",
			   count, QEDE_MAX_RSS_CNT(edev));
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if ((count == QEDE_QUEUE_CNT(edev)) &&
	    (channels->tx_count == edev->fp_num_tx) &&
	    (channels->rx_count == edev->fp_num_rx)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible by the number of hwfns */
	if ((count % edev->dev_info.common.num_hwfns) ||
	    (channels->tx_count % edev->dev_info.common.num_hwfns) ||
	    (channels->rx_count % edev->dev_info.common.num_hwfns)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_queues = count;
	edev->req_num_tx = channels->tx_count;
	edev->req_num_rx = channels->rx_count;
	/* Reset the indirection table if rx queue count is updated */
	if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
		edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
		memset(&edev->rss_params.rss_ind_table, 0,
		       sizeof(edev->rss_params.rss_ind_table));
	}

	qede_reload(edev, NULL, false);

	return 0;
}
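
/* Channel counts (`ethtool -l/-L`) must divide evenly among the hardware
 * functions (two on a "Coupled-Function" 100G device), and changing the Rx
 * queue count invalidates the RSS indirection table, which is rebuilt on
 * reload.  Example (illustrative):
 *
 *	# ethtool -L eth0 combined 8
 */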

static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rules __always_unused)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = QEDE_RSS_COUNT(edev);
		return 0;
	case ETHTOOL_GRXFH:
		return qede_get_rss_flags(edev, info);
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params vport_update_params;
	u8 set_caps = 0, clr_caps = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
					   ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
				    set_caps;
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}

static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return qede_set_rss_flags(edev, info);
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}

static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_params.rss_key);
}

static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		indir[i] = edev->rss_params.rss_ind_table[i];

	if (key)
		memcpy(key, edev->rss_params.rss_key,
		       qede_get_rxfh_key_size(dev));

	return 0;
}
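
/* The flow-hash interface above maps to `ethtool -n/-N <iface>
 * rx-flow-hash`.  Only UDP can be switched between 2-tuple and 4-tuple
 * hashing; TCP is always 4-tuple and plain IP always 2-tuple.  Example
 * (interface name illustrative):
 *
 *	# ethtool -N eth0 rx-flow-hash udp4 sdfn
 */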

static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (edev->dev_info.common.num_hwfns > 1) {
		DP_INFO(edev,
			"RSS configuration is not supported for 100G devices\n");
		return -EOPNOTSUPP;
	}

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!indir && !key)
		return 0;

	if (indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_params.rss_ind_table[i] = indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (key) {
		memcpy(&edev->rss_params.rss_key, key,
		       qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}
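
/* The RSS indirection table and hash key map to `ethtool -x/-X`.  As
 * qede_set_rxfh() above enforces, devices with more than one hardware
 * function (100G) reject RSS reconfiguration with -EOPNOTSUPP.  Example
 * (illustrative):
 *
 *	# ethtool -X eth0 equal 4
 */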

/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_queue(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}

/* This function disables the NAPI and the interrupt generation on the device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}

static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = NULL;
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx, val;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			txq = edev->fp_array[i].txq;
			break;
		}
	}

	if (!txq) {
		DP_NOTICE(edev, "Tx path is not available\n");
		return -1;
	}

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;
	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod++;
	/* 'next page' entries are counted in the producer value */
	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
	txq->tx_db.data.bd_prod = val;

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons++;
	txq->sw_tx_ring.skbs[idx].skb = NULL;

	return 0;
}
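
/* The selftest Tx path above is a minimal version of the regular transmit
 * routine: a single BD, no offloads, the doorbell written directly, and
 * completion detected by polling the PBL rather than via an interrupt,
 * since NAPI is stopped for the duration of the test.
 */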

static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct qede_rx_queue *rxq = NULL;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	int i, rc = 0;
	u8 *data_ptr;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			rxq = edev->fp_array[i].rxq;
			break;
		}
	}

	if (!rxq) {
		DP_NOTICE(edev, "Rx path is not available\n");
		return -1;
	}

	/* The packet is expected to arrive on rx-queue 0 even though RSS is
	 * enabled, since queue 0 is configured as the default queue and the
	 * loopback traffic is not IP.
	 */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (!qede_has_rx_work(rxq)) {
			usleep_range(100, 200);
			continue;
		}

		hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

		/* Memory barrier to prevent the CPU from doing speculative
		 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
		 * read before it is written by FW, then FW writes CQE and SB,
		 * and then the CPU reads the hw_comp_cons, it will use an old
		 * CQE.
		 */
		rmb();

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->len_on_first_bd);
		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
				  fp_cqe->placement_offset +
				  sw_rx_data->page_offset);
		if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
		    ether_addr_equal(data_ptr + ETH_ALEN,
				     edev->ndev->dev_addr)) {
			for (i = ETH_HLEN; i < len; i++)
				if (data_ptr[i] != (unsigned char)(i & 0xff)) {
					rc = -1;
					break;
				}

			qede_recycle_rx_bd_ring(rxq, 1);
			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
			break;
		}

		DP_INFO(edev, "Not the transmitted packet\n");
		qede_recycle_rx_bd_ring(rxq, 1);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
	}

	if (i == QEDE_SELFTEST_POLL_COUNT) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	qede_update_rx_prod(edev, rxq);

	return rc;
}

static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* prepare the loopback packet */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}
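
/* Internal loopback is the only offline test; the remaining tests run
 * online through the qed selftest ops, as qede_self_test() below shows.
 * Example invocation (interface name illustrative):
 *
 *	# ethtool -t eth0 offline
 */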

static void qede_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Self-test command parameters: offline = %d, external_lb = %d\n",
		   (etest->flags & ETH_TEST_FL_OFFLINE),
		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);

	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (qede_selftest_run_loopback(edev,
					       QED_LINK_LOOPBACK_INT_PHY)) {
			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
		buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static int qede_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 val;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		val = *(u32 *)data;
		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Invalid rx copy break value, range is [%u, %u]\n",
				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
			return -EINVAL;
		}

		edev->rx_copybreak = *(u32 *)data;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int qede_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = edev->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
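
/* rx-copybreak is the only tunable exposed; accepted values lie within
 * [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE].  Example (name and value
 * illustrative):
 *
 *	# ethtool --set-tunable eth0 rx-copybreak 256
 *	# ethtool --get-tunable eth0 rx-copybreak
 */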

static const struct ethtool_ops qede_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.set_link_ksettings = qede_set_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_regs_len = qede_get_regs_len,
	.get_regs = qede_get_regs,
	.get_wol = qede_get_wol,
	.set_wol = qede_set_wol,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_coalesce = qede_get_coalesce,
	.set_coalesce = qede_set_coalesce,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};

/* VFs expose a reduced operation set: anything that controls the physical
 * port (link and pause configuration, coalescing, WoL, LED identification,
 * register dumps and the selftests) remains PF-only.
 */
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};

void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}