/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include "qede.h"

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	{QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)

#define QEDE_RQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}

#define QEDE_SELFTEST_POLL_COUNT 100

static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rcv_pkts),
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
	QEDE_RQSTAT(rx_ip_frags),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) + \
		   qede_rqstats_arr[(sindex)].offset)))
#define QEDE_TQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_TQSTAT(stat_name) \
	{QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_tqstats_arr[] = {
	QEDE_TQSTAT(xmit_pkts),
	QEDE_TQSTAT(stopped_cnt),
};

#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
	(*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) + \
		   qede_tqstats_arr[(sindex)].offset)))

static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	bool pf_only;
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_65_to_127_byte_packets),
	QEDE_PF_STAT(rx_128_to_255_byte_packets),
	QEDE_PF_STAT(rx_256_to_511_byte_packets),
	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
	QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
	QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
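/* Fill @buf with one ETH_GSTRING_LEN-sized name per counter, in the same
 * order in which qede_get_ethtool_stats() later emits the values:
 * per-Rx-queue stats as "<queue>: <name>", per-Tx-queue stats as
 * "<queue>.<tc>: <name>", then the global counters (PF-only entries are
 * skipped on VFs).
 */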
	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),

	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_STAT(tx_lpi_entry_count),
	QEDE_PF_STAT(tx_total_collisions),
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_STAT(tx_err_drop_pkts),
	QEDE_STAT(ttl0_discard),
	QEDE_STAT(packet_too_big_discard),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_STATS_DATA(dev, index) \
	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
		   + qede_stats_arr[(index)].offset)))

#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)

enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_LEN,
};

static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"Coupled-Function",
};

enum qede_ethtool_tests {
	QEDE_ETHTOOL_INT_LOOPBACK,
	QEDE_ETHTOOL_INTERRUPT_TEST,
	QEDE_ETHTOOL_MEMORY_TEST,
	QEDE_ETHTOOL_REGISTER_TEST,
	QEDE_ETHTOOL_CLOCK_TEST,
	QEDE_ETHTOOL_TEST_MAX
};

static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Internal loopback (offline)",
	"Interrupt (online)\t",
	"Memory (online)\t\t",
	"Register (online)\t",
	"Clock (online)\t\t",
};

static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	int i, j, k;

	for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		int tc;

		for (j = 0; j < QEDE_NUM_RQSTATS; j++)
			sprintf(buf + (k + j) * ETH_GSTRING_LEN,
				"%d: %s", i, qede_rqstats_arr[j].string);
		k += QEDE_NUM_RQSTATS;
		for (tc = 0; tc < edev->num_tc; tc++) {
			for (j = 0; j < QEDE_NUM_TQSTATS; j++)
				sprintf(buf + (k + j) * ETH_GSTRING_LEN,
					"%d.%d: %s", i, tc,
					qede_tqstats_arr[j].string);
			k += QEDE_NUM_TQSTATS;
		}
	}

	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		strcpy(buf + (k + j) * ETH_GSTRING_LEN,
		       qede_stats_arr[i].string);
		j++;
	}
}

static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, qede_private_arr,
		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
		break;
	case ETH_SS_TEST:
		memcpy(buf, qede_tests_str_arr,
		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}
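/* Emit counter values in the same order as qede_get_strings_stats():
 * per-queue Rx stats, per-queue/per-TC Tx stats, then the global
 * counters. qede_lock is held across the walk so that a concurrent
 * reconfiguration cannot change the fastpath array underneath us.
 */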
static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int sidx, cnt = 0;
	int qid;

	qede_fill_by_demand_stats(edev);

	mutex_lock(&edev->qede_lock);

	for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
		int tc;

		if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
			for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
				buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
		}

		if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
					buf[cnt++] = QEDE_TQSTATS_DATA(edev,
								       sidx,
								       qid, tc);
			}
		}
	}

	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
		if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
			continue;
		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
	}

	mutex_unlock(&edev->qede_lock);
}

static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		if (IS_VF(edev)) {
			int i;

			for (i = 0; i < QEDE_NUM_STATS; i++)
				if (qede_stats_arr[i].pf_only)
					num_stats--;
		}
		return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
		       QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		if (!IS_VF(edev))
			return QEDE_ETHTOOL_TEST_MAX;
		else
			return 0;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}

static u32 qede_get_priv_flags(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}

struct qede_link_mode_mapping {
	u32 qed_link_mode;
	u32 ethtool_link_mode;
};

static const struct qede_link_mode_mapping qed_lm_map[] = {
	{QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
	{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
	{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
	{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
	{QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
	{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
	{QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{QED_LM_100000baseKR4_Full_BIT,
	 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
};

#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name)	\
{								\
	int i;							\
								\
	for (i = 0; i < QED_LM_COUNT; i++) {			\
		if ((caps) & (qed_lm_map[i].qed_link_mode))	\
			__set_bit(qed_lm_map[i].ethtool_link_mode,\
				  lk_ksettings->link_modes.name); \
	}							\
}

#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name)	\
{								\
	int i;							\
								\
	for (i = 0; i < QED_LM_COUNT; i++) {			\
		if (test_bit(qed_lm_map[i].ethtool_link_mode,	\
			     lk_ksettings->link_modes.name))	\
			caps |= qed_lm_map[i].qed_link_mode;	\
	}							\
}
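/* Translate qed capability bits into ethtool link modes (and back) via
 * qed_lm_map[]. This backs plain "ethtool <dev>" queries: supported,
 * advertised and link-partner modes plus speed/duplex/port/autoneg.
 */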
static int qede_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_settings *base = &cmd->base;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)

	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
	QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)

	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
		base->speed = current_link.speed;
		base->duplex = current_link.duplex;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}
	base->port = current_link.port;
	base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						 AUTONEG_DISABLE;

	return 0;
}

static int qede_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	const struct ethtool_link_settings *base = &cmd->base;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}
	memset(&current_link, 0, sizeof(current_link));
	memset(&params, 0, sizeof(params));
	edev->ops->common->get_link(edev->cdev, &current_link);

	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
	if (base->autoneg == AUTONEG_ENABLE) {
		params.autoneg = true;
		params.forced_speed = 0;
		QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
	} else {		/* forced speed */
		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
		params.autoneg = false;
		params.forced_speed = base->speed;
		switch (base->speed) {
		case SPEED_10000:
			if (!(current_link.supported_caps &
			      QED_LM_10000baseKR_Full_BIT)) {
				DP_INFO(edev, "10G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
			break;
		case SPEED_25000:
			if (!(current_link.supported_caps &
			      QED_LM_25000baseKR_Full_BIT)) {
				DP_INFO(edev, "25G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
			break;
		case SPEED_40000:
			if (!(current_link.supported_caps &
			      QED_LM_40000baseLR4_Full_BIT)) {
				DP_INFO(edev, "40G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
			break;
		case SPEED_50000:
			if (!(current_link.supported_caps &
			      QED_LM_50000baseKR2_Full_BIT)) {
				DP_INFO(edev, "50G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
			break;
		case SPEED_100000:
			if (!(current_link.supported_caps &
			      QED_LM_100000baseKR4_Full_BIT)) {
				DP_INFO(edev, "100G speed not supported\n");
				return -EINVAL;
			}
			params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
			break;
		default:
			DP_INFO(edev, "Unsupported speed %u\n", base->speed);
			return -EINVAL;
		}
	}

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}
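/* drvinfo reports two firmware versions: the management FW ("mfw"),
 * unpacked byte by byte from mfw_rev, and the storm FW from the fw_*
 * fields. The labelled "mfw X storm Y" form is used when it fits in
 * fw_version; otherwise the bare version strings are emitted.
 */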
static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);

	strlcpy(info->driver, "qede", sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
	    sizeof(info->fw_version)) {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s storm %s", mfw, storm);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %s", mfw, storm);
	}

	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}

static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}

static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}

static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}

static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}

static int qede_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);
	u16 rxc, txc;

	memset(coal, 0, sizeof(struct ethtool_coalesce));
	edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);

	coal->rx_coalesce_usecs = rxc;
	coal->tx_coalesce_usecs = txc;

	return 0;
}
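/* Apply interrupt-coalescing intervals (in usecs, at most
 * QED_COALESCE_MAX) to the status block of every active queue, e.g. via
 * "ethtool -C <dev> rx-usecs 24 tx-usecs 48".
 */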
static int qede_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i, rc = 0;
	u16 rxc, txc;
	u8 sb_id;

	if (!netif_running(dev)) {
		DP_INFO(edev, "Interface is down\n");
		return -EINVAL;
	}

	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
		DP_INFO(edev,
			"Can't support requested %s coalesce value [max supported value %d]\n",
			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
								   : "tx",
			QED_COALESCE_MAX);
		return -EINVAL;
	}

	rxc = (u16)coal->rx_coalesce_usecs;
	txc = (u16)coal->tx_coalesce_usecs;
	for_each_queue(i) {
		sb_id = edev->fp_array[i].sb_info->igu_sb_id;
		rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
						     (u8)i, sb_id);
		if (rc) {
			DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
			return rc;
		}
	}

	return rc;
}

static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}

static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);

	return 0;
}

static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}

static int qede_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;
	struct qed_link_output current_link;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev,
			"Pause settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (epause->autoneg) {
		if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
			DP_INFO(edev, "autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (epause->rx_pause)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (epause->tx_pause)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}
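/* The register dump is produced entirely by the qed core:
 * dbg_all_data_size() sizes the buffer and dbg_all_data() fills it with
 * the device-wide debug data collection.
 */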
static void qede_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *buffer)
{
	struct qede_dev *edev = netdev_priv(ndev);

	regs->version = 0;
	memset(buffer, 0, regs->len);

	if (edev->ops && edev->ops->common)
		edev->ops->common->dbg_all_data(edev->cdev, buffer);
}

static int qede_get_regs_len(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (edev->ops && edev->ops->common)
		return edev->ops->common->dbg_all_data_size(edev->cdev);
	else
		return -EINVAL;
}

static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
	edev->ndev->mtu = args->mtu;
}

/* Netdevice NDOs */
#define ETH_MAX_JUMBO_PACKET_SIZE	9600
#define ETH_MIN_PACKET_SIZE		60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	union qede_reload_args args;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		DP_ERR(edev, "Can't support requested MTU size\n");
		return -EINVAL;
	}

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the MTU field and re-start the interface if needed */
	args.mtu = new_mtu;

	if (netif_running(edev->ndev))
		qede_reload(edev, &qede_update_mtu, &args);

	qede_update_mtu(edev, &args);

	return 0;
}

static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
				   edev->fp_num_rx;
	channels->tx_count = edev->fp_num_tx;
	channels->rx_count = edev->fp_num_rx;
}

static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 count;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	count = channels->rx_count + channels->tx_count +
		channels->combined_count;

	/* We don't support `other' channels */
	if (channels->other_count) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	if (!(channels->combined_count || (channels->rx_count &&
					   channels->tx_count))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "need to request at least one transmit and one receive channel\n");
		return -EINVAL;
	}

	if (count > QEDE_MAX_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "requested channels = %d max supported channels = %d\n",
			   count, QEDE_MAX_RSS_CNT(edev));
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if ((count == QEDE_QUEUE_CNT(edev)) &&
	    (channels->tx_count == edev->fp_num_tx) &&
	    (channels->rx_count == edev->fp_num_rx)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* The number of queues must be divisible by the number of hwfns */
	if ((count % edev->dev_info.common.num_hwfns) ||
	    (channels->tx_count % edev->dev_info.common.num_hwfns) ||
	    (channels->rx_count % edev->dev_info.common.num_hwfns)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_queues = count;
	edev->req_num_tx = channels->tx_count;
	edev->req_num_rx = channels->rx_count;
	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);

	return 0;
}
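/* Identify-LED handler: returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to call back once per second, so "ethtool -p <dev> 5"
 * blinks the port LED for five seconds and then restores its normal
 * behavior via QED_LED_MODE_RESTORE.
 */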
static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}

static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rules __always_unused)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = QEDE_RSS_COUNT(edev);
		return 0;
	case ETHTOOL_GRXFH:
		return qede_get_rss_flags(edev, info);
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}
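/* Configure 2-tuple (IP src/dst) vs. 4-tuple (plus L4 ports) receive
 * hashing. Only UDP is actually switchable here; TCP is fixed at 4-tuple
 * and plain IP at 2-tuple. E.g. "ethtool -N <dev> rx-flow-hash udp4 sdfn"
 * enables 4-tuple UDP hashing.
 */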
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params vport_update_params;
	u8 set_caps = 0, clr_caps = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
					   ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
				    set_caps;
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}

static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return qede_set_rss_flags(edev, info);
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}

static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_params.rss_key);
}

static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		indir[i] = edev->rss_params.rss_ind_table[i];

	if (key)
		memcpy(key, edev->rss_params.rss_key,
		       qede_get_rxfh_key_size(dev));

	return 0;
}

static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!indir && !key)
		return 0;

	if (indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_params.rss_ind_table[i] = indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (key) {
		memcpy(&edev->rss_params.rss_key, key,
		       qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}

/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_queue(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}

/* This function disables the NAPI and the interrupt generation on the device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}
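/* Transmit half of the loopback self-test: bypass the regular xmit path
 * and place a single-BD frame directly on the first Tx queue, ring the
 * doorbell, and poll for its completion.
 */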
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = NULL;
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx, val;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			txq = edev->fp_array[i].txqs;
			break;
		}
	}

	if (!txq) {
		DP_NOTICE(edev, "Tx path is not available\n");
		return -1;
	}

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;
	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* Update the first BD with the actual number of BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod++;
	/* 'next page' entries are counted in the producer value */
	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
	txq->tx_db.data.bd_prod = val;

	/* wmb() makes sure the BD data is written before the producer is
	 * updated; otherwise the FW may read stale data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb() is needed to synchronize doorbell writes from more than
	 * one processor. It guarantees that the write arrives at the device
	 * before the queue lock is released and another start_xmit is called
	 * (possibly on another CPU). Without this barrier, the next doorbell
	 * could bypass this one. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	/* Unmap with dma_unmap_single() to match the dma_map_single() above */
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons++;
	txq->sw_tx_ring[idx].skb = NULL;

	return 0;
}
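/* Receive half of the loopback self-test: poll the first Rx queue for
 * the frame injected above and verify its payload still carries the
 * (i & 0xff) byte pattern written by qede_selftest_run_loopback().
 */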
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct qede_rx_queue *rxq = NULL;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	u8 *data_ptr;
	int i;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			rxq = edev->fp_array[i].rxq;
			break;
		}
	}

	if (!rxq) {
		DP_NOTICE(edev, "Rx path is not available\n");
		return -1;
	}

	/* The packet is expected to arrive on rx-queue 0 even though RSS is
	 * enabled, since queue 0 is configured as the default queue and the
	 * loopback traffic is not IP.
	 */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_has_rx_work(rxq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_has_rx_work(rxq)) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * the CQE/BD before reading hw_comp_cons. If the CQE were read before
	 * the FW wrote it (FW writes the CQE and then the SB), the CPU would
	 * operate on a stale CQE.
	 */
	rmb();
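/* Run one full loopback pass: quiesce NAPI and interrupts, force the
 * link into the requested loopback mode, send and receive a single test
 * frame, then restore normal link mode and restart the datapath.
 */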
	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

	/* Get the data from the SW ring */
	sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	data_ptr = (u8 *)(page_address(sw_rx_data->data) +
			  fp_cqe->placement_offset + sw_rx_data->page_offset);
	for (i = ETH_HLEN; i < len; i++)
		if (data_ptr[i] != (unsigned char)(i & 0xff)) {
			DP_NOTICE(edev, "Loopback test failed\n");
			qede_recycle_rx_bd_ring(rxq, edev, 1);
			return -1;
		}

	qede_recycle_rx_bd_ring(rxq, edev, 1);

	return 0;
}

static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* Prepare the loopback packet */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}

static void qede_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Self-test command parameters: offline = %d, external_lb = %d\n",
		   (etest->flags & ETH_TEST_FL_OFFLINE),
		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);

	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (qede_selftest_run_loopback(edev,
					       QED_LINK_LOOPBACK_INT_PHY)) {
			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
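/* rx_copybreak follows the usual copybreak convention: frames shorter
 * than the threshold are copied into a fresh buffer on receive so the
 * DMA-mapped Rx buffer can be reused (the copy itself happens in the Rx
 * fastpath, not here). Values outside [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE]
 * are rejected.
 */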
static int qede_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 val;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		val = *(u32 *)data;
		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Invalid rx copy break value, range is [%u, %u]\n",
				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
			return -EINVAL;
		}

		edev->rx_copybreak = val;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int qede_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = edev->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct ethtool_ops qede_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.set_link_ksettings = qede_set_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_regs_len = qede_get_regs_len,
	.get_regs = qede_get_regs,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_coalesce = qede_get_coalesce,
	.set_coalesce = qede_set_coalesce,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};
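/* VFs get a reduced operation set: anything touching the physical link
 * or device-global state (link/pause/coalesce configuration, LED
 * identification, register dumps, self-tests) remains PF-only.
 */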
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_link_ksettings = qede_get_link_ksettings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};

void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}