1 /* 2 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * The full GNU General Public License is included in this distribution in 14 * the file called "COPYING". 15 * 16 */ 17 18 #include <linux/firmware.h> 19 #include <linux/mdio.h> 20 21 #include "cxgb4.h" 22 #include "t4_regs.h" 23 #include "t4fw_api.h" 24 25 #define EEPROM_MAGIC 0x38E2F10C 26 27 static u32 get_msglevel(struct net_device *dev) 28 { 29 return netdev2adap(dev)->msg_enable; 30 } 31 32 static void set_msglevel(struct net_device *dev, u32 val) 33 { 34 netdev2adap(dev)->msg_enable = val; 35 } 36 37 static const char stats_strings[][ETH_GSTRING_LEN] = { 38 "tx_octets_ok ", 39 "tx_frames_ok ", 40 "tx_broadcast_frames ", 41 "tx_multicast_frames ", 42 "tx_unicast_frames ", 43 "tx_error_frames ", 44 45 "tx_frames_64 ", 46 "tx_frames_65_to_127 ", 47 "tx_frames_128_to_255 ", 48 "tx_frames_256_to_511 ", 49 "tx_frames_512_to_1023 ", 50 "tx_frames_1024_to_1518 ", 51 "tx_frames_1519_to_max ", 52 53 "tx_frames_dropped ", 54 "tx_pause_frames ", 55 "tx_ppp0_frames ", 56 "tx_ppp1_frames ", 57 "tx_ppp2_frames ", 58 "tx_ppp3_frames ", 59 "tx_ppp4_frames ", 60 "tx_ppp5_frames ", 61 "tx_ppp6_frames ", 62 "tx_ppp7_frames ", 63 64 "rx_octets_ok ", 65 "rx_frames_ok ", 66 "rx_broadcast_frames ", 67 "rx_multicast_frames ", 68 "rx_unicast_frames ", 69 70 "rx_frames_too_long ", 71 "rx_jabber_errors ", 72 "rx_fcs_errors ", 73 "rx_length_errors ", 74 "rx_symbol_errors ", 75 "rx_runt_frames ", 76 77 "rx_frames_64 ", 78 "rx_frames_65_to_127 ", 79 "rx_frames_128_to_255 ", 80 "rx_frames_256_to_511 ", 81 "rx_frames_512_to_1023 ", 82 "rx_frames_1024_to_1518 ", 83 "rx_frames_1519_to_max ", 84 85 "rx_pause_frames ", 86 "rx_ppp0_frames ", 87 "rx_ppp1_frames ", 88 "rx_ppp2_frames ", 89 "rx_ppp3_frames ", 90 "rx_ppp4_frames ", 91 "rx_ppp5_frames ", 92 "rx_ppp6_frames ", 93 "rx_ppp7_frames ", 94 95 "rx_bg0_frames_dropped ", 96 "rx_bg1_frames_dropped ", 97 "rx_bg2_frames_dropped ", 98 "rx_bg3_frames_dropped ", 99 "rx_bg0_frames_trunc ", 100 "rx_bg1_frames_trunc ", 101 "rx_bg2_frames_trunc ", 102 "rx_bg3_frames_trunc ", 103 104 "tso ", 105 "tx_csum_offload ", 106 "rx_csum_good ", 107 "vlan_extractions ", 108 "vlan_insertions ", 109 "gro_packets ", 110 "gro_merged ", 111 }; 112 113 static char adapter_stats_strings[][ETH_GSTRING_LEN] = { 114 "db_drop ", 115 "db_full ", 116 "db_empty ", 117 "tcp_ipv4_out_rsts ", 118 "tcp_ipv4_in_segs ", 119 "tcp_ipv4_out_segs ", 120 "tcp_ipv4_retrans_segs ", 121 "tcp_ipv6_out_rsts ", 122 "tcp_ipv6_in_segs ", 123 "tcp_ipv6_out_segs ", 124 "tcp_ipv6_retrans_segs ", 125 "usm_ddp_frames ", 126 "usm_ddp_octets ", 127 "usm_ddp_drops ", 128 "rdma_no_rqe_mod_defer ", 129 "rdma_no_rqe_pkt_defer ", 130 "tp_err_ofld_no_neigh ", 131 "tp_err_ofld_cong_defer ", 132 "write_coal_success ", 133 "write_coal_fail ", 134 }; 135 136 static char channel_stats_strings[][ETH_GSTRING_LEN] = { 137 "--------Channel--------- ", 138 "tp_cpl_requests ", 139 "tp_cpl_responses ", 140 "tp_mac_in_errs ", 141 "tp_hdr_in_errs ", 142 "tp_tcp_in_errs ", 143 "tp_tcp6_in_errs ", 144 "tp_tnl_cong_drops ", 145 "tp_tnl_tx_drops ", 146 "tp_ofld_vlan_drops ", 147 "tp_ofld_chan_drops ", 148 "fcoe_octets_ddp ", 149 "fcoe_frames_ddp ", 150 "fcoe_frames_drop ", 151 }; 152 153 static char loopback_stats_strings[][ETH_GSTRING_LEN] = { 154 "-------Loopback----------- ", 155 "octets_ok ", 156 "frames_ok ", 157 "bcast_frames ", 158 "mcast_frames ", 159 "ucast_frames ", 160 "error_frames ", 161 "frames_64 ", 162 "frames_65_to_127 ", 163 "frames_128_to_255 ", 164 "frames_256_to_511 ", 165 "frames_512_to_1023 ", 166 "frames_1024_to_1518 ", 167 "frames_1519_to_max ", 168 "frames_dropped ", 169 "bg0_frames_dropped ", 170 "bg1_frames_dropped ", 171 "bg2_frames_dropped ", 172 "bg3_frames_dropped ", 173 "bg0_frames_trunc ", 174 "bg1_frames_trunc ", 175 "bg2_frames_trunc ", 176 "bg3_frames_trunc ", 177 }; 178 179 static int get_sset_count(struct net_device *dev, int sset) 180 { 181 switch (sset) { 182 case ETH_SS_STATS: 183 return ARRAY_SIZE(stats_strings) + 184 ARRAY_SIZE(adapter_stats_strings) + 185 ARRAY_SIZE(channel_stats_strings) + 186 ARRAY_SIZE(loopback_stats_strings); 187 default: 188 return -EOPNOTSUPP; 189 } 190 } 191 192 static int get_regs_len(struct net_device *dev) 193 { 194 struct adapter *adap = netdev2adap(dev); 195 196 return t4_get_regs_len(adap); 197 } 198 199 static int get_eeprom_len(struct net_device *dev) 200 { 201 return EEPROMSIZE; 202 } 203 204 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 205 { 206 struct adapter *adapter = netdev2adap(dev); 207 u32 exprom_vers; 208 209 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); 210 strlcpy(info->version, cxgb4_driver_version, 211 sizeof(info->version)); 212 strlcpy(info->bus_info, pci_name(adapter->pdev), 213 sizeof(info->bus_info)); 214 info->regdump_len = get_regs_len(dev); 215 216 if (!adapter->params.fw_vers) 217 strcpy(info->fw_version, "N/A"); 218 else 219 snprintf(info->fw_version, sizeof(info->fw_version), 220 "%u.%u.%u.%u, TP %u.%u.%u.%u", 221 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), 222 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), 223 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), 224 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), 225 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), 226 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 227 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 228 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 229 230 if (!t4_get_exprom_version(adapter, &exprom_vers)) 231 snprintf(info->erom_version, sizeof(info->erom_version), 232 "%u.%u.%u.%u", 233 FW_HDR_FW_VER_MAJOR_G(exprom_vers), 234 FW_HDR_FW_VER_MINOR_G(exprom_vers), 235 FW_HDR_FW_VER_MICRO_G(exprom_vers), 236 FW_HDR_FW_VER_BUILD_G(exprom_vers)); 237 } 238 239 static void get_strings(struct net_device *dev, u32 stringset, u8 *data) 240 { 241 if (stringset == ETH_SS_STATS) { 242 memcpy(data, stats_strings, sizeof(stats_strings)); 243 data += sizeof(stats_strings); 244 memcpy(data, adapter_stats_strings, 245 sizeof(adapter_stats_strings)); 246 data += sizeof(adapter_stats_strings); 247 memcpy(data, channel_stats_strings, 248 sizeof(channel_stats_strings)); 249 data += sizeof(channel_stats_strings); 250 memcpy(data, loopback_stats_strings, 251 sizeof(loopback_stats_strings)); 252 } 253 } 254 255 /* port stats maintained per queue of the port. They should be in the same 256 * order as in stats_strings above. 257 */ 258 struct queue_port_stats { 259 u64 tso; 260 u64 tx_csum; 261 u64 rx_csum; 262 u64 vlan_ex; 263 u64 vlan_ins; 264 u64 gro_pkts; 265 u64 gro_merged; 266 }; 267 268 struct adapter_stats { 269 u64 db_drop; 270 u64 db_full; 271 u64 db_empty; 272 u64 tcp_v4_out_rsts; 273 u64 tcp_v4_in_segs; 274 u64 tcp_v4_out_segs; 275 u64 tcp_v4_retrans_segs; 276 u64 tcp_v6_out_rsts; 277 u64 tcp_v6_in_segs; 278 u64 tcp_v6_out_segs; 279 u64 tcp_v6_retrans_segs; 280 u64 frames; 281 u64 octets; 282 u64 drops; 283 u64 rqe_dfr_mod; 284 u64 rqe_dfr_pkt; 285 u64 ofld_no_neigh; 286 u64 ofld_cong_defer; 287 u64 wc_success; 288 u64 wc_fail; 289 }; 290 291 struct channel_stats { 292 u64 cpl_req; 293 u64 cpl_rsp; 294 u64 mac_in_errs; 295 u64 hdr_in_errs; 296 u64 tcp_in_errs; 297 u64 tcp6_in_errs; 298 u64 tnl_cong_drops; 299 u64 tnl_tx_drops; 300 u64 ofld_vlan_drops; 301 u64 ofld_chan_drops; 302 u64 octets_ddp; 303 u64 frames_ddp; 304 u64 frames_drop; 305 }; 306 307 static void collect_sge_port_stats(const struct adapter *adap, 308 const struct port_info *p, 309 struct queue_port_stats *s) 310 { 311 int i; 312 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; 313 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; 314 315 memset(s, 0, sizeof(*s)); 316 for (i = 0; i < p->nqsets; i++, rx++, tx++) { 317 s->tso += tx->tso; 318 s->tx_csum += tx->tx_cso; 319 s->rx_csum += rx->stats.rx_cso; 320 s->vlan_ex += rx->stats.vlan_ex; 321 s->vlan_ins += tx->vlan_ins; 322 s->gro_pkts += rx->stats.lro_pkts; 323 s->gro_merged += rx->stats.lro_merged; 324 } 325 } 326 327 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) 328 { 329 struct tp_tcp_stats v4, v6; 330 struct tp_rdma_stats rdma_stats; 331 struct tp_err_stats err_stats; 332 struct tp_usm_stats usm_stats; 333 u64 val1, val2; 334 335 memset(s, 0, sizeof(*s)); 336 337 spin_lock(&adap->stats_lock); 338 t4_tp_get_tcp_stats(adap, &v4, &v6); 339 t4_tp_get_rdma_stats(adap, &rdma_stats); 340 t4_get_usm_stats(adap, &usm_stats); 341 t4_tp_get_err_stats(adap, &err_stats); 342 spin_unlock(&adap->stats_lock); 343 344 s->db_drop = adap->db_stats.db_drop; 345 s->db_full = adap->db_stats.db_full; 346 s->db_empty = adap->db_stats.db_empty; 347 348 s->tcp_v4_out_rsts = v4.tcp_out_rsts; 349 s->tcp_v4_in_segs = v4.tcp_in_segs; 350 s->tcp_v4_out_segs = v4.tcp_out_segs; 351 s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; 352 s->tcp_v6_out_rsts = v6.tcp_out_rsts; 353 s->tcp_v6_in_segs = v6.tcp_in_segs; 354 s->tcp_v6_out_segs = v6.tcp_out_segs; 355 s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; 356 357 if (is_offload(adap)) { 358 s->frames = usm_stats.frames; 359 s->octets = usm_stats.octets; 360 s->drops = usm_stats.drops; 361 s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; 362 s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; 363 } 364 365 s->ofld_no_neigh = err_stats.ofld_no_neigh; 366 s->ofld_cong_defer = err_stats.ofld_cong_defer; 367 368 if (!is_t4(adap->params.chip)) { 369 int v; 370 371 v = t4_read_reg(adap, SGE_STAT_CFG_A); 372 if (STATSOURCE_T5_G(v) == 7) { 373 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A); 374 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A); 375 s->wc_success = val1 - val2; 376 s->wc_fail = val2; 377 } 378 } 379 } 380 381 static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, 382 u8 i) 383 { 384 struct tp_cpl_stats cpl_stats; 385 struct tp_err_stats err_stats; 386 struct tp_fcoe_stats fcoe_stats; 387 388 memset(s, 0, sizeof(*s)); 389 390 spin_lock(&adap->stats_lock); 391 t4_tp_get_cpl_stats(adap, &cpl_stats); 392 t4_tp_get_err_stats(adap, &err_stats); 393 t4_get_fcoe_stats(adap, i, &fcoe_stats); 394 spin_unlock(&adap->stats_lock); 395 396 s->cpl_req = cpl_stats.req[i]; 397 s->cpl_rsp = cpl_stats.rsp[i]; 398 s->mac_in_errs = err_stats.mac_in_errs[i]; 399 s->hdr_in_errs = err_stats.hdr_in_errs[i]; 400 s->tcp_in_errs = err_stats.tcp_in_errs[i]; 401 s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; 402 s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; 403 s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; 404 s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; 405 s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; 406 s->octets_ddp = fcoe_stats.octets_ddp; 407 s->frames_ddp = fcoe_stats.frames_ddp; 408 s->frames_drop = fcoe_stats.frames_drop; 409 } 410 411 static void get_stats(struct net_device *dev, struct ethtool_stats *stats, 412 u64 *data) 413 { 414 struct port_info *pi = netdev_priv(dev); 415 struct adapter *adapter = pi->adapter; 416 struct lb_port_stats s; 417 int i; 418 u64 *p0; 419 420 t4_get_port_stats_offset(adapter, pi->tx_chan, 421 (struct port_stats *)data, 422 &pi->stats_base); 423 424 data += sizeof(struct port_stats) / sizeof(u64); 425 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 426 data += sizeof(struct queue_port_stats) / sizeof(u64); 427 collect_adapter_stats(adapter, (struct adapter_stats *)data); 428 data += sizeof(struct adapter_stats) / sizeof(u64); 429 430 *data++ = (u64)pi->port_id; 431 collect_channel_stats(adapter, (struct channel_stats *)data, 432 pi->port_id); 433 data += sizeof(struct channel_stats) / sizeof(u64); 434 435 *data++ = (u64)pi->port_id; 436 memset(&s, 0, sizeof(s)); 437 t4_get_lb_stats(adapter, pi->port_id, &s); 438 439 p0 = &s.octets; 440 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++) 441 *data++ = (unsigned long long)*p0++; 442 } 443 444 static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 445 void *buf) 446 { 447 struct adapter *adap = netdev2adap(dev); 448 size_t buf_size; 449 450 buf_size = t4_get_regs_len(adap); 451 regs->version = mk_adap_vers(adap); 452 t4_get_regs(adap, buf, buf_size); 453 } 454 455 static int restart_autoneg(struct net_device *dev) 456 { 457 struct port_info *p = netdev_priv(dev); 458 459 if (!netif_running(dev)) 460 return -EAGAIN; 461 if (p->link_cfg.autoneg != AUTONEG_ENABLE) 462 return -EINVAL; 463 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan); 464 return 0; 465 } 466 467 static int identify_port(struct net_device *dev, 468 enum ethtool_phys_id_state state) 469 { 470 unsigned int val; 471 struct adapter *adap = netdev2adap(dev); 472 473 if (state == ETHTOOL_ID_ACTIVE) 474 val = 0xffff; 475 else if (state == ETHTOOL_ID_INACTIVE) 476 val = 0; 477 else 478 return -EINVAL; 479 480 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val); 481 } 482 483 /** 484 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool 485 * @port_type: Firmware Port Type 486 * @mod_type: Firmware Module Type 487 * 488 * Translate Firmware Port/Module type to Ethtool Port Type. 489 */ 490 static int from_fw_port_mod_type(enum fw_port_type port_type, 491 enum fw_port_module_type mod_type) 492 { 493 if (port_type == FW_PORT_TYPE_BT_SGMII || 494 port_type == FW_PORT_TYPE_BT_XFI || 495 port_type == FW_PORT_TYPE_BT_XAUI) { 496 return PORT_TP; 497 } else if (port_type == FW_PORT_TYPE_FIBER_XFI || 498 port_type == FW_PORT_TYPE_FIBER_XAUI) { 499 return PORT_FIBRE; 500 } else if (port_type == FW_PORT_TYPE_SFP || 501 port_type == FW_PORT_TYPE_QSFP_10G || 502 port_type == FW_PORT_TYPE_QSA || 503 port_type == FW_PORT_TYPE_QSFP) { 504 if (mod_type == FW_PORT_MOD_TYPE_LR || 505 mod_type == FW_PORT_MOD_TYPE_SR || 506 mod_type == FW_PORT_MOD_TYPE_ER || 507 mod_type == FW_PORT_MOD_TYPE_LRM) 508 return PORT_FIBRE; 509 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || 510 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) 511 return PORT_DA; 512 else 513 return PORT_OTHER; 514 } 515 516 return PORT_OTHER; 517 } 518 519 /** 520 * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities 521 * @speed: speed in Kb/s 522 * 523 * Translates a specific Port Speed into a Firmware Port Capabilities 524 * value. 525 */ 526 static unsigned int speed_to_fw_caps(int speed) 527 { 528 if (speed == 100) 529 return FW_PORT_CAP_SPEED_100M; 530 if (speed == 1000) 531 return FW_PORT_CAP_SPEED_1G; 532 if (speed == 10000) 533 return FW_PORT_CAP_SPEED_10G; 534 if (speed == 25000) 535 return FW_PORT_CAP_SPEED_25G; 536 if (speed == 40000) 537 return FW_PORT_CAP_SPEED_40G; 538 if (speed == 100000) 539 return FW_PORT_CAP_SPEED_100G; 540 return 0; 541 } 542 543 /** 544 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask 545 * @port_type: Firmware Port Type 546 * @fw_caps: Firmware Port Capabilities 547 * @link_mode_mask: ethtool Link Mode Mask 548 * 549 * Translate a Firmware Port Capabilities specification to an ethtool 550 * Link Mode Mask. 551 */ 552 static void fw_caps_to_lmm(enum fw_port_type port_type, 553 unsigned int fw_caps, 554 unsigned long *link_mode_mask) 555 { 556 #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \ 557 ## _BIT, link_mode_mask) 558 559 #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ 560 do { \ 561 if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ 562 SET_LMM(__lmm_name); \ 563 } while (0) 564 565 switch (port_type) { 566 case FW_PORT_TYPE_BT_SGMII: 567 case FW_PORT_TYPE_BT_XFI: 568 case FW_PORT_TYPE_BT_XAUI: 569 SET_LMM(TP); 570 FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); 571 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); 572 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); 573 break; 574 575 case FW_PORT_TYPE_KX4: 576 case FW_PORT_TYPE_KX: 577 SET_LMM(Backplane); 578 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); 579 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); 580 break; 581 582 case FW_PORT_TYPE_KR: 583 SET_LMM(Backplane); 584 SET_LMM(10000baseKR_Full); 585 break; 586 587 case FW_PORT_TYPE_BP_AP: 588 SET_LMM(Backplane); 589 SET_LMM(10000baseR_FEC); 590 SET_LMM(10000baseKR_Full); 591 SET_LMM(1000baseKX_Full); 592 break; 593 594 case FW_PORT_TYPE_BP4_AP: 595 SET_LMM(Backplane); 596 SET_LMM(10000baseR_FEC); 597 SET_LMM(10000baseKR_Full); 598 SET_LMM(1000baseKX_Full); 599 SET_LMM(10000baseKX4_Full); 600 break; 601 602 case FW_PORT_TYPE_FIBER_XFI: 603 case FW_PORT_TYPE_FIBER_XAUI: 604 case FW_PORT_TYPE_SFP: 605 case FW_PORT_TYPE_QSFP_10G: 606 case FW_PORT_TYPE_QSA: 607 SET_LMM(FIBRE); 608 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); 609 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); 610 break; 611 612 case FW_PORT_TYPE_BP40_BA: 613 case FW_PORT_TYPE_QSFP: 614 SET_LMM(FIBRE); 615 SET_LMM(40000baseSR4_Full); 616 break; 617 618 case FW_PORT_TYPE_CR_QSFP: 619 case FW_PORT_TYPE_SFP28: 620 SET_LMM(FIBRE); 621 SET_LMM(25000baseCR_Full); 622 break; 623 624 case FW_PORT_TYPE_KR4_100G: 625 case FW_PORT_TYPE_CR4_QSFP: 626 SET_LMM(FIBRE); 627 SET_LMM(100000baseCR4_Full); 628 break; 629 630 default: 631 break; 632 } 633 634 FW_CAPS_TO_LMM(ANEG, Autoneg); 635 FW_CAPS_TO_LMM(802_3_PAUSE, Pause); 636 FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); 637 638 #undef FW_CAPS_TO_LMM 639 #undef SET_LMM 640 } 641 642 /** 643 * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware 644 * capabilities 645 * 646 * @link_mode_mask: ethtool Link Mode Mask 647 * 648 * Translate ethtool Link Mode Mask into a Firmware Port capabilities 649 * value. 650 */ 651 static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) 652 { 653 unsigned int fw_caps = 0; 654 655 #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \ 656 do { \ 657 if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ 658 link_mode_mask)) \ 659 fw_caps |= FW_PORT_CAP_ ## __fw_name; \ 660 } while (0) 661 662 LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M); 663 LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G); 664 LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G); 665 LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G); 666 LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G); 667 LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G); 668 669 #undef LMM_TO_FW_CAPS 670 671 return fw_caps; 672 } 673 674 static int get_link_ksettings(struct net_device *dev, 675 struct ethtool_link_ksettings *link_ksettings) 676 { 677 const struct port_info *pi = netdev_priv(dev); 678 struct ethtool_link_settings *base = &link_ksettings->base; 679 680 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 681 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 682 ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); 683 684 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); 685 686 if (pi->mdio_addr >= 0) { 687 base->phy_address = pi->mdio_addr; 688 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII 689 ? ETH_MDIO_SUPPORTS_C22 690 : ETH_MDIO_SUPPORTS_C45); 691 } else { 692 base->phy_address = 255; 693 base->mdio_support = 0; 694 } 695 696 fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, 697 link_ksettings->link_modes.supported); 698 fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, 699 link_ksettings->link_modes.advertising); 700 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, 701 link_ksettings->link_modes.lp_advertising); 702 703 if (netif_carrier_ok(dev)) { 704 base->speed = pi->link_cfg.speed; 705 base->duplex = DUPLEX_FULL; 706 } else { 707 base->speed = SPEED_UNKNOWN; 708 base->duplex = DUPLEX_UNKNOWN; 709 } 710 711 base->autoneg = pi->link_cfg.autoneg; 712 if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) 713 ethtool_link_ksettings_add_link_mode(link_ksettings, 714 supported, Autoneg); 715 if (pi->link_cfg.autoneg) 716 ethtool_link_ksettings_add_link_mode(link_ksettings, 717 advertising, Autoneg); 718 719 return 0; 720 } 721 722 static int set_link_ksettings(struct net_device *dev, 723 const struct ethtool_link_ksettings 724 *link_ksettings) 725 { 726 struct port_info *pi = netdev_priv(dev); 727 struct link_config *lc = &pi->link_cfg; 728 const struct ethtool_link_settings *base = &link_ksettings->base; 729 struct link_config old_lc; 730 unsigned int fw_caps; 731 int ret = 0; 732 733 /* only full-duplex supported */ 734 if (base->duplex != DUPLEX_FULL) 735 return -EINVAL; 736 737 if (!(lc->supported & FW_PORT_CAP_ANEG)) { 738 /* PHY offers a single speed. See if that's what's 739 * being requested. 740 */ 741 if (base->autoneg == AUTONEG_DISABLE && 742 (lc->supported & speed_to_fw_caps(base->speed))) 743 return 0; 744 return -EINVAL; 745 } 746 747 old_lc = *lc; 748 if (base->autoneg == AUTONEG_DISABLE) { 749 fw_caps = speed_to_fw_caps(base->speed); 750 751 if (!(lc->supported & fw_caps)) 752 return -EINVAL; 753 lc->requested_speed = fw_caps; 754 lc->advertising = 0; 755 } else { 756 fw_caps = 757 lmm_to_fw_caps(link_ksettings->link_modes.advertising); 758 759 if (!(lc->supported & fw_caps)) 760 return -EINVAL; 761 lc->requested_speed = 0; 762 lc->advertising = fw_caps | FW_PORT_CAP_ANEG; 763 } 764 lc->autoneg = base->autoneg; 765 766 /* If the firmware rejects the Link Configuration request, back out 767 * the changes and report the error. 768 */ 769 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc); 770 if (ret) 771 *lc = old_lc; 772 773 return ret; 774 } 775 776 static void get_pauseparam(struct net_device *dev, 777 struct ethtool_pauseparam *epause) 778 { 779 struct port_info *p = netdev_priv(dev); 780 781 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; 782 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; 783 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; 784 } 785 786 static int set_pauseparam(struct net_device *dev, 787 struct ethtool_pauseparam *epause) 788 { 789 struct port_info *p = netdev_priv(dev); 790 struct link_config *lc = &p->link_cfg; 791 792 if (epause->autoneg == AUTONEG_DISABLE) 793 lc->requested_fc = 0; 794 else if (lc->supported & FW_PORT_CAP_ANEG) 795 lc->requested_fc = PAUSE_AUTONEG; 796 else 797 return -EINVAL; 798 799 if (epause->rx_pause) 800 lc->requested_fc |= PAUSE_RX; 801 if (epause->tx_pause) 802 lc->requested_fc |= PAUSE_TX; 803 if (netif_running(dev)) 804 return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, 805 lc); 806 return 0; 807 } 808 809 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 810 { 811 const struct port_info *pi = netdev_priv(dev); 812 const struct sge *s = &pi->adapter->sge; 813 814 e->rx_max_pending = MAX_RX_BUFFERS; 815 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; 816 e->rx_jumbo_max_pending = 0; 817 e->tx_max_pending = MAX_TXQ_ENTRIES; 818 819 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; 820 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; 821 e->rx_jumbo_pending = 0; 822 e->tx_pending = s->ethtxq[pi->first_qset].q.size; 823 } 824 825 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 826 { 827 int i; 828 const struct port_info *pi = netdev_priv(dev); 829 struct adapter *adapter = pi->adapter; 830 struct sge *s = &adapter->sge; 831 832 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || 833 e->tx_pending > MAX_TXQ_ENTRIES || 834 e->rx_mini_pending > MAX_RSPQ_ENTRIES || 835 e->rx_mini_pending < MIN_RSPQ_ENTRIES || 836 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) 837 return -EINVAL; 838 839 if (adapter->flags & FULL_INIT_DONE) 840 return -EBUSY; 841 842 for (i = 0; i < pi->nqsets; ++i) { 843 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; 844 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; 845 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; 846 } 847 return 0; 848 } 849 850 /** 851 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete! 852 * @dev: the network device 853 * @us: the hold-off time in us, or 0 to disable timer 854 * @cnt: the hold-off packet count, or 0 to disable counter 855 * 856 * Set the RX interrupt hold-off parameters for a network device. 857 */ 858 static int set_rx_intr_params(struct net_device *dev, 859 unsigned int us, unsigned int cnt) 860 { 861 int i, err; 862 struct port_info *pi = netdev_priv(dev); 863 struct adapter *adap = pi->adapter; 864 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 865 866 for (i = 0; i < pi->nqsets; i++, q++) { 867 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt); 868 if (err) 869 return err; 870 } 871 return 0; 872 } 873 874 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) 875 { 876 int i; 877 struct port_info *pi = netdev_priv(dev); 878 struct adapter *adap = pi->adapter; 879 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 880 881 for (i = 0; i < pi->nqsets; i++, q++) 882 q->rspq.adaptive_rx = adaptive_rx; 883 884 return 0; 885 } 886 887 static int get_adaptive_rx_setting(struct net_device *dev) 888 { 889 struct port_info *pi = netdev_priv(dev); 890 struct adapter *adap = pi->adapter; 891 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 892 893 return q->rspq.adaptive_rx; 894 } 895 896 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 897 { 898 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); 899 return set_rx_intr_params(dev, c->rx_coalesce_usecs, 900 c->rx_max_coalesced_frames); 901 } 902 903 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 904 { 905 const struct port_info *pi = netdev_priv(dev); 906 const struct adapter *adap = pi->adapter; 907 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; 908 909 c->rx_coalesce_usecs = qtimer_val(adap, rq); 910 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ? 911 adap->sge.counter_val[rq->pktcnt_idx] : 0; 912 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); 913 return 0; 914 } 915 916 /** 917 * eeprom_ptov - translate a physical EEPROM address to virtual 918 * @phys_addr: the physical EEPROM address 919 * @fn: the PCI function number 920 * @sz: size of function-specific area 921 * 922 * Translate a physical EEPROM address to virtual. The first 1K is 923 * accessed through virtual addresses starting at 31K, the rest is 924 * accessed through virtual addresses starting at 0. 925 * 926 * The mapping is as follows: 927 * [0..1K) -> [31K..32K) 928 * [1K..1K+A) -> [31K-A..31K) 929 * [1K+A..ES) -> [0..ES-A-1K) 930 * 931 * where A = @fn * @sz, and ES = EEPROM size. 932 */ 933 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 934 { 935 fn *= sz; 936 if (phys_addr < 1024) 937 return phys_addr + (31 << 10); 938 if (phys_addr < 1024 + fn) 939 return 31744 - fn + phys_addr - 1024; 940 if (phys_addr < EEPROMSIZE) 941 return phys_addr - 1024 - fn; 942 return -EINVAL; 943 } 944 945 /* The next two routines implement eeprom read/write from physical addresses. 946 */ 947 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) 948 { 949 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); 950 951 if (vaddr >= 0) 952 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); 953 return vaddr < 0 ? vaddr : 0; 954 } 955 956 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) 957 { 958 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); 959 960 if (vaddr >= 0) 961 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); 962 return vaddr < 0 ? vaddr : 0; 963 } 964 965 #define EEPROM_MAGIC 0x38E2F10C 966 967 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, 968 u8 *data) 969 { 970 int i, err = 0; 971 struct adapter *adapter = netdev2adap(dev); 972 u8 *buf = t4_alloc_mem(EEPROMSIZE); 973 974 if (!buf) 975 return -ENOMEM; 976 977 e->magic = EEPROM_MAGIC; 978 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) 979 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); 980 981 if (!err) 982 memcpy(data, buf + e->offset, e->len); 983 t4_free_mem(buf); 984 return err; 985 } 986 987 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 988 u8 *data) 989 { 990 u8 *buf; 991 int err = 0; 992 u32 aligned_offset, aligned_len, *p; 993 struct adapter *adapter = netdev2adap(dev); 994 995 if (eeprom->magic != EEPROM_MAGIC) 996 return -EINVAL; 997 998 aligned_offset = eeprom->offset & ~3; 999 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; 1000 1001 if (adapter->pf > 0) { 1002 u32 start = 1024 + adapter->pf * EEPROMPFSIZE; 1003 1004 if (aligned_offset < start || 1005 aligned_offset + aligned_len > start + EEPROMPFSIZE) 1006 return -EPERM; 1007 } 1008 1009 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 1010 /* RMW possibly needed for first or last words. 1011 */ 1012 buf = t4_alloc_mem(aligned_len); 1013 if (!buf) 1014 return -ENOMEM; 1015 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); 1016 if (!err && aligned_len > 4) 1017 err = eeprom_rd_phys(adapter, 1018 aligned_offset + aligned_len - 4, 1019 (u32 *)&buf[aligned_len - 4]); 1020 if (err) 1021 goto out; 1022 memcpy(buf + (eeprom->offset & 3), data, eeprom->len); 1023 } else { 1024 buf = data; 1025 } 1026 1027 err = t4_seeprom_wp(adapter, false); 1028 if (err) 1029 goto out; 1030 1031 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { 1032 err = eeprom_wr_phys(adapter, aligned_offset, *p); 1033 aligned_offset += 4; 1034 } 1035 1036 if (!err) 1037 err = t4_seeprom_wp(adapter, true); 1038 out: 1039 if (buf != data) 1040 t4_free_mem(buf); 1041 return err; 1042 } 1043 1044 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) 1045 { 1046 int ret; 1047 const struct firmware *fw; 1048 struct adapter *adap = netdev2adap(netdev); 1049 unsigned int mbox = PCIE_FW_MASTER_M + 1; 1050 u32 pcie_fw; 1051 unsigned int master; 1052 u8 master_vld = 0; 1053 1054 pcie_fw = t4_read_reg(adap, PCIE_FW_A); 1055 master = PCIE_FW_MASTER_G(pcie_fw); 1056 if (pcie_fw & PCIE_FW_MASTER_VLD_F) 1057 master_vld = 1; 1058 /* if csiostor is the master return */ 1059 if (master_vld && (master != adap->pf)) { 1060 dev_warn(adap->pdev_dev, 1061 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n"); 1062 return -EOPNOTSUPP; 1063 } 1064 1065 ef->data[sizeof(ef->data) - 1] = '\0'; 1066 ret = request_firmware(&fw, ef->data, adap->pdev_dev); 1067 if (ret < 0) 1068 return ret; 1069 1070 /* If the adapter has been fully initialized then we'll go ahead and 1071 * try to get the firmware's cooperation in upgrading to the new 1072 * firmware image otherwise we'll try to do the entire job from the 1073 * host ... and we always "force" the operation in this path. 1074 */ 1075 if (adap->flags & FULL_INIT_DONE) 1076 mbox = adap->mbox; 1077 1078 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); 1079 release_firmware(fw); 1080 if (!ret) 1081 dev_info(adap->pdev_dev, 1082 "loaded firmware %s, reload cxgb4 driver\n", ef->data); 1083 return ret; 1084 } 1085 1086 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info) 1087 { 1088 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 1089 SOF_TIMESTAMPING_RX_SOFTWARE | 1090 SOF_TIMESTAMPING_SOFTWARE; 1091 1092 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | 1093 SOF_TIMESTAMPING_RAW_HARDWARE; 1094 1095 ts_info->phc_index = -1; 1096 1097 return 0; 1098 } 1099 1100 static u32 get_rss_table_size(struct net_device *dev) 1101 { 1102 const struct port_info *pi = netdev_priv(dev); 1103 1104 return pi->rss_size; 1105 } 1106 1107 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) 1108 { 1109 const struct port_info *pi = netdev_priv(dev); 1110 unsigned int n = pi->rss_size; 1111 1112 if (hfunc) 1113 *hfunc = ETH_RSS_HASH_TOP; 1114 if (!p) 1115 return 0; 1116 while (n--) 1117 p[n] = pi->rss[n]; 1118 return 0; 1119 } 1120 1121 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, 1122 const u8 hfunc) 1123 { 1124 unsigned int i; 1125 struct port_info *pi = netdev_priv(dev); 1126 1127 /* We require at least one supported parameter to be changed and no 1128 * change in any of the unsupported parameters 1129 */ 1130 if (key || 1131 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 1132 return -EOPNOTSUPP; 1133 if (!p) 1134 return 0; 1135 1136 /* Interface must be brought up atleast once */ 1137 if (pi->adapter->flags & FULL_INIT_DONE) { 1138 for (i = 0; i < pi->rss_size; i++) 1139 pi->rss[i] = p[i]; 1140 1141 return cxgb4_write_rss(pi, pi->rss); 1142 } 1143 1144 return -EPERM; 1145 } 1146 1147 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 1148 u32 *rules) 1149 { 1150 const struct port_info *pi = netdev_priv(dev); 1151 1152 switch (info->cmd) { 1153 case ETHTOOL_GRXFH: { 1154 unsigned int v = pi->rss_mode; 1155 1156 info->data = 0; 1157 switch (info->flow_type) { 1158 case TCP_V4_FLOW: 1159 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) 1160 info->data = RXH_IP_SRC | RXH_IP_DST | 1161 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1162 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) 1163 info->data = RXH_IP_SRC | RXH_IP_DST; 1164 break; 1165 case UDP_V4_FLOW: 1166 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && 1167 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) 1168 info->data = RXH_IP_SRC | RXH_IP_DST | 1169 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1170 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) 1171 info->data = RXH_IP_SRC | RXH_IP_DST; 1172 break; 1173 case SCTP_V4_FLOW: 1174 case AH_ESP_V4_FLOW: 1175 case IPV4_FLOW: 1176 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) 1177 info->data = RXH_IP_SRC | RXH_IP_DST; 1178 break; 1179 case TCP_V6_FLOW: 1180 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) 1181 info->data = RXH_IP_SRC | RXH_IP_DST | 1182 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1183 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) 1184 info->data = RXH_IP_SRC | RXH_IP_DST; 1185 break; 1186 case UDP_V6_FLOW: 1187 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && 1188 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) 1189 info->data = RXH_IP_SRC | RXH_IP_DST | 1190 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1191 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) 1192 info->data = RXH_IP_SRC | RXH_IP_DST; 1193 break; 1194 case SCTP_V6_FLOW: 1195 case AH_ESP_V6_FLOW: 1196 case IPV6_FLOW: 1197 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) 1198 info->data = RXH_IP_SRC | RXH_IP_DST; 1199 break; 1200 } 1201 return 0; 1202 } 1203 case ETHTOOL_GRXRINGS: 1204 info->data = pi->nqsets; 1205 return 0; 1206 } 1207 return -EOPNOTSUPP; 1208 } 1209 1210 static const struct ethtool_ops cxgb_ethtool_ops = { 1211 .get_link_ksettings = get_link_ksettings, 1212 .set_link_ksettings = set_link_ksettings, 1213 .get_drvinfo = get_drvinfo, 1214 .get_msglevel = get_msglevel, 1215 .set_msglevel = set_msglevel, 1216 .get_ringparam = get_sge_param, 1217 .set_ringparam = set_sge_param, 1218 .get_coalesce = get_coalesce, 1219 .set_coalesce = set_coalesce, 1220 .get_eeprom_len = get_eeprom_len, 1221 .get_eeprom = get_eeprom, 1222 .set_eeprom = set_eeprom, 1223 .get_pauseparam = get_pauseparam, 1224 .set_pauseparam = set_pauseparam, 1225 .get_link = ethtool_op_get_link, 1226 .get_strings = get_strings, 1227 .set_phys_id = identify_port, 1228 .nway_reset = restart_autoneg, 1229 .get_sset_count = get_sset_count, 1230 .get_ethtool_stats = get_stats, 1231 .get_regs_len = get_regs_len, 1232 .get_regs = get_regs, 1233 .get_rxnfc = get_rxnfc, 1234 .get_rxfh_indir_size = get_rss_table_size, 1235 .get_rxfh = get_rss_table, 1236 .set_rxfh = set_rss_table, 1237 .flash_device = set_flash, 1238 .get_ts_info = get_ts_info 1239 }; 1240 1241 void cxgb4_set_ethtool_ops(struct net_device *netdev) 1242 { 1243 netdev->ethtool_ops = &cxgb_ethtool_ops; 1244 } 1245