/*
 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/firmware.h>
#include <linux/mdio.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

#define EEPROM_MAGIC 0x38E2F10C

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"tx_octets_ok ",
	"tx_frames_ok ",
	"tx_broadcast_frames ",
	"tx_multicast_frames ",
	"tx_unicast_frames ",
	"tx_error_frames ",

	"tx_frames_64 ",
	"tx_frames_65_to_127 ",
	"tx_frames_128_to_255 ",
	"tx_frames_256_to_511 ",
	"tx_frames_512_to_1023 ",
	"tx_frames_1024_to_1518 ",
	"tx_frames_1519_to_max ",

	"tx_frames_dropped ",
	"tx_pause_frames ",
	"tx_ppp0_frames ",
	"tx_ppp1_frames ",
	"tx_ppp2_frames ",
	"tx_ppp3_frames ",
	"tx_ppp4_frames ",
	"tx_ppp5_frames ",
	"tx_ppp6_frames ",
	"tx_ppp7_frames ",

	"rx_octets_ok ",
	"rx_frames_ok ",
	"rx_broadcast_frames ",
	"rx_multicast_frames ",
	"rx_unicast_frames ",

	"rx_frames_too_long ",
	"rx_jabber_errors ",
	"rx_fcs_errors ",
	"rx_length_errors ",
	"rx_symbol_errors ",
	"rx_runt_frames ",

	"rx_frames_64 ",
	"rx_frames_65_to_127 ",
	"rx_frames_128_to_255 ",
	"rx_frames_256_to_511 ",
	"rx_frames_512_to_1023 ",
	"rx_frames_1024_to_1518 ",
	"rx_frames_1519_to_max ",

	"rx_pause_frames ",
	"rx_ppp0_frames ",
	"rx_ppp1_frames ",
	"rx_ppp2_frames ",
	"rx_ppp3_frames ",
	"rx_ppp4_frames ",
	"rx_ppp5_frames ",
	"rx_ppp6_frames ",
	"rx_ppp7_frames ",

	"rx_bg0_frames_dropped ",
	"rx_bg1_frames_dropped ",
	"rx_bg2_frames_dropped ",
	"rx_bg3_frames_dropped ",
	"rx_bg0_frames_trunc ",
	"rx_bg1_frames_trunc ",
	"rx_bg2_frames_trunc ",
	"rx_bg3_frames_trunc ",

	"tso ",
	"tx_csum_offload ",
	"rx_csum_good ",
	"vlan_extractions ",
	"vlan_insertions ",
	"gro_packets ",
	"gro_merged ",
};

static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
	"db_drop ",
	"db_full ",
	"db_empty ",
	"tcp_ipv4_out_rsts ",
	"tcp_ipv4_in_segs ",
	"tcp_ipv4_out_segs ",
	"tcp_ipv4_retrans_segs ",
	"tcp_ipv6_out_rsts ",
	"tcp_ipv6_in_segs ",
	"tcp_ipv6_out_segs ",
	"tcp_ipv6_retrans_segs ",
	"usm_ddp_frames ",
	"usm_ddp_octets ",
	"usm_ddp_drops ",
	"rdma_no_rqe_mod_defer ",
	"rdma_no_rqe_pkt_defer ",
	"tp_err_ofld_no_neigh ",
	"tp_err_ofld_cong_defer ",
	"write_coal_success ",
	"write_coal_fail ",
};

static char channel_stats_strings[][ETH_GSTRING_LEN] = {
	"--------Channel--------- ",
	"tp_cpl_requests ",
	"tp_cpl_responses ",
	"tp_mac_in_errs ",
	"tp_hdr_in_errs ",
	"tp_tcp_in_errs ",
	"tp_tcp6_in_errs ",
"tp_tnl_cong_drops ", 145 "tp_tnl_tx_drops ", 146 "tp_ofld_vlan_drops ", 147 "tp_ofld_chan_drops ", 148 "fcoe_octets_ddp ", 149 "fcoe_frames_ddp ", 150 "fcoe_frames_drop ", 151 }; 152 153 static char loopback_stats_strings[][ETH_GSTRING_LEN] = { 154 "-------Loopback----------- ", 155 "octets_ok ", 156 "frames_ok ", 157 "bcast_frames ", 158 "mcast_frames ", 159 "ucast_frames ", 160 "error_frames ", 161 "frames_64 ", 162 "frames_65_to_127 ", 163 "frames_128_to_255 ", 164 "frames_256_to_511 ", 165 "frames_512_to_1023 ", 166 "frames_1024_to_1518 ", 167 "frames_1519_to_max ", 168 "frames_dropped ", 169 "bg0_frames_dropped ", 170 "bg1_frames_dropped ", 171 "bg2_frames_dropped ", 172 "bg3_frames_dropped ", 173 "bg0_frames_trunc ", 174 "bg1_frames_trunc ", 175 "bg2_frames_trunc ", 176 "bg3_frames_trunc ", 177 }; 178 179 static int get_sset_count(struct net_device *dev, int sset) 180 { 181 switch (sset) { 182 case ETH_SS_STATS: 183 return ARRAY_SIZE(stats_strings) + 184 ARRAY_SIZE(adapter_stats_strings) + 185 ARRAY_SIZE(channel_stats_strings) + 186 ARRAY_SIZE(loopback_stats_strings); 187 default: 188 return -EOPNOTSUPP; 189 } 190 } 191 192 static int get_regs_len(struct net_device *dev) 193 { 194 struct adapter *adap = netdev2adap(dev); 195 196 return t4_get_regs_len(adap); 197 } 198 199 static int get_eeprom_len(struct net_device *dev) 200 { 201 return EEPROMSIZE; 202 } 203 204 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 205 { 206 struct adapter *adapter = netdev2adap(dev); 207 u32 exprom_vers; 208 209 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); 210 strlcpy(info->version, cxgb4_driver_version, 211 sizeof(info->version)); 212 strlcpy(info->bus_info, pci_name(adapter->pdev), 213 sizeof(info->bus_info)); 214 info->regdump_len = get_regs_len(dev); 215 216 if (!adapter->params.fw_vers) 217 strcpy(info->fw_version, "N/A"); 218 else 219 snprintf(info->fw_version, sizeof(info->fw_version), 220 "%u.%u.%u.%u, TP %u.%u.%u.%u", 221 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), 222 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), 223 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), 224 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), 225 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), 226 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 227 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 228 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 229 230 if (!t4_get_exprom_version(adapter, &exprom_vers)) 231 snprintf(info->erom_version, sizeof(info->erom_version), 232 "%u.%u.%u.%u", 233 FW_HDR_FW_VER_MAJOR_G(exprom_vers), 234 FW_HDR_FW_VER_MINOR_G(exprom_vers), 235 FW_HDR_FW_VER_MICRO_G(exprom_vers), 236 FW_HDR_FW_VER_BUILD_G(exprom_vers)); 237 } 238 239 static void get_strings(struct net_device *dev, u32 stringset, u8 *data) 240 { 241 if (stringset == ETH_SS_STATS) { 242 memcpy(data, stats_strings, sizeof(stats_strings)); 243 data += sizeof(stats_strings); 244 memcpy(data, adapter_stats_strings, 245 sizeof(adapter_stats_strings)); 246 data += sizeof(adapter_stats_strings); 247 memcpy(data, channel_stats_strings, 248 sizeof(channel_stats_strings)); 249 data += sizeof(channel_stats_strings); 250 memcpy(data, loopback_stats_strings, 251 sizeof(loopback_stats_strings)); 252 } 253 } 254 255 /* port stats maintained per queue of the port. They should be in the same 256 * order as in stats_strings above. 
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

struct adapter_stats {
	u64 db_drop;
	u64 db_full;
	u64 db_empty;
	u64 tcp_v4_out_rsts;
	u64 tcp_v4_in_segs;
	u64 tcp_v4_out_segs;
	u64 tcp_v4_retrans_segs;
	u64 tcp_v6_out_rsts;
	u64 tcp_v6_in_segs;
	u64 tcp_v6_out_segs;
	u64 tcp_v6_retrans_segs;
	u64 frames;
	u64 octets;
	u64 drops;
	u64 rqe_dfr_mod;
	u64 rqe_dfr_pkt;
	u64 ofld_no_neigh;
	u64 ofld_cong_defer;
	u64 wc_success;
	u64 wc_fail;
};

struct channel_stats {
	u64 cpl_req;
	u64 cpl_rsp;
	u64 mac_in_errs;
	u64 hdr_in_errs;
	u64 tcp_in_errs;
	u64 tcp6_in_errs;
	u64 tnl_cong_drops;
	u64 tnl_tx_drops;
	u64 ofld_vlan_drops;
	u64 ofld_chan_drops;
	u64 octets_ddp;
	u64 frames_ddp;
	u64 frames_drop;
};

static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
	struct tp_tcp_stats v4, v6;
	struct tp_rdma_stats rdma_stats;
	struct tp_err_stats err_stats;
	struct tp_usm_stats usm_stats;
	u64 val1, val2;

	memset(s, 0, sizeof(*s));

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, &v4, &v6);
	t4_tp_get_rdma_stats(adap, &rdma_stats);
	t4_get_usm_stats(adap, &usm_stats);
	t4_tp_get_err_stats(adap, &err_stats);
	spin_unlock(&adap->stats_lock);

	s->db_drop = adap->db_stats.db_drop;
	s->db_full = adap->db_stats.db_full;
	s->db_empty = adap->db_stats.db_empty;

	s->tcp_v4_out_rsts = v4.tcp_out_rsts;
	s->tcp_v4_in_segs = v4.tcp_in_segs;
	s->tcp_v4_out_segs = v4.tcp_out_segs;
	s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
	s->tcp_v6_out_rsts = v6.tcp_out_rsts;
	s->tcp_v6_in_segs = v6.tcp_in_segs;
	s->tcp_v6_out_segs = v6.tcp_out_segs;
	s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;

	if (is_offload(adap)) {
		s->frames = usm_stats.frames;
		s->octets = usm_stats.octets;
		s->drops = usm_stats.drops;
		s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
		s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
	}

	s->ofld_no_neigh = err_stats.ofld_no_neigh;
	s->ofld_cong_defer = err_stats.ofld_cong_defer;

	if (!is_t4(adap->params.chip)) {
		int v;

		v = t4_read_reg(adap, SGE_STAT_CFG_A);
		if (STATSOURCE_T5_G(v) == 7) {
			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
			s->wc_success = val1 - val2;
			s->wc_fail = val2;
		}
	}
}

static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
				  u8 i)
{
	struct tp_cpl_stats cpl_stats;
	struct tp_err_stats err_stats;
	struct tp_fcoe_stats fcoe_stats;

	memset(s, 0, sizeof(*s));

	spin_lock(&adap->stats_lock);
	t4_tp_get_cpl_stats(adap, &cpl_stats);
	t4_tp_get_err_stats(adap, &err_stats);
	t4_get_fcoe_stats(adap, i, &fcoe_stats);
	spin_unlock(&adap->stats_lock);

	s->cpl_req = cpl_stats.req[i];
	s->cpl_rsp = cpl_stats.rsp[i];
	s->mac_in_errs = err_stats.mac_in_errs[i];
	s->hdr_in_errs = err_stats.hdr_in_errs[i];
	s->tcp_in_errs = err_stats.tcp_in_errs[i];
	s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
	s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
	s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
	s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
	s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
	s->octets_ddp = fcoe_stats.octets_ddp;
	s->frames_ddp = fcoe_stats.frames_ddp;
	s->frames_drop = fcoe_stats.frames_drop;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct lb_port_stats s;
	int i;
	u64 *p0;

	t4_get_port_stats_offset(adapter, pi->tx_chan,
				 (struct port_stats *)data,
				 &pi->stats_base);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	collect_adapter_stats(adapter, (struct adapter_stats *)data);
	data += sizeof(struct adapter_stats) / sizeof(u64);

	*data++ = (u64)pi->port_id;
	collect_channel_stats(adapter, (struct channel_stats *)data,
			      pi->port_id);
	data += sizeof(struct channel_stats) / sizeof(u64);

	*data++ = (u64)pi->port_id;
	memset(&s, 0, sizeof(s));
	t4_get_lb_stats(adapter, pi->port_id, &s);

	p0 = &s.octets;
	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
		*data++ = (unsigned long long)*p0++;
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *adap = netdev2adap(dev);
	size_t buf_size;

	buf_size = t4_get_regs_len(adap);
	regs->version = mk_adap_vers(adap);
	t4_get_regs(adap, buf, buf_size);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}

/**
 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 * @port_type: Firmware Port Type
 * @mod_type: Firmware Module Type
 *
 * Translate Firmware Port/Module type to Ethtool Port Type.
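 *
 * For example, an SFP port (FW_PORT_TYPE_SFP) carrying an SR or LR module
 * reports PORT_FIBRE, while the same port with a passive or active twinax
 * cable reports PORT_DA.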
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
				 enum fw_port_module_type mod_type)
{
	if (port_type == FW_PORT_TYPE_BT_SGMII ||
	    port_type == FW_PORT_TYPE_BT_XFI ||
	    port_type == FW_PORT_TYPE_BT_XAUI) {
		return PORT_TP;
	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
		return PORT_FIBRE;
	} else if (port_type == FW_PORT_TYPE_SFP ||
		   port_type == FW_PORT_TYPE_QSFP_10G ||
		   port_type == FW_PORT_TYPE_QSA ||
		   port_type == FW_PORT_TYPE_QSFP ||
		   port_type == FW_PORT_TYPE_CR4_QSFP ||
		   port_type == FW_PORT_TYPE_CR_QSFP ||
		   port_type == FW_PORT_TYPE_CR2_QSFP ||
		   port_type == FW_PORT_TYPE_SFP28) {
		if (mod_type == FW_PORT_MOD_TYPE_LR ||
		    mod_type == FW_PORT_MOD_TYPE_SR ||
		    mod_type == FW_PORT_MOD_TYPE_ER ||
		    mod_type == FW_PORT_MOD_TYPE_LRM)
			return PORT_FIBRE;
		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			return PORT_DA;
		else
			return PORT_OTHER;
	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
		   port_type == FW_PORT_TYPE_KR_SFP28) {
		return PORT_NONE;
	}

	return PORT_OTHER;
}

/**
 * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
 * @speed: speed in Mb/s
 *
 * Translates a specific Port Speed into a Firmware Port Capabilities
 * value.
 */
static unsigned int speed_to_fw_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == 25000)
		return FW_PORT_CAP_SPEED_25G;
	if (speed == 40000)
		return FW_PORT_CAP_SPEED_40G;
	if (speed == 100000)
		return FW_PORT_CAP_SPEED_100G;
	return 0;
}

/**
 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @link_mode_mask: ethtool Link Mode Mask
 *
 * Translate a Firmware Port Capabilities specification to an ethtool
 * Link Mode Mask.
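 *
 * For example, a FW_PORT_TYPE_BT_SGMII port with FW_PORT_CAP_SPEED_1G set
 * gets the 1000baseT_Full link mode bit.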
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   unsigned int fw_caps,
			   unsigned long *link_mode_mask)
{
	#define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \
			 ## _BIT, link_mode_mask)

	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		SET_LMM(10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		SET_LMM(10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR_SFP28:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(50000baseSR2_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(100000baseCR4_Full);
		break;

	default:
		break;
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}

/**
 * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
 * capabilities
 *
 * @link_mode_mask: ethtool Link Mode Mask
 *
 * Translate ethtool Link Mode Mask into a Firmware Port capabilities
 * value.
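 *
 * For example, a mask with ETHTOOL_LINK_MODE_25000baseCR_Full_BIT set
 * yields FW_PORT_CAP_SPEED_25G.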
 */
static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
{
	unsigned int fw_caps = 0;

	#define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
		do { \
			if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
				     link_mode_mask)) \
				fw_caps |= FW_PORT_CAP_ ## __fw_name; \
		} while (0)

	LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
	LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
	LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
	LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
	LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
	LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);

	#undef LMM_TO_FW_CAPS

	return fw_caps;
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	/* For the nonce, the Firmware doesn't send up Port State changes
	 * when the Virtual Interface attached to the Port is down. So
	 * if it's down, let's grab any changes.
	 */
	if (!netif_running(dev))
		(void)t4_update_port_info(pi);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
		       link_ksettings->link_modes.lp_advertising);

	if (netif_carrier_ok(dev)) {
		base->speed = pi->link_cfg.speed;
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings
			      *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct link_config *lc = &pi->link_cfg;
	const struct ethtool_link_settings *base = &link_ksettings->base;
	struct link_config old_lc;
	unsigned int fw_caps;
	int ret = 0;

	/* only full-duplex supported */
	if (base->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY offers a single speed. See if that's what's
		 * being requested.
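		 * (That is, autoneg must be disabled and the requested
		 * speed must be one the PHY actually supports.)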
		 */
		if (base->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_fw_caps(base->speed)))
			return 0;
		return -EINVAL;
	}

	old_lc = *lc;
	if (base->autoneg == AUTONEG_DISABLE) {
		fw_caps = speed_to_fw_caps(base->speed);

		if (!(lc->supported & fw_caps))
			return -EINVAL;
		lc->requested_speed = fw_caps;
		lc->advertising = 0;
	} else {
		fw_caps =
			lmm_to_fw_caps(link_ksettings->link_modes.advertising);

		if (!(lc->supported & fw_caps))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = fw_caps | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = base->autoneg;

	/* If the firmware rejects the Link Configuration request, back out
	 * the changes and report the error.
	 */
	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
	if (ret)
		*lc = old_lc;

	return ret;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

/**
 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 * @dev: the network device
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}

static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
	int i;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++)
		q->rspq.adaptive_rx = adaptive_rx;

	return 0;
}

static int get_adaptive_rx_setting(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	return q->rspq.adaptive_rx;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
				  c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
	return 0;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}
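
/* Worked example for eeprom_ptov() above, assuming EEPROMPFSIZE is 1K:
 * for PF1 (fn = 1, so A = 1K), physical address 0x200 maps to 31K + 0x200,
 * physical 1K + 0x100 maps to 31K - 1K + 0x100 = 30K + 0x100, and
 * physical 2K + 0x40 maps to 2K + 0x40 - 1K - 1K = 0x40.
 */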

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);
	u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kvfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* RMW possibly needed for first or last words.
		 */
		buf = kvzalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else {
		buf = data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kvfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = PCIE_FW_MASTER_M + 1;
	u32 pcie_fw;
	unsigned int master;
	u8 master_vld = 0;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	master = PCIE_FW_MASTER_G(pcie_fw);
	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
		master_vld = 1;
	/* if csiostor is the master return */
	if (master_vld && (master != adap->pf)) {
		dev_warn(adap->pdev_dev,
			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
		return -EOPNOTSUPP;
	}

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
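	 * (The final argument to t4_fw_upgrade() below is that "force" flag.)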
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev,
			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
	return ret;
}

static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_RX_SOFTWARE |
				   SOF_TIMESTAMPING_SOFTWARE;

	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
				    SOF_TIMESTAMPING_TX_HARDWARE |
				    SOF_TIMESTAMPING_RAW_HARDWARE;

	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			    (1 << HWTSTAMP_TX_ON);

	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	if (adapter->ptp_clock)
		ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		ts_info->phc_index = -1;

	return 0;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
			 const u8 hfunc)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;

	/* Interface must be brought up at least once */
	if (pi->adapter->flags & FULL_INIT_DONE) {
		for (i = 0; i < pi->rss_size; i++)
			pi->rss[i] = p[i];

		return cxgb4_write_rss(pi, pi->rss);
	}

	return -EPERM;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
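		/* The IPv6 flow types below mirror the IPv4 handling above. */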
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = identify_port,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_rxnfc = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh = get_rss_table,
	.set_rxfh = set_rss_table,
	.flash_device = set_flash,
	.get_ts_info = get_ts_info
};

void cxgb4_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &cxgb_ethtool_ops;
}