/*
 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/firmware.h>
#include <linux/mdio.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"tx_octets_ok ",
	"tx_frames_ok ",
	"tx_broadcast_frames ",
	"tx_multicast_frames ",
	"tx_unicast_frames ",
	"tx_error_frames ",

	"tx_frames_64 ",
	"tx_frames_65_to_127 ",
	"tx_frames_128_to_255 ",
	"tx_frames_256_to_511 ",
	"tx_frames_512_to_1023 ",
	"tx_frames_1024_to_1518 ",
	"tx_frames_1519_to_max ",

	"tx_frames_dropped ",
	"tx_pause_frames ",
	"tx_ppp0_frames ",
	"tx_ppp1_frames ",
	"tx_ppp2_frames ",
	"tx_ppp3_frames ",
	"tx_ppp4_frames ",
	"tx_ppp5_frames ",
	"tx_ppp6_frames ",
	"tx_ppp7_frames ",

	"rx_octets_ok ",
	"rx_frames_ok ",
	"rx_broadcast_frames ",
	"rx_multicast_frames ",
	"rx_unicast_frames ",

	"rx_frames_too_long ",
	"rx_jabber_errors ",
	"rx_fcs_errors ",
	"rx_length_errors ",
	"rx_symbol_errors ",
	"rx_runt_frames ",

	"rx_frames_64 ",
	"rx_frames_65_to_127 ",
	"rx_frames_128_to_255 ",
	"rx_frames_256_to_511 ",
	"rx_frames_512_to_1023 ",
	"rx_frames_1024_to_1518 ",
	"rx_frames_1519_to_max ",

	"rx_pause_frames ",
	"rx_ppp0_frames ",
	"rx_ppp1_frames ",
	"rx_ppp2_frames ",
	"rx_ppp3_frames ",
	"rx_ppp4_frames ",
	"rx_ppp5_frames ",
	"rx_ppp6_frames ",
	"rx_ppp7_frames ",

	"rx_bg0_frames_dropped ",
	"rx_bg1_frames_dropped ",
	"rx_bg2_frames_dropped ",
	"rx_bg3_frames_dropped ",
	"rx_bg0_frames_trunc ",
	"rx_bg1_frames_trunc ",
	"rx_bg2_frames_trunc ",
	"rx_bg3_frames_trunc ",

	"tso ",
	"tx_csum_offload ",
	"rx_csum_good ",
	"vlan_extractions ",
	"vlan_insertions ",
	"gro_packets ",
	"gro_merged ",
};

static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
	"db_drop ",
	"db_full ",
	"db_empty ",
	"tcp_ipv4_out_rsts ",
	"tcp_ipv4_in_segs ",
	"tcp_ipv4_out_segs ",
	"tcp_ipv4_retrans_segs ",
	"tcp_ipv6_out_rsts ",
	"tcp_ipv6_in_segs ",
	"tcp_ipv6_out_segs ",
	"tcp_ipv6_retrans_segs ",
	"usm_ddp_frames ",
	"usm_ddp_octets ",
	"usm_ddp_drops ",
	"rdma_no_rqe_mod_defer ",
	"rdma_no_rqe_pkt_defer ",
	"tp_err_ofld_no_neigh ",
	"tp_err_ofld_cong_defer ",
	"write_coal_success ",
	"write_coal_fail ",
};

static char channel_stats_strings[][ETH_GSTRING_LEN] = {
	"--------Channel--------- ",
	"tp_cpl_requests ",
	"tp_cpl_responses ",
	"tp_mac_in_errs ",
	"tp_hdr_in_errs ",
	"tp_tcp_in_errs ",
	"tp_tcp6_in_errs ",
"tp_tnl_cong_drops ", 145 "tp_tnl_tx_drops ", 146 "tp_ofld_vlan_drops ", 147 "tp_ofld_chan_drops ", 148 "fcoe_octets_ddp ", 149 "fcoe_frames_ddp ", 150 "fcoe_frames_drop ", 151 }; 152 153 static char loopback_stats_strings[][ETH_GSTRING_LEN] = { 154 "-------Loopback----------- ", 155 "octets_ok ", 156 "frames_ok ", 157 "bcast_frames ", 158 "mcast_frames ", 159 "ucast_frames ", 160 "error_frames ", 161 "frames_64 ", 162 "frames_65_to_127 ", 163 "frames_128_to_255 ", 164 "frames_256_to_511 ", 165 "frames_512_to_1023 ", 166 "frames_1024_to_1518 ", 167 "frames_1519_to_max ", 168 "frames_dropped ", 169 "bg0_frames_dropped ", 170 "bg1_frames_dropped ", 171 "bg2_frames_dropped ", 172 "bg3_frames_dropped ", 173 "bg0_frames_trunc ", 174 "bg1_frames_trunc ", 175 "bg2_frames_trunc ", 176 "bg3_frames_trunc ", 177 }; 178 179 static int get_sset_count(struct net_device *dev, int sset) 180 { 181 switch (sset) { 182 case ETH_SS_STATS: 183 return ARRAY_SIZE(stats_strings) + 184 ARRAY_SIZE(adapter_stats_strings) + 185 ARRAY_SIZE(channel_stats_strings) + 186 ARRAY_SIZE(loopback_stats_strings); 187 default: 188 return -EOPNOTSUPP; 189 } 190 } 191 192 static int get_regs_len(struct net_device *dev) 193 { 194 struct adapter *adap = netdev2adap(dev); 195 196 return t4_get_regs_len(adap); 197 } 198 199 static int get_eeprom_len(struct net_device *dev) 200 { 201 return EEPROMSIZE; 202 } 203 204 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 205 { 206 struct adapter *adapter = netdev2adap(dev); 207 u32 exprom_vers; 208 209 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); 210 strlcpy(info->version, cxgb4_driver_version, 211 sizeof(info->version)); 212 strlcpy(info->bus_info, pci_name(adapter->pdev), 213 sizeof(info->bus_info)); 214 info->regdump_len = get_regs_len(dev); 215 216 if (!adapter->params.fw_vers) 217 strcpy(info->fw_version, "N/A"); 218 else 219 snprintf(info->fw_version, sizeof(info->fw_version), 220 "%u.%u.%u.%u, TP %u.%u.%u.%u", 221 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), 222 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), 223 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), 224 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), 225 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), 226 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 227 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 228 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 229 230 if (!t4_get_exprom_version(adapter, &exprom_vers)) 231 snprintf(info->erom_version, sizeof(info->erom_version), 232 "%u.%u.%u.%u", 233 FW_HDR_FW_VER_MAJOR_G(exprom_vers), 234 FW_HDR_FW_VER_MINOR_G(exprom_vers), 235 FW_HDR_FW_VER_MICRO_G(exprom_vers), 236 FW_HDR_FW_VER_BUILD_G(exprom_vers)); 237 } 238 239 static void get_strings(struct net_device *dev, u32 stringset, u8 *data) 240 { 241 if (stringset == ETH_SS_STATS) { 242 memcpy(data, stats_strings, sizeof(stats_strings)); 243 data += sizeof(stats_strings); 244 memcpy(data, adapter_stats_strings, 245 sizeof(adapter_stats_strings)); 246 data += sizeof(adapter_stats_strings); 247 memcpy(data, channel_stats_strings, 248 sizeof(channel_stats_strings)); 249 data += sizeof(channel_stats_strings); 250 memcpy(data, loopback_stats_strings, 251 sizeof(loopback_stats_strings)); 252 } 253 } 254 255 /* port stats maintained per queue of the port. They should be in the same 256 * order as in stats_strings above. 
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

struct adapter_stats {
	u64 db_drop;
	u64 db_full;
	u64 db_empty;
	u64 tcp_v4_out_rsts;
	u64 tcp_v4_in_segs;
	u64 tcp_v4_out_segs;
	u64 tcp_v4_retrans_segs;
	u64 tcp_v6_out_rsts;
	u64 tcp_v6_in_segs;
	u64 tcp_v6_out_segs;
	u64 tcp_v6_retrans_segs;
	u64 frames;
	u64 octets;
	u64 drops;
	u64 rqe_dfr_mod;
	u64 rqe_dfr_pkt;
	u64 ofld_no_neigh;
	u64 ofld_cong_defer;
	u64 wc_success;
	u64 wc_fail;
};

struct channel_stats {
	u64 cpl_req;
	u64 cpl_rsp;
	u64 mac_in_errs;
	u64 hdr_in_errs;
	u64 tcp_in_errs;
	u64 tcp6_in_errs;
	u64 tnl_cong_drops;
	u64 tnl_tx_drops;
	u64 ofld_vlan_drops;
	u64 ofld_chan_drops;
	u64 octets_ddp;
	u64 frames_ddp;
	u64 frames_drop;
};

static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
	struct tp_tcp_stats v4, v6;
	struct tp_rdma_stats rdma_stats;
	struct tp_err_stats err_stats;
	struct tp_usm_stats usm_stats;
	u64 val1, val2;

	memset(s, 0, sizeof(*s));

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, &v4, &v6);
	t4_tp_get_rdma_stats(adap, &rdma_stats);
	t4_get_usm_stats(adap, &usm_stats);
	t4_tp_get_err_stats(adap, &err_stats);
	spin_unlock(&adap->stats_lock);

	s->db_drop = adap->db_stats.db_drop;
	s->db_full = adap->db_stats.db_full;
	s->db_empty = adap->db_stats.db_empty;

	s->tcp_v4_out_rsts = v4.tcp_out_rsts;
	s->tcp_v4_in_segs = v4.tcp_in_segs;
	s->tcp_v4_out_segs = v4.tcp_out_segs;
	s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
	s->tcp_v6_out_rsts = v6.tcp_out_rsts;
	s->tcp_v6_in_segs = v6.tcp_in_segs;
	s->tcp_v6_out_segs = v6.tcp_out_segs;
	s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;

	if (is_offload(adap)) {
		s->frames = usm_stats.frames;
		s->octets = usm_stats.octets;
		s->drops = usm_stats.drops;
		s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
		s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
	}

	s->ofld_no_neigh = err_stats.ofld_no_neigh;
	s->ofld_cong_defer = err_stats.ofld_cong_defer;

	if (!is_t4(adap->params.chip)) {
		int v;

		v = t4_read_reg(adap, SGE_STAT_CFG_A);
		if (STATSOURCE_T5_G(v) == 7) {
			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
			s->wc_success = val1 - val2;
			s->wc_fail = val2;
		}
	}
}

static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
				  u8 i)
{
	struct tp_cpl_stats cpl_stats;
	struct tp_err_stats err_stats;
	struct tp_fcoe_stats fcoe_stats;

	memset(s, 0, sizeof(*s));

	spin_lock(&adap->stats_lock);
	t4_tp_get_cpl_stats(adap, &cpl_stats);
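	/* The per-channel TP error and FCoE DDP counters below are read
	 * under the same stats_lock as the CPL counters above.
	 */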
	t4_tp_get_err_stats(adap, &err_stats);
	t4_get_fcoe_stats(adap, i, &fcoe_stats);
	spin_unlock(&adap->stats_lock);

	s->cpl_req = cpl_stats.req[i];
	s->cpl_rsp = cpl_stats.rsp[i];
	s->mac_in_errs = err_stats.mac_in_errs[i];
	s->hdr_in_errs = err_stats.hdr_in_errs[i];
	s->tcp_in_errs = err_stats.tcp_in_errs[i];
	s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
	s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
	s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
	s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
	s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
	s->octets_ddp = fcoe_stats.octets_ddp;
	s->frames_ddp = fcoe_stats.frames_ddp;
	s->frames_drop = fcoe_stats.frames_drop;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct lb_port_stats s;
	int i;
	u64 *p0;

	t4_get_port_stats_offset(adapter, pi->tx_chan,
				 (struct port_stats *)data,
				 &pi->stats_base);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	collect_adapter_stats(adapter, (struct adapter_stats *)data);
	data += sizeof(struct adapter_stats) / sizeof(u64);

	*data++ = (u64)pi->port_id;
	collect_channel_stats(adapter, (struct channel_stats *)data,
			      pi->port_id);
	data += sizeof(struct channel_stats) / sizeof(u64);

	*data++ = (u64)pi->port_id;
	memset(&s, 0, sizeof(s));
	t4_get_lb_stats(adapter, pi->port_id, &s);

	p0 = &s.octets;
	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
		*data++ = (unsigned long long)*p0++;
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *adap = netdev2adap(dev);
	size_t buf_size;

	buf_size = t4_get_regs_len(adap);
	regs->version = mk_adap_vers(adap);
	t4_get_regs(adap, buf, buf_size);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR) {
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	} else if (type == FW_PORT_TYPE_BP_AP) {
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	} else if (type == FW_PORT_TYPE_BP4_AP) {
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_FIBER_XFI ||
		   type == FW_PORT_TYPE_FIBER_XAUI ||
		   type == FW_PORT_TYPE_SFP ||
		   type == FW_PORT_TYPE_QSFP_10G ||
		   type == FW_PORT_TYPE_QSA) {
		v |= SUPPORTED_FIBRE;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_BP40_BA ||
		   type == FW_PORT_TYPE_QSFP) {
		v |= SUPPORTED_40000baseSR4_Full;
		v |= SUPPORTED_FIBRE;
	}

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI) {
		cmd->port = PORT_TP;
	} else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		   p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
		cmd->port = PORT_FIBRE;
	} else if (p->port_type == FW_PORT_TYPE_SFP ||
		   p->port_type == FW_PORT_TYPE_QSFP_10G ||
		   p->port_type == FW_PORT_TYPE_QSA ||
		   p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else {
		cmd->port = PORT_OTHER;
	}

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == 40000)
		return FW_PORT_CAP_SPEED_40G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);
	struct link_config old_lc;
	int ret;

	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY offers a single speed. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	old_lc = *lc;
	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	/* If the firmware rejects the Link Configuration request, back out
	 * the changes and report the error.
	 */
	ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
	if (ret)
		*lc = old_lc;

	return ret;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
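	/* Ring sizes may only be changed while the adapter is quiesced
	 * (before FULL_INIT_DONE) and must stay within the SGE limits
	 * checked below.
	 */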

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

/**
 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 * @dev: the network device
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}

static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
	int i;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++)
		q->rspq.adaptive_rx = adaptive_rx;

	return 0;
}

static int get_adaptive_rx_setting(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	return q->rspq.adaptive_rx;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
				  c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
	return 0;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
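 *
 * For example, with A = 2K the mapping gives: physical 0 -> virtual 31K,
 * physical 1K -> virtual 29K, and physical 3K -> virtual 0.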
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);
	u8 *buf = t4_alloc_mem(EEPROMSIZE);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	t4_free_mem(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* RMW possibly needed for first or last words.
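		 * Read back the first and last aligned words so that bytes
		 * outside the requested range are preserved when rewritten.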
		 */
		buf = t4_alloc_mem(aligned_len);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else {
		buf = data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		t4_free_mem(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = PCIE_FW_MASTER_M + 1;
	u32 pcie_fw;
	unsigned int master;
	u8 master_vld = 0;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	master = PCIE_FW_MASTER_G(pcie_fw);
	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
		master_vld = 1;
	/* If csiostor is the master, return */
	if (master_vld && (master != adap->pf)) {
		dev_warn(adap->pdev_dev,
			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
		return -EOPNOTSUPP;
	}

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
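	 * Until the adapter is fully initialized, mbox stays at the
	 * out-of-range value PCIE_FW_MASTER_M + 1 set above.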
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev,
			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
	return ret;
}

static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
{
	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_RX_SOFTWARE |
				   SOF_TIMESTAMPING_SOFTWARE;

	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
				    SOF_TIMESTAMPING_RAW_HARDWARE;

	ts_info->phc_index = -1;

	return 0;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
			 const u8 hfunc)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;

	/* Interface must be brought up at least once */
	if (pi->adapter->flags & FULL_INIT_DONE) {
		for (i = 0; i < pi->rss_size; i++)
			pi->rss[i] = p[i];

		return cxgb4_write_rss(pi, pi->rss);
	}

	return -EPERM;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh          = get_rss_table,
	.set_rxfh          = set_rss_table,
	.flash_device      = set_flash,
	.get_ts_info       = get_ts_info
};

void cxgb4_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &cxgb_ethtool_ops;
}