/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <linux/ethtool.h>

struct be_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	int size;
	int offset;
};

enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
					offsetof(_struct, field)
#define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
					FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
					FIELDINFO(struct be_rx_stats, field)
#define DRVSTAT_INFO(field)	#field, DRVSTAT,\
					FIELDINFO(struct be_drv_stats, field)

static const struct be_ethtool_stat et_stats[] = {
	{DRVSTAT_INFO(rx_crc_errors)},
	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
	{DRVSTAT_INFO(rx_pause_frames)},
	{DRVSTAT_INFO(rx_control_frames)},
	/* Received packets dropped when the Ethernet length field
	 * is not equal to the actual Ethernet data length.
	 */
	{DRVSTAT_INFO(rx_in_range_errors)},
	/* Received packets dropped when their length field is >= 1501 bytes
	 * and <= 1535 bytes.
	 */
	{DRVSTAT_INFO(rx_out_range_errors)},
	/* Received packets dropped when they are longer than 9216 bytes */
	{DRVSTAT_INFO(rx_frame_too_long)},
	/* Received packets dropped when they don't pass the unicast or
	 * multicast address filtering.
	 */
	{DRVSTAT_INFO(rx_address_mismatch_drops)},
	/* Received packets dropped when IP packet length field is less than
	 * the IP header length field.
	 */
	{DRVSTAT_INFO(rx_dropped_too_small)},
	/* Received packets dropped when IP length field is greater than
	 * the actual packet length.
	 */
	{DRVSTAT_INFO(rx_dropped_too_short)},
	/* Received packets dropped when the IP header length field is less
	 * than 5.
	 */
	{DRVSTAT_INFO(rx_dropped_header_too_small)},
	/* Received packets dropped when the TCP header length field is less
	 * than 5 or the TCP header length + IP header length is more
	 * than IP packet length.
	 */
	{DRVSTAT_INFO(rx_dropped_tcp_length)},
	{DRVSTAT_INFO(rx_dropped_runt)},
	/* Number of received packets dropped when a fifo for descriptors going
	 * into the packet demux block overflows. In normal operation, this
	 * fifo must never overflow.
	 */
	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
	{DRVSTAT_INFO(rx_ip_checksum_errs)},
	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
	{DRVSTAT_INFO(rx_udp_checksum_errs)},
	{DRVSTAT_INFO(tx_pauseframes)},
	{DRVSTAT_INFO(tx_controlframes)},
	{DRVSTAT_INFO(rx_priority_pause_frames)},
	/* Received packets dropped when an internal fifo going into
	 * main packet buffer tank (PMEM) overflows.
	 */
	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
	{DRVSTAT_INFO(jabber_events)},
	/* Received packets dropped due to lack of available HW packet buffers
	 * used to temporarily hold the received packets.
	 */
	{DRVSTAT_INFO(rx_drops_no_pbuf)},
	/* Received packets dropped due to input receive buffer
	 * descriptor fifo overflowing.
	 */
	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
	/* Packets dropped because the internal FIFO to the offloaded TCP
	 * receive processing block is full. This could happen only for
	 * offloaded iSCSI or FCoE traffic.
	 */
	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
	/* Received packets dropped when they need more than 8
	 * receive buffers. This cannot happen as the driver configures
	 * 2048 byte receive buffers.
	 */
	{DRVSTAT_INFO(rx_drops_too_many_frags)},
	{DRVSTAT_INFO(forwarded_packets)},
	/* Received packets dropped when the frame length
	 * is more than 9018 bytes
	 */
	{DRVSTAT_INFO(rx_drops_mtu)},
	/* Number of packets dropped due to random early drop function */
	{DRVSTAT_INFO(eth_red_drops)},
	{DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)

/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
 * are first and second members respectively.
 */
static const struct be_ethtool_stat et_rx_stats[] = {
	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
	{DRVSTAT_RX_INFO(rx_compl)},
	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
	/* Number of page allocation failures while posting receive buffers
	 * to HW.
	 */
	{DRVSTAT_RX_INFO(rx_post_fail)},
	/* Received packets dropped due to skb allocation failure */
	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
	/* Received packets dropped due to lack of available fetched buffers
	 * posted by the driver.
	 */
	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))

/* Stats related to multi TX queues: get_stats routine assumes compl is the
 * first member
 */
static const struct be_ethtool_stat et_tx_stats[] = {
	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
	{DRVSTAT_TX_INFO(tx_bytes)},
	{DRVSTAT_TX_INFO(tx_pkts)},
	/* Number of skbs queued for transmission by the driver */
	{DRVSTAT_TX_INFO(tx_reqs)},
	/* Number of TX work request blocks DMAed to HW */
	{DRVSTAT_TX_INFO(tx_wrbs)},
	/* Number of times the TX queue was stopped due to lack
	 * of space in the TXQ.
	 */
	{DRVSTAT_TX_INFO(tx_stops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))

static const char et_self_tests[][ETH_GSTRING_LEN] = {
	"MAC Loopback test",
	"PHY Loopback test",
	"External Loopback test",
	"DDR DMA test",
	"Link test"
};

#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
#define BE_NO_LOOPBACK 0xff

static void be_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	char fw_on_flash[FW_VER_LEN];

	memset(fw_on_flash, 0, sizeof(fw_on_flash));
	be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
	if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
		strcat(drvinfo->fw_version, " [");
		strcat(drvinfo->fw_version, fw_on_flash);
		strcat(drvinfo->fw_version, "]");
	}

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32
lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
	u32 data_read = 0, eof;
	u8 addn_status;
	struct be_dma_mem data_len_cmd;
	int status;

	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
	/* data_offset and data_size should be 0 to get reg len */
	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
					file_name, &data_read, &eof,
					&addn_status);

	return data_read;
}

static int
lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
		     u32 buf_len, void *buf)
{
	struct be_dma_mem read_cmd;
	u32 read_len = 0, total_read_len = 0, chunk_size;
	u32 eof = 0;
	u8 addn_status;
	int status = 0;

	read_cmd.size = LANCER_READ_FILE_CHUNK;
	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
					   &read_cmd.dma);

	if (!read_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading dump\n");
		return -ENOMEM;
	}

	while ((total_read_len < buf_len) && !eof) {
		chunk_size = min_t(u32, (buf_len - total_read_len),
				   LANCER_READ_FILE_CHUNK);
		chunk_size = ALIGN(chunk_size, 4);
		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
						total_read_len, file_name,
						&read_len, &eof, &addn_status);
		if (!status) {
			memcpy(buf + total_read_len, read_cmd.va, read_len);
			total_read_len += read_len;
			eof &= LANCER_READ_FILE_EOF_MASK;
		} else {
			status = -EIO;
			break;
		}
	}
	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
			    read_cmd.dma);

	return status;
}

static int
be_get_reg_len(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u32 log_size = 0;

	if (be_physfn(adapter)) {
		if (lancer_chip(adapter))
			log_size = lancer_cmd_get_file_len(adapter,
							   LANCER_FW_DUMP_FILE);
		else
			be_cmd_get_reg_len(adapter, &log_size);
	}
	return log_size;
}

static void
be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (be_physfn(adapter)) {
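		/* Dump contents are fetched only on the PF; be_get_reg_len()
		 * above reports a zero-length dump for VFs, so in practice
		 * this path is reached only for the physical function.
		 */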
		memset(buf, 0, regs->len);
		if (lancer_chip(adapter))
			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
					     regs->len, buf);
		else
			be_cmd_get_regs(adapter, regs->len, buf);
	}
}

static int be_get_coalesce(struct net_device *netdev,
			   struct ethtool_coalesce *et)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo = &adapter->eq_obj[0];

	et->rx_coalesce_usecs = eqo->cur_eqd;
	et->rx_coalesce_usecs_high = eqo->max_eqd;
	et->rx_coalesce_usecs_low = eqo->min_eqd;

	et->tx_coalesce_usecs = eqo->cur_eqd;
	et->tx_coalesce_usecs_high = eqo->max_eqd;
	et->tx_coalesce_usecs_low = eqo->min_eqd;

	et->use_adaptive_rx_coalesce = eqo->enable_aic;
	et->use_adaptive_tx_coalesce = eqo->enable_aic;

	return 0;
}

/* TX attributes are ignored. Only RX attributes are considered.
 * The eqd cmd is issued in the worker thread.
 */
static int be_set_coalesce(struct net_device *netdev,
			   struct ethtool_coalesce *et)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		eqo->enable_aic = et->use_adaptive_rx_coalesce;
		eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
		eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
		eqo->eqd = et->rx_coalesce_usecs;
	}

	return 0;
}

static void
be_get_ethtool_stats(struct net_device *netdev,
		     struct ethtool_stats *stats, uint64_t *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	void *p;
	unsigned int i, j, base = 0, start;

	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
		data[i] = *(u32 *)p;
	}
	base += ETHTOOL_STATS_NUM;

	for_all_rx_queues(adapter, rxo, j) {
		struct be_rx_stats *stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_bh(&stats->sync);
			data[base] = stats->rx_bytes;
			data[base + 1] = stats->rx_pkts;
		} while (u64_stats_fetch_retry_bh(&stats->sync, start));

		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
			p = (u8 *)stats + et_rx_stats[i].offset;
			data[base + i] = *(u32 *)p;
		}
		base += ETHTOOL_RXSTATS_NUM;
	}

	for_all_tx_queues(adapter, txo, j) {
		struct be_tx_stats *stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
			data[base] = stats->tx_compl;
		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->sync);
			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
				p = (u8 *)stats + et_tx_stats[i].offset;
				data[base + i] =
					(et_tx_stats[i].size == sizeof(u64)) ?
					*(u64 *)p : *(u32 *)p;
			}
		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
		base += ETHTOOL_TXSTATS_NUM;
	}
}

static void
be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
		    uint8_t *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
			memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_qs; i++) {
			for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
				sprintf(data, "rxq%d: %s", i,
					et_rx_stats[j].desc);
				data += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < adapter->num_tx_qs; i++) {
			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
				sprintf(data, "txq%d: %s", i,
					et_tx_stats[j].desc);
				data += ETH_GSTRING_LEN;
			}
		}
		break;
	case ETH_SS_TEST:
		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int be_get_sset_count(struct net_device *netdev, int stringset)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_TEST:
		return ETHTOOL_TESTS_NUM;
	case ETH_SS_STATS:
		return ETHTOOL_STATS_NUM +
			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
	default:
		return -EINVAL;
	}
}

static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
{
	u32 port;

	switch (phy_type) {
	case PHY_TYPE_BASET_1GB:
	case PHY_TYPE_BASEX_1GB:
	case PHY_TYPE_SGMII:
		port = PORT_TP;
		break;
	case PHY_TYPE_SFP_PLUS_10GB:
		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
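		/* A non-zero dac_cable_len indicates an SFP+ direct-attach
		 * (copper) cable, reported as PORT_DA; optical SFP+ modules
		 * are reported as PORT_FIBRE.
		 */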
		break;
	case PHY_TYPE_XFP_10GB:
	case PHY_TYPE_SFP_1GB:
		port = PORT_FIBRE;
		break;
	case PHY_TYPE_BASET_10GB:
		port = PORT_TP;
		break;
	default:
		port = PORT_OTHER;
	}

	return port;
}

static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
{
	u32 val = 0;

	switch (if_type) {
	case PHY_TYPE_BASET_1GB:
	case PHY_TYPE_BASEX_1GB:
	case PHY_TYPE_SGMII:
		val |= SUPPORTED_TP;
		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
			val |= SUPPORTED_1000baseT_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
			val |= SUPPORTED_100baseT_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
			val |= SUPPORTED_10baseT_Full;
		break;
	case PHY_TYPE_KX4_10GB:
		val |= SUPPORTED_Backplane;
		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
			val |= SUPPORTED_1000baseKX_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
			val |= SUPPORTED_10000baseKX4_Full;
		break;
	case PHY_TYPE_KR_10GB:
		val |= SUPPORTED_Backplane |
			SUPPORTED_10000baseKR_Full;
		break;
	case PHY_TYPE_SFP_PLUS_10GB:
	case PHY_TYPE_XFP_10GB:
	case PHY_TYPE_SFP_1GB:
		val |= SUPPORTED_FIBRE;
		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
			val |= SUPPORTED_10000baseT_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
			val |= SUPPORTED_1000baseT_Full;
		break;
	case PHY_TYPE_BASET_10GB:
		val |= SUPPORTED_TP;
		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
			val |= SUPPORTED_10000baseT_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
			val |= SUPPORTED_1000baseT_Full;
		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
			val |= SUPPORTED_100baseT_Full;
		break;
	default:
		val |= SUPPORTED_TP;
	}

	return val;
}

bool be_pause_supported(struct be_adapter *adapter)
{
	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
			false : true;
}

static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u8 link_status;
	u16 link_speed = 0;
	int status;

	if (adapter->phy.link_speed < 0) {
		status = be_cmd_link_status_query(adapter, &link_speed,
						  &link_status, 0);
		if (!status)
			be_link_status_update(adapter, link_status);
		ethtool_cmd_speed_set(ecmd, link_speed);

		status = be_cmd_get_phy_info(adapter);
		if (status)
			return status;

		ecmd->supported =
			convert_to_et_setting(adapter->phy.interface_type,
					      adapter->phy.auto_speeds_supported |
					      adapter->phy.fixed_speeds_supported);
		ecmd->advertising =
			convert_to_et_setting(adapter->phy.interface_type,
					      adapter->phy.auto_speeds_supported);

		ecmd->port = be_get_port_type(adapter->phy.interface_type,
					      adapter->phy.dac_cable_len);

		if (adapter->phy.auto_speeds_supported) {
			ecmd->supported |= SUPPORTED_Autoneg;
			ecmd->autoneg = AUTONEG_ENABLE;
			ecmd->advertising |= ADVERTISED_Autoneg;
		}

		if (be_pause_supported(adapter)) {
			ecmd->supported |= SUPPORTED_Pause;
			ecmd->advertising |= ADVERTISED_Pause;
		}

		switch (adapter->phy.interface_type) {
		case PHY_TYPE_KR_10GB:
		case PHY_TYPE_KX4_10GB:
			ecmd->transceiver = XCVR_INTERNAL;
			break;
		default:
			ecmd->transceiver = XCVR_EXTERNAL;
			break;
		}

		/* Save for future use */
		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
		adapter->phy.port_type = ecmd->port;
		adapter->phy.transceiver = ecmd->transceiver;
		adapter->phy.autoneg = ecmd->autoneg;
		adapter->phy.advertising = ecmd->advertising;
		adapter->phy.supported = ecmd->supported;
	} else {
		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
		ecmd->port = adapter->phy.port_type;
		ecmd->transceiver = adapter->phy.transceiver;
		ecmd->autoneg = adapter->phy.autoneg;
		ecmd->advertising = adapter->phy.advertising;
		ecmd->supported = adapter->phy.supported;
	}

	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
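	/* Only full-duplex modes are built into the supported/advertised
	 * masks above, so duplex is reported as full whenever the carrier
	 * is up and as unknown otherwise.
	 */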
	ecmd->phy_address = adapter->port_num;

	return 0;
}

static void be_get_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}

static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
	ecmd->autoneg = adapter->phy.fc_autoneg;
}

static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (ecmd->autoneg != adapter->phy.fc_autoneg)
		return -EINVAL;
	adapter->tx_fc = ecmd->tx_pause;
	adapter->rx_fc = ecmd->rx_pause;

	status = be_cmd_set_flow_control(adapter,
					 adapter->tx_fc, adapter->rx_fc);
	if (status)
		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");

	return status;
}

static int
be_set_phys_id(struct net_device *netdev,
	       enum ethtool_phys_id_state state)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
					&adapter->beacon_state);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
					BEACON_STATE_ENABLED);
		break;

	case ETHTOOL_ID_OFF:
		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
					BEACON_STATE_DISABLED);
		break;

	case ETHTOOL_ID_INACTIVE:
		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
					adapter->beacon_state);
	}

	return 0;
}

static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (be_is_wol_supported(adapter)) {
		wol->supported |= WAKE_MAGIC;
		wol->wolopts |= WAKE_MAGIC;
	} else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;

	if (!be_is_wol_supported(adapter)) {
		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
		return -EOPNOTSUPP;
	}

	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol = true;
	else
		adapter->wol = false;

	return 0;
}

static int
be_test_ddr_dma(struct be_adapter *adapter)
{
	int ret, i;
	struct be_dma_mem ddrdma_cmd;
	static const u64 pattern[2] = {
		0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
	};

	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
					   &ddrdma_cmd.dma, GFP_KERNEL);
	if (!ddrdma_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
					  4096, &ddrdma_cmd);
		if (ret != 0)
			goto err;
	}

err:
	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
			  ddrdma_cmd.dma);
	return ret;
}

static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
			    u64 *status)
{
	be_cmd_set_loopback(adapter, adapter->hba_port_num,
			    loopback_type, 1);
	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
				       loopback_type, 1500,
				       2, 0xabc);
	be_cmd_set_loopback(adapter, adapter->hba_port_num,
			    BE_NO_LOOPBACK, 1);
	return *status;
}

static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;
	u8 link_status = 0;

	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
				     &data[0]) != 0) {
			test->flags |= ETH_TEST_FL_FAILED;
		}
		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
				     &data[1]) != 0) {
			test->flags |= ETH_TEST_FL_FAILED;
		}
		if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
				     &data[2]) != 0) {
			test->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
		data[3] = 1;
		test->flags |= ETH_TEST_FL_FAILED;
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (status) {
		test->flags |= ETH_TEST_FL_FAILED;
		data[4] = -1;
	} else if (!link_status) {
		test->flags |= ETH_TEST_FL_FAILED;
		data[4] = 1;
	}
}

static int
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	return be_load_fw(adapter, efl->data);
}

static int
be_get_eeprom_len(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter)) {
		if (be_physfn(adapter))
			return lancer_cmd_get_file_len(adapter,
						       LANCER_VPD_PF_FILE);
		else
			return lancer_cmd_get_file_len(adapter,
						       LANCER_VPD_VF_FILE);
	} else {
		return BE_READ_SEEPROM_LEN;
	}
}

static int
be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       uint8_t *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_dma_mem eeprom_cmd;
	struct be_cmd_resp_seeprom_read *resp;
	int status;

	if (!eeprom->len)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		if (be_physfn(adapter))
			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
						    eeprom->len, data);
		else
			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
						    eeprom->len, data);
	}

	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);

	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
					   &eeprom_cmd.dma, GFP_KERNEL);

	if (!eeprom_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure. Could not read eeprom\n");
		return -ENOMEM;
	}

	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);

	if (!status) {
		resp = eeprom_cmd.va;
		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
	}
	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
			  eeprom_cmd.dma);

	return status;
}

static u32 be_get_msg_level(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter)) {
		dev_err(&adapter->pdev->dev, "Operation not supported\n");
		return -EOPNOTSUPP;
	}

	return adapter->msg_enable;
}

static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	int i, j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);
	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}
	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
					sizeof(struct be_cmd_resp_hdr));
		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
			for (j = 0; j < num_modes; j++) {
				if (cfgs->module[i].trace_lvl[j].mode ==
				    MODE_UART)
					cfgs->module[i].trace_lvl[j].dbg_lvl =
						cpu_to_le32(level);
			}
		}
		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
							cfgs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Message level set failed\n");
	} else {
		dev_err(&adapter->pdev->dev, "Message level get failed\n");
	}

	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return;
}

static void be_set_msg_level(struct net_device *netdev, u32 level)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter)) {
		dev_err(&adapter->pdev->dev, "Operation not supported\n");
		return;
	}

	if (adapter->msg_enable == level)
		return;

	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
	adapter->msg_enable = level;

	return;
}

const struct ethtool_ops be_ethtool_ops = {
	.get_settings = be_get_settings,
	.get_drvinfo = be_get_drvinfo,
	.get_wol = be_get_wol,
	.set_wol = be_set_wol,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = be_get_eeprom_len,
	.get_eeprom = be_read_eeprom,
	.get_coalesce = be_get_coalesce,
	.set_coalesce = be_set_coalesce,
	.get_ringparam = be_get_ringparam,
	.get_pauseparam = be_get_pauseparam,
	.set_pauseparam = be_set_pauseparam,
	.get_strings = be_get_stat_strings,
	.set_phys_id = be_set_phys_id,
	.get_msglevel = be_get_msg_level,
	.set_msglevel = be_set_msg_level,
	.get_sset_count = be_get_sset_count,
	.get_ethtool_stats = be_get_ethtool_stats,
	.get_regs_len = be_get_reg_len,
	.get_regs = be_get_regs,
	.flash_device = be_do_flash,
	.self_test = be_self_test,
};