/* bnx2x_ethtool.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
#include "bnx2x_init.h"

/* Note: in the format strings below %s is replaced by the queue-name which is
 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
 */
#define MAX_QUEUE_NAME_LEN	4

/* Per-queue ethtool statistics table.
 *
 * offset - byte offset of the counter inside the per-queue stats block
 *          (via Q_STATS_OFFSET32);
 * size   - counter width in bytes: 8 entries reference the _hi word of a
 *          split 64-bit counter, 4 entries a single 32-bit counter;
 * string - ethtool stat name template; %s is replaced by the queue name.
 *
 * The order of entries is the order of values emitted to user space, so
 * do not reorder existing entries.
 */
static const struct {
	long offset;
	int size;
	char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%s]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%s]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%s]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%s]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%s]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%s]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },

	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%s]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%s]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%s]: tx_bcast_packets" },
	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
						8, "[%s]: tpa_aggregations" },
	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
					8, "[%s]: tpa_aggregated_frames"},
	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"},
	{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
					4, "[%s]: driver_filtered_tx_pkt" }
};

#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)

/* Global (port/function) ethtool statistics table.
 *
 * Same layout as bnx2x_q_stats_arr plus a flags word: STATS_FLAGS_PORT
 * counters exist per physical port, STATS_FLAGS_FUNC per PCI function,
 * STATS_FLAGS_BOTH for counters maintained at both levels.  Order is
 * user-visible; do not reorder existing entries.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2
#define STATS_FLAGS_BOTH	(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(mf_tag_discard),
				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
	{ STATS_OFFSET32(pfc_frames_received_hi),
				8, STATS_FLAGS_PORT, "pfc_frames_received" },
	{ STATS_OFFSET32(pfc_frames_sent_hi),
				8, STATS_FLAGS_PORT, "pfc_frames_sent" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" },
	{ STATS_OFFSET32(total_tpa_aggregations_hi),
			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
	{ STATS_OFFSET32(total_tpa_bytes_hi),
			8, STATS_FLAGS_FUNC, "tpa_bytes"},
	{ STATS_OFFSET32(recoverable_error),
			4, STATS_FLAGS_FUNC, "recoverable_errors" },
	{ STATS_OFFSET32(unrecoverable_error),
			4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
	{ STATS_OFFSET32(driver_filtered_tx_pkt),
			4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
	{ STATS_OFFSET32(eee_tx_lpi),
			4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};

#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)

/* Map the currently active PHY's media type to the ethtool PORT_*
 * constant reported by get_settings.
 */
static int bnx2x_get_port_type(struct bnx2x *bp)
{
	int port_type;
	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
	switch (bp->link_params.phy[phy_idx].media_type) {
	case ETH_PHY_SFPP_10G_FIBER:
	case ETH_PHY_SFP_1G_FIBER:
	case ETH_PHY_XFP_FIBER:
	case ETH_PHY_KR:
	case ETH_PHY_CX4:
		port_type = PORT_FIBRE;
		break;
	case ETH_PHY_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case ETH_PHY_BASE_T:
		port_type = PORT_TP;
		break;
	case ETH_PHY_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case ETH_PHY_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int
bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool get_settings handler: report supported/advertised link
	 * modes, current speed/duplex, port type and (when autoneg has
	 * completed) the link partner's advertised abilities.
	 * Always returns 0.
	 */
	struct bnx2x *bp = netdev_priv(dev);
	int cfg_idx = bnx2x_get_link_cfg_idx(bp);

	/* Dual Media boards present all available port types */
	cmd->supported = bp->port.supported[cfg_idx] |
		(bp->port.supported[cfg_idx ^ 1] &
		 (SUPPORTED_TP | SUPPORTED_FIBRE));
	cmd->advertising = bp->port.advertising[cfg_idx];
	if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
	    ETH_PHY_SFP_1G_FIBER) {
		/* A 1G SFP module cannot run at 10G - mask it out */
		cmd->supported &= ~(SUPPORTED_10000baseT_Full);
		cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
	}

	/* Report a real speed/duplex only when the device is open, link is
	 * up and the function is not disabled in multi-function mode.
	 */
	if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
	    !(bp->flags & MF_FUNC_DIS)) {
		cmd->duplex = bp->link_vars.duplex;

		/* In multi-function mode the MCP-derived per-function
		 * bandwidth is reported rather than the raw line speed.
		 */
		if (IS_MF(bp) && !BP_NOMCP(bp))
			ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
		else
			ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
	} else {
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	}

	cmd->port = bnx2x_get_port_type(bp);

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* Publish LP advertised speeds and FC */
	if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
		u32 status = bp->link_vars.link_status;

		cmd->lp_advertising |= ADVERTISED_Autoneg;
		if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Pause;
		if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Asym_Pause;

		if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
	}

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   " supported 0x%x advertising 0x%x speed %u\n"
	   " duplex %d port %d phy_address %d transceiver %d\n"
	   " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising,
	   ethtool_cmd_speed(cmd),
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

/* ethtool set_settings handler: apply a requested port type, autoneg
 * setting and speed/duplex.  Silently ignored (returns 0) for
 * switch-dependent multi-function devices where the link is not owned
 * by this function.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
	u32 speed, phy_idx;

	if (IS_MF_SD(bp))
		return 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   " supported 0x%x advertising 0x%x speed %u\n"
	   " duplex %d port %d phy_address %d transceiver %d\n"
	   " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising,
	   ethtool_cmd_speed(cmd),
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	speed = ethtool_cmd_speed(cmd);

	/* If received a request for an
unknown duplex, assume full*/
	if (cmd->duplex == DUPLEX_UNKNOWN)
		cmd->duplex = DUPLEX_FULL;

	/* In switch-independent multi-function mode "speed" is really a
	 * bandwidth request: translate it into a percentage of the actual
	 * line speed and hand it to management FW.
	 */
	if (IS_MF_SI(bp)) {
		u32 part;
		u32 line_speed = bp->link_vars.line_speed;

		/* use 10G if no link detected */
		if (!line_speed)
			line_speed = 10000;

		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
			DP(BNX2X_MSG_ETHTOOL,
			   "To set speed BC %X or higher is required, please upgrade BC\n",
			   REQ_BC_VER_4_SET_MF_BW);
			return -EINVAL;
		}

		part = (speed * 100) / line_speed;

		if (line_speed < speed || !part) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
			return -EINVAL;
		}

		if (bp->state != BNX2X_STATE_OPEN)
			/* store value for following "load" */
			bp->pending_max = part;
		else
			bnx2x_update_max_mf_config(bp, part);

		return 0;
	}

	/* Select the PHY matching the requested port type.  On dual-media
	 * boards this may switch between the two PHYs via
	 * multi_phy_config; PHY_SWAPPED inverts which physical PHY is
	 * "first".
	 */
	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	old_multi_phy_config = bp->link_params.multi_phy_config;
	switch (cmd->port) {
	case PORT_TP:
		if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
			break; /* no port change */

		if (!(bp->port.supported[0] & SUPPORTED_TP ||
		      bp->port.supported[1] & SUPPORTED_TP)) {
			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
			return -EINVAL;
		}
		bp->link_params.multi_phy_config &=
			~PORT_HW_CFG_PHY_SELECTION_MASK;
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
		else
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
		break;
	case PORT_FIBRE:
	case PORT_DA:
		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
			break; /* no port change */

		if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
		      bp->port.supported[1] & SUPPORTED_FIBRE)) {
			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
			return -EINVAL;
		}
		bp->link_params.multi_phy_config &=
			~PORT_HW_CFG_PHY_SELECTION_MASK;
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
		else
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
		break;
	default:
		DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
		return -EINVAL;
	}
	/* Save new config in case command complete successfully */
	new_multi_phy_config = bp->link_params.multi_phy_config;
	/* Get the new cfg_idx */
	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	/* Restore old config in case command failed */
	bp->link_params.multi_phy_config = old_multi_phy_config;
	DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* BCM84833 additionally supports 100M modes beyond the
		 * generic supported[] mask.
		 */
		u32 an_supported_speed = bp->port.supported[cfg_idx];
		if (bp->link_params.phy[EXT_PHY1].type ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
			an_supported_speed |= (SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full);
		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
			DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		if (cmd->advertising & ~an_supported_speed) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Advertisement parameters are not supported\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
					 cmd->advertising);
		/* Translate the requested advertisement mask into the HW
		 * speed capability mask; an empty mask leaves the current
		 * speed_cap_mask untouched.
		 */
		if (cmd->advertising) {

			bp->link_params.speed_cap_mask[cfg_idx] = 0;
			if (cmd->advertising & ADVERTISED_10baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
			}
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;

			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;

			if (cmd->advertising & ADVERTISED_100baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
			}
			if (cmd->advertising & ADVERTISED_1000baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
			}
			if (cmd->advertising & (ADVERTISED_1000baseT_Full |
						ADVERTISED_1000baseKX_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;

			if (cmd->advertising & (ADVERTISED_10000baseT_Full |
						ADVERTISED_10000baseKX4_Full |
						ADVERTISED_10000baseKR_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;

			if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
		}
	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			/* 1G and above support full duplex only */
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported[cfg_idx] &
			      SUPPORTED_1000baseT_Full)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported[cfg_idx]
			      & SUPPORTED_2500baseX_Full)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G half not supported\n");
				return -EINVAL;
			}
			phy_idx = bnx2x_get_cur_phy_idx(bp);
			/* 1G SFP modules cannot be forced to 10G */
			if (!(bp->port.supported[cfg_idx]
			      & SUPPORTED_10000baseT_Full) ||
			    (bp->link_params.phy[phy_idx].media_type ==
			     ETH_PHY_SFP_1G_FIBER)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = speed;
		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
		bp->port.advertising[cfg_idx] = advertising;
	}

	DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
	   " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed[cfg_idx],
	   bp->link_params.req_duplex[cfg_idx],
	   bp->port.advertising[cfg_idx]);

	/* Set new config */
	bp->link_params.multi_phy_config = new_multi_phy_config;
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define DUMP_ALL_PRESETS	0x1FFF
#define DUMP_MAX_PRESETS	13

/* Number of 32-bit register values in the given dump preset (1-based)
 * for the current chip revision; 0 for an unknown chip.
 */
static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
	if (CHIP_IS_E1(bp))
		return dump_num_registers[0][preset-1];
	else if (CHIP_IS_E1H(bp))
		return dump_num_registers[1][preset-1];
	else if (CHIP_IS_E2(bp))
		return dump_num_registers[2][preset-1];
	else if (CHIP_IS_E3A0(bp))
		return dump_num_registers[3][preset-1];
	else if (CHIP_IS_E3B0(bp))
		return dump_num_registers[4][preset-1];
	else
		return 0;
}

/* Total number of 32-bit register values across all presets. */
static int __bnx2x_get_regs_len(struct bnx2x *bp)
{
	u32 preset_idx;
	int regdump_len = 0;

	/* Calculate the total preset regs length */
	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
		regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);

	return regdump_len;
}

/* ethtool get_regs_len handler: full dump size in bytes
 * (all presets * 4 bytes per register, plus the dump header).
 */
static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;

	regdump_len = __bnx2x_get_regs_len(bp);
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_header);

	return regdump_len;
}

/* Chip-membership tests on a register-table entry's "chips" bitmask */
#define IS_E1_REG(chips)	((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)	((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)	((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)	((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)	((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

/* True if the 1-based preset idx is set in a "presets" bitmask */
#define IS_REG_IN_PRESET(presets, idx)  \
		((presets & (1 << (idx-1))) == (1 << (idx-1)))

/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_vals_e2;
	else if (CHIP_IS_E3(bp))
		return page_vals_e3;
	else
		return NULL;
}

/* Number of entries in the page-address array for this chip (0 when
 * paged registers are not supported, i.e. pre-E2).
 */
static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_MODE_VALUES_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_MODE_VALUES_E3;
	else
		return 0;
}

/* Per-chip array of "write addresses" used to select a page */
static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_write_regs_e2;
	else if (CHIP_IS_E3(bp))
		return page_write_regs_e3;
	else
		return NULL;
}

static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_WRITE_REGS_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_WRITE_REGS_E3;
	else
		return 0;
}

/* Per-chip array of "read addresses" (with size/preset info) */
static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_read_regs_e2;
	else if (CHIP_IS_E3(bp))
		return page_read_regs_e3;
	else
		return NULL;
}

static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_READ_REGS_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_READ_REGS_E3;
	else
		return 0;
}

/* True if this register-table entry applies to the current chip */
static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
				       const struct reg_addr *reg_info)
{
	if (CHIP_IS_E1(bp))
		return IS_E1_REG(reg_info->chips);
	else if (CHIP_IS_E1H(bp))
		return IS_E1H_REG(reg_info->chips);
	else if (CHIP_IS_E2(bp))
		return IS_E2_REG(reg_info->chips);
	else if (CHIP_IS_E3A0(bp))
		return IS_E3A0_REG(reg_info->chips);
	else if (CHIP_IS_E3B0(bp))
		return IS_E3B0_REG(reg_info->chips);
	else
		return false;
}

/* Same as above for a wide-register (CAM) table entry.
 * NOTE(review): callers may pass a NULL wreg_info when no CHIP_IS_*
 * matched (see __bnx2x_get_preset_regs); that is safe only because the
 * chip list here is identical to the one selecting wreg_addr_p there -
 * keep the two lists in sync.
 */
static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
	const struct wreg_addr *wreg_info)
{
	if (CHIP_IS_E1(bp))
		return IS_E1_REG(wreg_info->chips);
	else if (CHIP_IS_E1H(bp))
		return IS_E1H_REG(wreg_info->chips);
	else if (CHIP_IS_E2(bp))
		return IS_E2_REG(wreg_info->chips);
	else if (CHIP_IS_E3A0(bp))
		return IS_E3A0_REG(wreg_info->chips);
	else if (CHIP_IS_E3B0(bp))
		return IS_E3B0_REG(wreg_info->chips);
	else
		return false;
}

/**
 * bnx2x_read_pages_regs - read "paged" registers
 *
 * @bp		device handle
 * @p		output buffer
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, k, n;

	/* addresses of the paged registers */
	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
	/* number of paged registers */
	int num_pages = __bnx2x_get_page_reg_num(bp);
	/* write addresses */
	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
	/* number of write addresses */
	int write_num = __bnx2x_get_page_write_num(bp);
	/* read addresses info */
	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
	/* number of read addresses */
	int read_num = __bnx2x_get_page_read_num(bp);
	u32 addr, size;

	for (i = 0; i < num_pages; i++) {
		for (j = 0; j < write_num; j++) {
			/* select the page, then read every matching
			 * register range from it
			 */
			REG_WR(bp, write_addr[j], page_addr[i]);

			for (k = 0; k < read_num; k++) {
				if (IS_REG_IN_PRESET(read_addr[k].presets,
						     preset)) {
					size = read_addr[k].size;
					for (n = 0; n < size; n++) {
						addr = read_addr[k].addr + n*4;
						*p++ = REG_RD(bp, addr);
					}
				}
			}
		}
	}
}

/* Read every register belonging to the given preset into *p:
 * idle_chk registers, regular registers, the chip's CAM (wide) register
 * block, and - on E2/E3 - the paged registers.
 */
static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, addr;
	const struct wreg_addr *wreg_addr_p = NULL;

	if (CHIP_IS_E1(bp))
		wreg_addr_p = &wreg_addr_e1;
	else if (CHIP_IS_E1H(bp))
		wreg_addr_p = &wreg_addr_e1h;
	else if (CHIP_IS_E2(bp))
		wreg_addr_p = &wreg_addr_e2;
	else if (CHIP_IS_E3A0(bp))
		wreg_addr_p = &wreg_addr_e3;
	else if (CHIP_IS_E3B0(bp))
		wreg_addr_p = &wreg_addr_e3b0;

	/* Read the idle_chk registers */
	for (i = 0; i < IDLE_REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
		    IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
			for (j = 0; j < idle_reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
		}
	}

	/* Read the regular registers */
	for (i = 0; i < REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
		    IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
			for (j = 0; j < reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
		}
	}

	/* Read the CAM registers */
	if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
	    IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
		for (i = 0; i < wreg_addr_p->size; i++) {
			*p++ = REG_RD(bp, wreg_addr_p->addr + i*4);

			/* In case of wreg_addr register, read additional
			   registers from read_regs array
			 */
			for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
				/* NOTE(review): always reads read_regs[0]
				 * and offsets by j*4; confirm against the
				 * dump-table layout that read_regs is meant
				 * to describe one contiguous range.
				 */
				addr = *(wreg_addr_p->read_regs);
				*p++ = REG_RD(bp, addr + j*4);
			}
		}
	}

	/* Paged registers are supported in E2 & E3 only */
	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
		/* Read "paged" registers */
		bnx2x_read_pages_regs(bp, p, preset);
	}

	return 0;
}

/* Read the complete register dump by iterating over all presets,
 * skipping the IOR presets, advancing the output pointer by each
 * preset's length.
 */
static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
{
	u32 preset_idx;

	/* Read all registers, by reading all preset registers */
	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
		/* Skip presets with IOR */
		if ((preset_idx == 2) ||
		    (preset_idx == 5) ||
		    (preset_idx == 8) ||
		    (preset_idx == 11))
			continue;
		__bnx2x_get_preset_regs(bp, p, preset_idx);
		p += __bnx2x_get_preset_regs_len(bp, preset_idx);
	}
}

/* ethtool get_regs handler */
static void bnx2x_get_regs(struct net_device *dev,
			   struct
ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	regs->version = 2;
	memset(p, 0, regs->len);

	/* If the interface is down the buffer stays zeroed */
	if (!netif_running(bp->dev))
		return;

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	/* Disable parity on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_disable_blocks_parity(bp);

	/* Disable parity on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_disable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = DUMP_ALL_PRESETS;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	p += dump_hdr.header_size + 1;

	/* Actually read the registers */
	__bnx2x_get_regs(bp, p);

	/* Re-enable parity attentions on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Re-enable parity attentions on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

/* Dump size in bytes for a single preset (header included) */
static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;

	regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_header);

	return regdump_len;
}

/* ethtool set_dump handler: select which preset get_dump_data returns */
static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Use the ethtool_dump "flag" field as the dump preset index */
	if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
		return -EINVAL;

	bp->dump_preset_idx = val->flag;
	return 0;
}

/* ethtool get_dump_flag handler: report the selected preset and its
 * buffer length so user space can size the get_dump_data buffer.
 */
static int bnx2x_get_dump_flag(struct net_device *dev,
			       struct ethtool_dump *dump)
{
	struct bnx2x *bp = netdev_priv(dev);

	dump->version = BNX2X_DUMP_VERSION;
	dump->flag = bp->dump_preset_idx;
	/* Calculate the requested preset idx length */
	dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
	DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
	   bp->dump_preset_idx, dump->len);
	return 0;
}

/* ethtool get_dump_data handler: dump header + registers of the preset
 * previously selected via set_dump.  Same parity-disable dance as
 * bnx2x_get_regs.
 */
static int bnx2x_get_dump_data(struct net_device *dev,
			       struct ethtool_dump *dump,
			       void *buffer)
{
	u32 *p = buffer;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	/* Disable parity on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_disable_blocks_parity(bp);

	/* Disable parity on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_disable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = bp->dump_preset_idx;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	p += dump_hdr.header_size + 1;

	/* Actually read the registers */
	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);

	/* Re-enable parity attentions on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Re-enable parity attentions on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	return 0;
}

/* ethtool get_drvinfo handler */
static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));

	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS(bp);
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

/* ethtool get_wol handler: only magic-packet WoL is supported, and only
 * when the board does not have NO_WOL_FLAG set.
 */
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/* ethtool set_wol handler: rejects anything other than WAKE_MAGIC.
 * (Function is truncated at this chunk boundary.)
 */
static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC) {
		DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
		return -EINVAL;
	}

	if
(wol->wolopts & WAKE_MAGIC) { 1095 if (bp->flags & NO_WOL_FLAG) { 1096 DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); 1097 return -EINVAL; 1098 } 1099 bp->wol = 1; 1100 } else 1101 bp->wol = 0; 1102 1103 return 0; 1104 } 1105 1106 static u32 bnx2x_get_msglevel(struct net_device *dev) 1107 { 1108 struct bnx2x *bp = netdev_priv(dev); 1109 1110 return bp->msg_enable; 1111 } 1112 1113 static void bnx2x_set_msglevel(struct net_device *dev, u32 level) 1114 { 1115 struct bnx2x *bp = netdev_priv(dev); 1116 1117 if (capable(CAP_NET_ADMIN)) { 1118 /* dump MCP trace */ 1119 if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) 1120 bnx2x_fw_dump_lvl(bp, KERN_INFO); 1121 bp->msg_enable = level; 1122 } 1123 } 1124 1125 static int bnx2x_nway_reset(struct net_device *dev) 1126 { 1127 struct bnx2x *bp = netdev_priv(dev); 1128 1129 if (!bp->port.pmf) 1130 return 0; 1131 1132 if (netif_running(dev)) { 1133 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1134 bnx2x_force_link_reset(bp); 1135 bnx2x_link_set(bp); 1136 } 1137 1138 return 0; 1139 } 1140 1141 static u32 bnx2x_get_link(struct net_device *dev) 1142 { 1143 struct bnx2x *bp = netdev_priv(dev); 1144 1145 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) 1146 return 0; 1147 1148 return bp->link_vars.link_up; 1149 } 1150 1151 static int bnx2x_get_eeprom_len(struct net_device *dev) 1152 { 1153 struct bnx2x *bp = netdev_priv(dev); 1154 1155 return bp->common.flash_size; 1156 } 1157 1158 /* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, 1159 * had we done things the other way around, if two pfs from the same port would 1160 * attempt to access nvram at the same time, we could run into a scenario such 1161 * as: 1162 * pf A takes the port lock. 1163 * pf B succeeds in taking the same lock since they are from the same port. 1164 * pf A takes the per pf misc lock. Performs eeprom access. 1165 * pf A finishes. Unlocks the per pf misc lock. 1166 * Pf B takes the lock and proceeds to perform it's own access. 
1167 * pf A unlocks the per port lock, while pf B is still working (!). 1168 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own 1169 * access corrupted by pf B) 1170 */ 1171 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) 1172 { 1173 int port = BP_PORT(bp); 1174 int count, i; 1175 u32 val; 1176 1177 /* acquire HW lock: protect against other PFs in PF Direct Assignment */ 1178 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); 1179 1180 /* adjust timeout for emulation/FPGA */ 1181 count = BNX2X_NVRAM_TIMEOUT_COUNT; 1182 if (CHIP_REV_IS_SLOW(bp)) 1183 count *= 100; 1184 1185 /* request access to nvram interface */ 1186 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 1187 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); 1188 1189 for (i = 0; i < count*10; i++) { 1190 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); 1191 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) 1192 break; 1193 1194 udelay(5); 1195 } 1196 1197 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { 1198 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1199 "cannot get access to nvram interface\n"); 1200 return -EBUSY; 1201 } 1202 1203 return 0; 1204 } 1205 1206 static int bnx2x_release_nvram_lock(struct bnx2x *bp) 1207 { 1208 int port = BP_PORT(bp); 1209 int count, i; 1210 u32 val; 1211 1212 /* adjust timeout for emulation/FPGA */ 1213 count = BNX2X_NVRAM_TIMEOUT_COUNT; 1214 if (CHIP_REV_IS_SLOW(bp)) 1215 count *= 100; 1216 1217 /* relinquish nvram interface */ 1218 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 1219 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); 1220 1221 for (i = 0; i < count*10; i++) { 1222 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); 1223 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) 1224 break; 1225 1226 udelay(5); 1227 } 1228 1229 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { 1230 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1231 "cannot free access to nvram interface\n"); 1232 return -EBUSY; 1233 } 1234 1235 /* release HW lock: protect against other PFs in PF Direct Assignment */ 1236 
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
	return 0;
}

/* Turn on read and write access to the NVRAM interface */
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

/* Turn off read and write access to the NVRAM interface */
static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

/* Read one dword from NVRAM at @offset into @ret_val (big-endian byte
 * stream as ethtool expects).  @cmd_flags marks FIRST/LAST of a burst.
 * Caller must hold the NVRAM lock and have access enabled.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work
			 */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}
	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram read timeout expired\n");
	return rc;
}

/* Read @buf_size bytes from NVRAM at @offset into @ret_buf.  Both offset
 * and size must be dword-aligned and fit inside the flash.  Handles the
 * lock/access-enable bracketing around the dword burst.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* final dword carries the LAST flag to close the burst */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

/* Like bnx2x_nvram_read() but converts the result to cpu-order u32s */
static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
			      int buf_size)
{
	int rc;

	rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);

	if (!rc) {
		__be32 *be = (__be32 *)buf;

		/* in-place big-endian -> cpu-order conversion */
		while ((buf_size -= 4) >= 0)
			*buf++ = be32_to_cpu(*be++);
	}

	return rc;
}

static bool
bnx2x_is_nvm_accessible(struct bnx2x *bp)
{
	int rc = 1;
	u16 pm = 0;
	struct net_device *dev = pci_get_drvdata(bp->pdev);

	/* Check PCI power state when a PM capability exists; without one,
	 * fall back to requiring the interface to be up.
	 */
	if (bp->pdev->pm_cap)
		rc = pci_read_config_word(bp->pdev,
					  bp->pdev->pm_cap + PCI_PM_CTRL, &pm);

	/* NVRAM is inaccessible if the netdev is down (no PM info) or the
	 * device is not in the D0 power state.
	 */
	if ((rc && !netif_running(dev)) ||
	    (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
		return false;

	return true;
}

/* ethtool -e: read from the NVRAM flash */
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}

/* ethtool -m: read the SFP module EEPROM.  Addresses below
 * ETH_MODULE_SFF_8079_LEN come from the A0 page, the rest from A2.
 */
static int bnx2x_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = -EINVAL, phy_idx;
	u8 *user_data = data;
	unsigned int start_addr = ee->offset, xfer_size = 0;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* Read A0 section */
	if (start_addr < ETH_MODULE_SFF_8079_LEN) {
		/* Limit transfer size to the A0 section boundary */
		if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
			xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
		else
			xfer_size = ee->len;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A0,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");

			return -EINVAL;
		}
		user_data += xfer_size;
		start_addr += xfer_size;
	}

	/* Read A2 section */
	if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
	    (start_addr < ETH_MODULE_SFF_8472_LEN)) {
		/* remaining bytes after what the A0 read consumed */
		xfer_size = ee->len - xfer_size;
		/* Limit transfer size to the A2 section boundary */
		if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
			xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
		/* A2 page is addressed from 0 */
		start_addr -= ETH_MODULE_SFF_8079_LEN;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A2,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
			return -EINVAL;
		}
	}
	return rc;
}

/* ethtool -m info: decide whether the plugged module is SFF-8079 only or
 * also implements the SFF-8472 diagnostic (A2) page.
 */
static int bnx2x_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct bnx2x *bp = netdev_priv(dev);
	int phy_idx, rc;
	u8 sff8472_comp, diag_type;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}
	phy_idx = bnx2x_get_cur_phy_idx(bp);
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_SFF_8472_COMP_ADDR,
					  SFP_EEPROM_SFF_8472_COMP_SIZE,
					  &sff8472_comp);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
		return -EINVAL;
	}

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_DIAG_TYPE_ADDR,
					  SFP_EEPROM_DIAG_TYPE_SIZE,
					  &diag_type);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
		return -EINVAL;
	}

	/* modules needing an address-change sequence are treated as 8079 */
	if (!sff8472_comp ||
	    (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
	return 0;
}

/* Write one dword @val to NVRAM at @offset.  Caller must hold the NVRAM
 * lock and have write access enabled.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram write timeout expired\n");
	return rc;
}

/* bit offset of a byte within its containing dword */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte: read the containing dword, merge the byte in,
 * write the dword back (NVRAM only supports dword accesses).
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32
cmd_flags, align_offset, val;
	__be32 val_be;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the enclosing dword, patch the target byte, write it back */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);

	if (rc == 0) {
		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order
		 */
		val = be32_to_cpu(val_be);

		val &= ~le32_to_cpu((__force __le32)
				    (0xff << BYTE_OFFSET(offset)));
		val |= le32_to_cpu((__force __le32)
				   (*data_buf << BYTE_OFFSET(offset)));

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  A 1-byte
 * request (as ethtool issues) is handled via the read-modify-write path;
 * otherwise offset and size must be dword-aligned.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* bursts must start/end on NVRAM page boundaries */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

/* ethtool -E: write NVRAM, or drive PHY FW-upgrade sequences selected by
 * the magic field ('PHYP' prepare, 'PHYR' re-init, 'PHYC' complete).
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;
	u32 ext_phy_config;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "wrong magic or interface is not pmf\n");
		return -EINVAL;
	}

	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed
		 * NOTE(review): 0x53985943 is not the ASCII of "PHYC"
		 * (that would be 0x50485943); the value is kept as-is since
		 * it is an ABI shared with the user-space upgrade tool —
		 * confirm before ever "fixing" it.
		 */
		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp,
						  &bp->link_params.phy[EXT_PHY1]);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

/* ethtool -c: report interrupt coalescing timers (usecs) */
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs =
bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

/* ethtool -C: set interrupt coalescing timers, clamped to the HW maximum */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	/* push new values to HW only when the device is up */
	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

/* ethtool -g: report ring sizes; 0 rx_ring_size means "driver default" */
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;

	if (bp->rx_ring_size)
		ering->rx_pending = bp->rx_ring_size;
	else
		ering->rx_pending = MAX_RX_AVAIL;

	/* FCoE-AFEX storage-only functions have no ethernet tx ring */
	ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

/* ethtool -G: set ring sizes and reload the interface if it is running */
static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	DP(BNX2X_MSG_ETHTOOL,
	   "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
	   ering->rx_pending, ering->tx_pending);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* rx minimum depends on whether TPA (LRO) is enabled; tx must leave
	 * room for a maximally fragmented skb plus BDs
	 */
	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						    MIN_RX_SIZE_TPA)) ||
	    (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
		return -EINVAL;
	}

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	return bnx2x_reload_if_running(dev);
}

/* ethtool -a: report flow-control config; when autoneg is on, report the
 * advertised flow control instead of the forced setting.
 */
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);
	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
	int cfg_reg;

	epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
			   BNX2X_FLOW_CTRL_AUTO);

	if (!epause->autoneg)
		cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
	else
		cfg_reg = bp->link_params.req_fc_auto_adv;

	epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
	   " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

/* ethtool -A: set flow-control; a no-op in multi-function mode */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	if (IS_MF(bp))
		return 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
	   " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;

	/* neither rx nor tx requested -> explicitly no flow control */
	if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;

	if
(epause->autoneg) {
		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
			DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
			bp->link_params.req_flow_ctrl[cfg_idx] =
				BNX2X_FLOW_CTRL_AUTO;
		}
		/* translate the requested pause into autoneg advertisement */
		bp->link_params.req_fc_auto_adv = 0;
		if (epause->rx_pause)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;

		if (epause->tx_pause)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;

		if (!bp->link_params.req_fc_auto_adv)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
	}

	DP(BNX2X_MSG_ETHTOOL,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);

	/* apply the new settings immediately when the link is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

/* Names reported for ethtool self-test results */
static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
	"register_test (offline) ",
	"memory_test (offline) ",
	"int_loopback_test (offline)",
	"ext_loopback_test (offline)",
	"nvram_test (online) ",
	"interrupt_test (online) ",
	"link_test (online) "
};

enum {
	BNX2X_PRI_FLAG_ISCSI,
	BNX2X_PRI_FLAG_FCOE,
	BNX2X_PRI_FLAG_STORAGE,
	BNX2X_PRI_FLAG_LEN,
};

/* Names reported for ethtool private flags */
static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"iSCSI offload support",
	"FCoE offload support",
	"Storage only interface"
};

/* Translate shmem EEE advertisement bits to ethtool ADVERTISED_* modes */
static u32 bnx2x_eee_to_adv(u32 eee_adv)
{
	u32 modes = 0;

	if (eee_adv & SHMEM_EEE_100M_ADV)
		modes |= ADVERTISED_100baseT_Full;
	if (eee_adv & SHMEM_EEE_1G_ADV)
		modes |= ADVERTISED_1000baseT_Full;
	if (eee_adv & SHMEM_EEE_10G_ADV)
		modes |= ADVERTISED_10000baseT_Full;

	return modes;
}

/* Translate ethtool ADVERTISED_* modes to shmem EEE bits at @shift */
static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
{
	u32 eee_adv = 0;
	if (modes & ADVERTISED_100baseT_Full)
		eee_adv |= SHMEM_EEE_100M_ADV;
	if (modes & ADVERTISED_1000baseT_Full)
		eee_adv |= SHMEM_EEE_1G_ADV;
	if (modes & ADVERTISED_10000baseT_Full)
		eee_adv |= SHMEM_EEE_10G_ADV;

	return eee_adv << shift;
}

/* ethtool --show-eee: report EEE state from the shmem eee_status word */
static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 eee_cfg;

	if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
		DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
		return -EOPNOTSUPP;
	}

	eee_cfg = bp->link_vars.eee_status;

	edata->supported =
		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
				 SHMEM_EEE_SUPPORTED_SHIFT);

	edata->advertised =
		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
				 SHMEM_EEE_ADV_STATUS_SHIFT);
	edata->lp_advertised =
		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
				 SHMEM_EEE_LP_ADV_STATUS_SHIFT);

	/* SHMEM value is in 16u units --> Convert to 1u units. */
	edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;

	edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
	edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
	edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;

	return 0;
}

/* ethtool --set-eee: validate and apply EEE settings, then restart the
 * link so they take effect.  No-op in multi-function mode.
 */
static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 eee_cfg;
	u32 advertised;

	if (IS_MF(bp))
		return 0;

	if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
		DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
		return -EOPNOTSUPP;
	}

	eee_cfg = bp->link_vars.eee_status;

	if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
		DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	/* advertisement set must match exactly what FW already advertises */
	advertised = bnx2x_adv_to_eee(edata->advertised,
				      SHMEM_EEE_ADV_STATUS_SHIFT);
	if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Maximal Tx Lpi timer supported is %x(u)\n",
		   EEE_MODE_TIMER_MASK);
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled &&
	    (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Minimal Tx Lpi timer supported is %d(u)\n",
		   EEE_MODE_NVRAM_AGGRESSIVE_TIME);
		return -EINVAL;
	}

	/* All is well; Apply changes*/
	if (edata->eee_enabled)
		bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
	else
		bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;

	if (edata->tx_lpi_enabled)
		bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
	else
		bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;

	bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
	bp->link_params.eee_mode |= (edata->tx_lpi_timer &
				    EEE_MODE_TIMER_MASK) |
				    EEE_MODE_OVERRIDE_NVRAM |
				    EEE_MODE_OUTPUT_TIME;

	/* Restart link to propagate changes */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_force_link_reset(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}

/* Chip-family indices used to build per-chip applicability masks for the
 * self-test tables below.
 */
enum {
	BNX2X_CHIP_E1_OFST = 0,
	BNX2X_CHIP_E1H_OFST,
	BNX2X_CHIP_E2_OFST,
	BNX2X_CHIP_E3_OFST,
	BNX2X_CHIP_E3B0_OFST,
	BNX2X_CHIP_MAX_OFST
};

#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)

#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)

/* Self-test: write 0x00000000 and then 0xffffffff (masked) to a table of
 * registers and verify each value reads back correctly; the original
 * register values are restored after each probe.
 * Returns 0 on success, -ENODEV on any mismatch or if the device is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0, hw;
	int port = BP_PORT(bp);
	/* { applicable-chip mask, base offset, per-port stride, r/w mask } */
	static const struct {
		u32 hw;
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BNX2X_CHIP_MASK_ALL,
			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
		{ BNX2X_CHIP_MASK_E1X,
			HC_REG_AGG_INT_0,		4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
		{ BNX2X_CHIP_MASK_E3B0,
			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
		{ BNX2X_CHIP_MASK_ALL,
			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
/* 10 */	{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			QM_REG_CONNNUM_0,		4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
/* 30 */	{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_VLAN_ID_0,		160, 0x00000fff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },

		/* 0xffffffff in offset0 terminates the table */
		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	/* Resolve chip family once; rows whose mask misses 'hw' are skipped */
	if (CHIP_IS_E1(bp))
		hw = BNX2X_CHIP_MASK_E1;
	else if (CHIP_IS_E1H(bp))
		hw = BNX2X_CHIP_MASK_E1H;
	else if (CHIP_IS_E2(bp))
		hw = BNX2X_CHIP_MASK_E2;
	else if (CHIP_IS_E3B0(bp))
		hw = BNX2X_CHIP_MASK_E3B0;
	else /* e3 A0 */
		hw = BNX2X_CHIP_MASK_E3;

	/* Repeat the test twice:
	 * First by writing 0x00000000, second by writing 0xffffffff
	 */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;
			if (!(hw & reg_tbl[i].hw))
				continue;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val & mask);

			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

/* Self-test: sweep a set of internal memories and verify that no new
 * parity errors (beyond the per-chip expected mask) are reported.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val, index;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,
		  CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};

	/* Parity-status registers and the per-chip-family bits that are
	 * expected (and therefore ignored); indexed by BNX2X_CHIP_*_OFST.
	 */
	static const struct {
		char *name;
		u32 offset;
		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
			{0x2,     0x2, 0, 0} },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
			{0,       0,   0, 0} },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
			{0x3ffc1, 0,   0, 0} },

		{ NULL, 0xffffffff, {0, 0, 0, 0} }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	if (CHIP_IS_E1(bp))
		index = BNX2X_CHIP_E1_OFST;
	else if (CHIP_IS_E1H(bp))
		index = BNX2X_CHIP_E1H_OFST;
	else if (CHIP_IS_E2(bp))
		index = BNX2X_CHIP_E2_OFST;
	else /* e3 */
		index = BNX2X_CHIP_E3_OFST;

	/* pre-Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

/* Poll (up to 1400 * 20ms per phase) for the link test to pass and for the
 * driver's link-up flag, logging a debug message on each timeout.
 * No-op when link_up is false.
 */
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
	int cnt = 1400;

	if (link_up) {
		while (bnx2x_link_test(bp, is_serdes) && cnt--)
			msleep(20);

		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
			DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");

		cnt = 1400;
		while (!bp->link_vars.link_up && cnt--)
			msleep(20);

		if (cnt <= 0 && !bp->link_vars.link_up)
			DP(BNX2X_MSG_ETHTOOL,
			   "Timeout waiting for link init\n");
	}
}

/* Build one self-addressed frame, transmit it on queue 0 in the requested
 * loopback mode and verify it arrives intact on the Rx side of the same
 * queue. Returns 0 on success, a negative errno on failure.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags, cqe_fp_type;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;
	u8 *data;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
						       txdata->txq_index);

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
			DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
			return -EINVAL;
		}
		break;
	case BNX2X_MAC_LOOPBACK:
		if (CHIP_IS_E3(bp)) {
			int cfg_idx = bnx2x_get_link_cfg_idx(bp);
			/* E3 has two MACs; pick the one matching the
			 * configured speed capabilities.
			 */
			if (bp->port.supported[cfg_idx] &
			    (SUPPORTED_10000baseT_Full |
			     SUPPORTED_20000baseMLD2_Full |
			     SUPPORTED_20000baseKR2_Full))
				bp->link_params.loopback_mode = LOOPBACK_XMAC;
			else
				bp->link_params.loopback_mode = LOOPBACK_UMAC;
		} else
			bp->link_params.loopback_mode = LOOPBACK_BMAC;

		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	case BNX2X_EXT_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't configure external loopback\n");
			return -EINVAL;
		}
		break;
	default:
		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
	if (!skb) {
		DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	/* dst = our own MAC, zero src, 0x77 header filler, then a counting
	 * byte pattern in the payload which is verified on receive.
	 */
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		rc = -ENOMEM;
		dev_kfree_skb(skb);
		DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
		goto test_loopback_exit;
	}

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	netdev_tx_sent_queue(txq, skb->len);

	pkt_prod = txdata->tx_pkt_prod++;
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* Fill the start BD: address, length, and flag it as the single
	 * header BD of a 2-BD (start + parse) packet.
	 */
	bd_prod = TX_BD(txdata->tx_bd_prod);
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_HDR_NBDS,
		 1);
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_PARSE_NBDS,
		 0);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* Parse-BD layout differs between E1x and newer chips */
	if (CHIP_IS_E1x(bp)) {
		u16 global_data = 0;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
		pbd_e1x->global_data = cpu_to_le16(global_data);
	} else {
		u32 parsing_data = 0;
		struct eth_tx_parse_bd_e2 *pbd_e2 =
			&txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		SET_FLAG(parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
		pbd_e2->parsing_data = cpu_to_le32(parsing_data);
	}
	/* Ensure the BDs are fully written before ringing the doorbell */
	wmb();

	txdata->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();
	barrier();

	num_pkts++;
	txdata->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	/* The frame must have been consumed on the Tx side by now */
	tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* Unlike HC IGU won't generate an interrupt for status block
	 * updates that have been performed while interrupts were
	 * disabled.
	 */
	if (bp->common.int_block == INT_BLOCK_IGU) {
		/* Disable local BHes to prevent a dead-lock situation between
		 * sch_direct_xmit() and bnx2x_run_loopback() (calling
		 * bnx2x_tx_int()), as both are taking netif_tx_lock().
		 */
		local_bh_disable();
		bnx2x_tx_int(bp, txdata);
		local_bh_enable();
	}

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* Sync the Rx buffer for the CPU and verify the counting payload */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				fp_rx->rx_buf_size, DMA_FROM_DEVICE);
	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* Consume the frame and republish the Rx producers */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

/* Run the internal (PHY then MAC) loopback tests with the queues quiesced.
 * Returns a bitmask of BNX2X_*_LOOPBACK_FAILED flags (0 on full success).
 */
static int bnx2x_test_loopback(struct bnx2x *bp)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if
	    (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	/* Quiesce traffic and hold the PHY lock while injecting frames */
	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
	if (res) {
		DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
	if (res) {
		DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

/* Run the external-cable loopback test: reload the NIC in LOOPBACK_EXT
 * mode, wait for the link and inject a single frame. The caller is
 * responsible for reloading the driver back to a normal state.
 */
static int bnx2x_test_ext_loopback(struct bnx2x *bp)
{
	int rc;
	u8 is_serdes =
		(bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;

	if (BP_NOMCP(bp))
		return -ENODEV;

	if (!netif_running(bp->dev))
		return BNX2X_EXT_LOOPBACK_FAILED;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
	rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Can't perform self-test, nic_load (for external lb) failed\n");
		return -ENODEV;
	}
	bnx2x_wait_for_link(bp, 1, is_serdes);

	bnx2x_netif_stop(bp, 1);

	rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
	if (rc)
		DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);

	bnx2x_netif_start(bp);

	return rc;
}

/* One NVRAM directory entry as laid out in the flash image */
struct code_entry {
	u32 sram_start_addr;
	u32 code_attribute;
#define CODE_IMAGE_TYPE_MASK			0xf0800003
#define CODE_IMAGE_VNTAG_PROFILES_DATA		0xd0000003
#define CODE_IMAGE_LENGTH_MASK			0x007ffffc
#define CODE_IMAGE_TYPE_EXTENDED_DIR		0xe0000000
	u32 nvm_start_addr;
};

#define CODE_ENTRY_MAX			16
#define CODE_ENTRY_EXTENDED_DIR_IDX	15
#define MAX_IMAGES_IN_EXTENDED_DIR	64
#define NVRAM_DIR_OFFSET		0x14

#define EXTENDED_DIR_EXISTS(code) \
	((code & CODE_IMAGE_TYPE_MASK) ==
CODE_IMAGE_TYPE_EXTENDED_DIR && \ 2651 (code & CODE_IMAGE_LENGTH_MASK) != 0) 2652 2653 #define CRC32_RESIDUAL 0xdebb20e3 2654 #define CRC_BUFF_SIZE 256 2655 2656 static int bnx2x_nvram_crc(struct bnx2x *bp, 2657 int offset, 2658 int size, 2659 u8 *buff) 2660 { 2661 u32 crc = ~0; 2662 int rc = 0, done = 0; 2663 2664 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2665 "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size); 2666 2667 while (done < size) { 2668 int count = min_t(int, size - done, CRC_BUFF_SIZE); 2669 2670 rc = bnx2x_nvram_read(bp, offset + done, buff, count); 2671 2672 if (rc) 2673 return rc; 2674 2675 crc = crc32_le(crc, buff, count); 2676 done += count; 2677 } 2678 2679 if (crc != CRC32_RESIDUAL) 2680 rc = -EINVAL; 2681 2682 return rc; 2683 } 2684 2685 static int bnx2x_test_nvram_dir(struct bnx2x *bp, 2686 struct code_entry *entry, 2687 u8 *buff) 2688 { 2689 size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK; 2690 u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK; 2691 int rc; 2692 2693 /* Zero-length images and AFEX profiles do not have CRC */ 2694 if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA) 2695 return 0; 2696 2697 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff); 2698 if (rc) 2699 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2700 "image %x has failed crc test (rc %d)\n", type, rc); 2701 2702 return rc; 2703 } 2704 2705 static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff) 2706 { 2707 int rc; 2708 struct code_entry entry; 2709 2710 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry)); 2711 if (rc) 2712 return rc; 2713 2714 return bnx2x_test_nvram_dir(bp, &entry, buff); 2715 } 2716 2717 static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff) 2718 { 2719 u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET; 2720 struct code_entry entry; 2721 int i; 2722 2723 rc = bnx2x_nvram_read32(bp, 2724 dir_offset + 2725 sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX, 2726 (u32 *)&entry, sizeof(entry)); 2727 if 
(rc) 2728 return rc; 2729 2730 if (!EXTENDED_DIR_EXISTS(entry.code_attribute)) 2731 return 0; 2732 2733 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr, 2734 &cnt, sizeof(u32)); 2735 if (rc) 2736 return rc; 2737 2738 dir_offset = entry.nvm_start_addr + 8; 2739 2740 for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) { 2741 rc = bnx2x_test_dir_entry(bp, dir_offset + 2742 sizeof(struct code_entry) * i, 2743 buff); 2744 if (rc) 2745 return rc; 2746 } 2747 2748 return 0; 2749 } 2750 2751 static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff) 2752 { 2753 u32 rc, dir_offset = NVRAM_DIR_OFFSET; 2754 int i; 2755 2756 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n"); 2757 2758 for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) { 2759 rc = bnx2x_test_dir_entry(bp, dir_offset + 2760 sizeof(struct code_entry) * i, 2761 buff); 2762 if (rc) 2763 return rc; 2764 } 2765 2766 return bnx2x_test_nvram_ext_dirs(bp, buff); 2767 } 2768 2769 struct crc_pair { 2770 int offset; 2771 int size; 2772 }; 2773 2774 static int bnx2x_test_nvram_tbl(struct bnx2x *bp, 2775 const struct crc_pair *nvram_tbl, u8 *buf) 2776 { 2777 int i; 2778 2779 for (i = 0; nvram_tbl[i].size; i++) { 2780 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset, 2781 nvram_tbl[i].size, buf); 2782 if (rc) { 2783 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2784 "nvram_tbl[%d] has failed crc test (rc %d)\n", 2785 i, rc); 2786 return rc; 2787 } 2788 } 2789 2790 return 0; 2791 } 2792 2793 static int bnx2x_test_nvram(struct bnx2x *bp) 2794 { 2795 const struct crc_pair nvram_tbl[] = { 2796 { 0, 0x14 }, /* bootstrap */ 2797 { 0x14, 0xec }, /* dir */ 2798 { 0x100, 0x350 }, /* manuf_info */ 2799 { 0x450, 0xf0 }, /* feature_info */ 2800 { 0x640, 0x64 }, /* upgrade_key_info */ 2801 { 0x708, 0x70 }, /* manuf_key_info */ 2802 { 0, 0 } 2803 }; 2804 const struct crc_pair nvram_tbl2[] = { 2805 { 0x7e8, 0x350 }, /* manuf_info2 */ 2806 { 0xb38, 0xf0 }, /* feature_info */ 2807 { 0, 0 } 2808 }; 2809 2810 u8 *buf; 2811 
	int rc;
	u32 magic;

	/* NVRAM is only validated when the MCP is present */
	if (BP_NOMCP(bp))
		return 0;

	buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
	if (!buf) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
		rc = -ENOMEM;
		goto test_nvram_exit;
	}

	rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	if (magic != 0x669955aa) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "wrong magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
	rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
	if (rc)
		goto test_nvram_exit;

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
		u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
			   SHARED_HW_CFG_HIDE_PORT1;

		/* Port 1 regions exist only when the port is not hidden */
		if (!hide) {
			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
			   "Port 1 CRC test-set\n");
			rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
			if (rc)
				goto test_nvram_exit;
		}
	}

	rc = bnx2x_test_nvram_dirs(bp, buf);

test_nvram_exit:
	kfree(buf);
	return rc;
}

/* Send an EMPTY ramrod on the first queue */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params params = {NULL};

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -ENODEV;
	}

	params.q_obj = &bp->sp_objs->q_obj;
	params.cmd = BNX2X_Q_CMD_EMPTY;

	/* Completion-wait: success of the ramrod proves interrupts work */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}

/* ethtool self_test() handler. Offline tests (registers, memory, internal
 * and optional external loopback) require reloading the NIC in diagnostic
 * mode and are skipped in MF mode; online tests (nvram, interrupt, link)
 * always run. Per-test results land in @buf (shifted down by 4 slots in MF
 * mode, where the offline entries are absent) and any failure sets
 * ETH_TEST_FL_FAILED.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 is_serdes, link_up;
	int rc, cnt = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev,
			   "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	DP(BNX2X_MSG_ETHTOOL,
	   "Self-test command parameters: offline = %d, external_lb = %d\n",
	   (etest->flags & ETH_TEST_FL_OFFLINE),
	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Can't perform self-test when interface is down\n");
		return;
	}

	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
	link_up = bp->link_vars.link_up;
	/* offline tests are not supported in MF mode */
	if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
		int port = BP_PORT(bp);
		u32 val;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
		rc = bnx2x_nic_load(bp, LOAD_DIAG);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for offline) failed\n");
			return;
		}

		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, 1, is_serdes);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

		buf[2] = bnx2x_test_loopback(bp); /* internal LB */
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
			buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
			if (buf[3] != 0)
				etest->flags |= ETH_TEST_FL_FAILED;
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
		}

		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for online) failed\n");
			return;
		}
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up, is_serdes);
	}
	/* Online tests; in MF mode results occupy the first slots */
	if (bnx2x_test_nvram(bp) != 0) {
		if (!IS_MF(bp))
			buf[4] = 1;
		else
			buf[0] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		if (!IS_MF(bp))
			buf[5] = 1;
		else
			buf[1] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (link_up) {
		cnt = 100;
		while (bnx2x_link_test(bp, is_serdes) && --cnt)
			msleep(20);
	}

	/* cnt == 0 means either no link at all or the link test kept
	 * failing for the whole 100 * 20ms window.
	 */
	if (!cnt) {
		if (!IS_MF(bp))
			buf[6] = 1;
		else
			buf[2] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

/* A stat is port-wide if it is flagged PORT but not FUNC */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In MF mode only function stats are exposed unless stats debug is on */
#define IS_MF_MODE_STAT(bp) \
			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

/* ethtool statistics are displayed for all regular ethernet queues and the
 * fcoe L2 queue if not disabled
 */
static int bnx2x_num_stat_queues(struct bnx2x *bp)
{
	return BNX2X_NUM_ETH_QUEUES(bp);
}

/* ethtool get_sset_count() handler: number of stat/test/priv-flag strings */
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_strings = 0;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_strings = bnx2x_num_stat_queues(bp) *
				      BNX2X_NUM_Q_STATS;
		} else
			num_strings = 0;
		if (IS_MF_MODE_STAT(bp)) {
			/* Only function stats are reported in MF mode */
			for (i = 0; i <
			     BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_strings++;
		} else
			num_strings += BNX2X_NUM_STATS;

		return num_strings;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS(bp);

	case ETH_SS_PRIV_FLAGS:
		return BNX2X_PRI_FLAG_LEN;

	default:
		return -EINVAL;
	}
}

/* ethtool get_priv_flags() handler: report iSCSI/FCoE offload presence and
 * storage-only multi-function mode as private flag bits.
 */
static u32 bnx2x_get_private_flags(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = 0;

	flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
	flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
	flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;

	return flags;
}

/* ethtool get_strings() handler: emit per-queue stat names (substituting
 * the queue index into the "[%s]: ..." format strings), global stat names,
 * self-test names and private-flag names.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k, start;
	char queue_name[MAX_QUEUE_NAME_LEN+1];

	switch (stringset) {
	case ETH_SS_STATS:
		k = 0;
		if (is_multi(bp)) {
			for_each_eth_queue(bp, i) {
				memset(queue_name, 0, sizeof(queue_name));
				sprintf(queue_name, "%d", i);
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
						 ETH_GSTRING_LEN,
						 bnx2x_q_stats_arr[j].string,
						 queue_name);
				k += BNX2X_NUM_Q_STATS;
			}
		}

		/* Global stats; port-only stats are hidden in MF mode */
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}

		break;

	case ETH_SS_TEST:
		/* First 4 tests cannot be done in MF mode */
		if (!IS_MF(bp))
			start = 0;
		else
			start = 4;
		memcpy(buf, bnx2x_tests_str_arr + start,
		       ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, bnx2x_private_arr,
		       ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
		break;
	}
}

/* ethtool get_ethtool_stats() handler: copy per-queue and global counters
 * into @buf. Hardware keeps 64-bit counters as hi/lo u32 pairs (folded via
 * HILO_U64); entries with size 0 are placeholders and read as 0.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k = 0;

	if (is_multi(bp)) {
		for_each_eth_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
	}

	hw_stats = (u32 *)&bp->eth_stats;
	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
			continue;
		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[k + j] = 0;
			j++;
			continue;
		}
		offset = (hw_stats + bnx2x_stats_arr[i].offset);
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[k + j] = (u64) *offset;
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[k + j] = HILO_U64(*offset, *(offset + 1));
		j++;
	}
}

/* ethtool set_phys_id() handler: blink the port LED for identification */
static int bnx2x_set_phys_id(struct net_device *dev,
			     enum ethtool_phys_id_state state)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_ON, SPEED_1000);
		bnx2x_release_phy_lock(bp);
		break;

	case
	     ETHTOOL_ID_OFF:
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_FRONT_PANEL_OFF, 0);
		bnx2x_release_phy_lock(bp);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the LED to reflect the actual link state */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_set_led(&bp->link_params, &bp->link_vars,
			      LED_MODE_OPER,
			      bp->link_vars.line_speed);
		bnx2x_release_phy_lock(bp);
	}

	return 0;
}

/* Report the RX hash fields (RXH_*) in use for a given flow type */
static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
{
	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		/* UDP may be hashed on 2 or 4 tuples, per configuration */
		if (bp->rss_conf_obj.udp_rss_v4)
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		else
			info->data = RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (bp->rss_conf_obj.udp_rss_v6)
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		else
			info->data = RXH_IP_SRC | RXH_IP_DST;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data = RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

/* ethtool get_rxnfc() handler */
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			   u32 *rules __always_unused)
{
	struct bnx2x *bp = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = BNX2X_NUM_ETH_QUEUES(bp);
		return 0;
	case ETHTOOL_GRXFH:
		return bnx2x_get_rss_flags(bp, info);
	default:
		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

/* Validate and apply a requested RSS hash-field configuration; only the
 * UDP 2-tuple/4-tuple choice is actually configurable.
 */
static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
{
	int udp_rss_requested;

	DP(BNX2X_MSG_ETHTOOL,
	   "Set rss flags command parameters: flow type = %d, data = %llu\n",
	   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tupple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		/* For UDP either 2-tupple hash or 4-tupple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3))
			udp_rss_requested = 1;
		else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
			udp_rss_requested = 0;
		else
			return -EINVAL;
		/* Reconfigure RSS only when the requested mode differs
		 * from the current one.
		 */
		if ((info->flow_type == UDP_V4_FLOW) &&
		    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
			bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
			DP(BNX2X_MSG_ETHTOOL,
			   "rss re-configured, UDP 4-tupple %s\n",
			   udp_rss_requested ? "enabled" : "disabled");
			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
		} else if ((info->flow_type == UDP_V6_FLOW) &&
			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
			DP(BNX2X_MSG_ETHTOOL,
			   "rss re-configured, UDP 4-tupple %s\n",
			   udp_rss_requested ? "enabled" : "disabled");
			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
		}
		return 0;

	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tupple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

/* ethtool set_rxnfc() handler */
static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct bnx2x *bp = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return bnx2x_set_rss_flags(bp, info);
	default:
		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

/* ethtool get_rxfh_indir_size() handler */
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
	return T_ETH_INDIRECTION_TABLE_SIZE;
}

/* ethtool rxfh-indirection getter (body continues past this chunk) */
static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	size_t i;

	/* Get the current configuration of the RSS indirection table */
	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);

	/*
	 * We can't use a memcpy() as an internal storage of an
	 * indirection table is a u8 array while indir->ring_index
	 * points to an array of u32.
	 *
	 * Indirection table contains the FW Client IDs, so we need to
	 * align the returned table to the Client ID of the leading RSS
	 * queue.
3363 */ 3364 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) 3365 indir[i] = ind_table[i] - bp->fp->cl_id; 3366 3367 return 0; 3368 } 3369 3370 static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) 3371 { 3372 struct bnx2x *bp = netdev_priv(dev); 3373 size_t i; 3374 3375 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 3376 /* 3377 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() 3378 * as an internal storage of an indirection table is a u8 array 3379 * while indir->ring_index points to an array of u32. 3380 * 3381 * Indirection table contains the FW Client IDs, so we need to 3382 * align the received table to the Client ID of the leading RSS 3383 * queue 3384 */ 3385 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; 3386 } 3387 3388 return bnx2x_config_rss_eth(bp, false); 3389 } 3390 3391 /** 3392 * bnx2x_get_channels - gets the number of RSS queues. 3393 * 3394 * @dev: net device 3395 * @channels: returns the number of max / current queues 3396 */ 3397 static void bnx2x_get_channels(struct net_device *dev, 3398 struct ethtool_channels *channels) 3399 { 3400 struct bnx2x *bp = netdev_priv(dev); 3401 3402 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); 3403 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); 3404 } 3405 3406 /** 3407 * bnx2x_change_num_queues - change the number of RSS queues. 3408 * 3409 * @bp: bnx2x private structure 3410 * 3411 * Re-configure interrupt mode to get the new number of MSI-X 3412 * vectors and re-add NAPI objects. 3413 */ 3414 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 3415 { 3416 bnx2x_disable_msi(bp); 3417 bp->num_ethernet_queues = num_rss; 3418 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 3419 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 3420 bnx2x_set_int_mode(bp); 3421 } 3422 3423 /** 3424 * bnx2x_set_channels - sets the number of RSS queues. 
3425 * 3426 * @dev: net device 3427 * @channels: includes the number of queues requested 3428 */ 3429 static int bnx2x_set_channels(struct net_device *dev, 3430 struct ethtool_channels *channels) 3431 { 3432 struct bnx2x *bp = netdev_priv(dev); 3433 3434 DP(BNX2X_MSG_ETHTOOL, 3435 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", 3436 channels->rx_count, channels->tx_count, channels->other_count, 3437 channels->combined_count); 3438 3439 /* We don't support separate rx / tx channels. 3440 * We don't allow setting 'other' channels. 3441 */ 3442 if (channels->rx_count || channels->tx_count || channels->other_count 3443 || (channels->combined_count == 0) || 3444 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { 3445 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n"); 3446 return -EINVAL; 3447 } 3448 3449 /* Check if there was a change in the active parameters */ 3450 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { 3451 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n"); 3452 return 0; 3453 } 3454 3455 /* Set the requested number of queues in bp context. 3456 * Note that the actual number of queues created during load may be 3457 * less than requested if memory is low. 
3458 */ 3459 if (unlikely(!netif_running(dev))) { 3460 bnx2x_change_num_queues(bp, channels->combined_count); 3461 return 0; 3462 } 3463 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 3464 bnx2x_change_num_queues(bp, channels->combined_count); 3465 return bnx2x_nic_load(bp, LOAD_NORMAL); 3466 } 3467 3468 static const struct ethtool_ops bnx2x_ethtool_ops = { 3469 .get_settings = bnx2x_get_settings, 3470 .set_settings = bnx2x_set_settings, 3471 .get_drvinfo = bnx2x_get_drvinfo, 3472 .get_regs_len = bnx2x_get_regs_len, 3473 .get_regs = bnx2x_get_regs, 3474 .get_dump_flag = bnx2x_get_dump_flag, 3475 .get_dump_data = bnx2x_get_dump_data, 3476 .set_dump = bnx2x_set_dump, 3477 .get_wol = bnx2x_get_wol, 3478 .set_wol = bnx2x_set_wol, 3479 .get_msglevel = bnx2x_get_msglevel, 3480 .set_msglevel = bnx2x_set_msglevel, 3481 .nway_reset = bnx2x_nway_reset, 3482 .get_link = bnx2x_get_link, 3483 .get_eeprom_len = bnx2x_get_eeprom_len, 3484 .get_eeprom = bnx2x_get_eeprom, 3485 .set_eeprom = bnx2x_set_eeprom, 3486 .get_coalesce = bnx2x_get_coalesce, 3487 .set_coalesce = bnx2x_set_coalesce, 3488 .get_ringparam = bnx2x_get_ringparam, 3489 .set_ringparam = bnx2x_set_ringparam, 3490 .get_pauseparam = bnx2x_get_pauseparam, 3491 .set_pauseparam = bnx2x_set_pauseparam, 3492 .self_test = bnx2x_self_test, 3493 .get_sset_count = bnx2x_get_sset_count, 3494 .get_priv_flags = bnx2x_get_private_flags, 3495 .get_strings = bnx2x_get_strings, 3496 .set_phys_id = bnx2x_set_phys_id, 3497 .get_ethtool_stats = bnx2x_get_ethtool_stats, 3498 .get_rxnfc = bnx2x_get_rxnfc, 3499 .set_rxnfc = bnx2x_set_rxnfc, 3500 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 3501 .get_rxfh_indir = bnx2x_get_rxfh_indir, 3502 .set_rxfh_indir = bnx2x_set_rxfh_indir, 3503 .get_channels = bnx2x_get_channels, 3504 .set_channels = bnx2x_set_channels, 3505 .get_module_info = bnx2x_get_module_info, 3506 .get_module_eeprom = bnx2x_get_module_eeprom, 3507 .get_eee = bnx2x_get_eee, 3508 .set_eee = bnx2x_set_eee, 3509 .get_ts_info = 
ethtool_op_get_ts_info, 3510 }; 3511 3512 static const struct ethtool_ops bnx2x_vf_ethtool_ops = { 3513 .get_settings = bnx2x_get_settings, 3514 .set_settings = bnx2x_set_settings, 3515 .get_drvinfo = bnx2x_get_drvinfo, 3516 .get_msglevel = bnx2x_get_msglevel, 3517 .set_msglevel = bnx2x_set_msglevel, 3518 .get_link = bnx2x_get_link, 3519 .get_coalesce = bnx2x_get_coalesce, 3520 .get_ringparam = bnx2x_get_ringparam, 3521 .set_ringparam = bnx2x_set_ringparam, 3522 .get_sset_count = bnx2x_get_sset_count, 3523 .get_strings = bnx2x_get_strings, 3524 .get_ethtool_stats = bnx2x_get_ethtool_stats, 3525 .get_rxnfc = bnx2x_get_rxnfc, 3526 .set_rxnfc = bnx2x_set_rxnfc, 3527 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 3528 .get_rxfh_indir = bnx2x_get_rxfh_indir, 3529 .set_rxfh_indir = bnx2x_set_rxfh_indir, 3530 .get_channels = bnx2x_get_channels, 3531 .set_channels = bnx2x_set_channels, 3532 }; 3533 3534 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) 3535 { 3536 if (IS_PF(bp)) 3537 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); 3538 else /* vf */ 3539 SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops); 3540 } 3541