/* Copyright 2008-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Written by Yaniv Rosner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>

#include "bnx2x.h"
#include "bnx2x_cmn.h"

typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
					     struct link_params *params,
					     u8 dev_addr, u16 addr, u8 byte_cnt,
					     u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN			14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE		60
#define ETH_MAX_PACKET_SIZE		1500
#define ETH_MAX_JUMBO_PACKET_SIZE	9600
#define MDIO_ACCESS_TIMEOUT		1000
#define WC_LANE_MAX			4
#define I2C_SWITCH_WIDTH		2
#define I2C_BSC0			0
#define I2C_BSC1			1
#define I2C_WA_RETRY_CNT		3
#define I2C_WA_PWR_ITER			(I2C_WA_RETRY_CNT - 1)
#define MCPR_IMC_COMMAND_READ_OP	1
#define MCPR_IMC_COMMAND_WRITE_OP	2

/* LED Blink rate that will achieve ~15.9Hz */
#define LED_BLINK_RATE_VAL_E3		354
#define LED_BLINK_RATE_VAL_E1X_E2	480
/***********************************************************/
/*			Shortcut definitions		   */
/***********************************************************/

#define NIG_LATCH_BC_ENABLE_MI_INT 0

#define NIG_STATUS_EMAC0_MI_INT \
		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
#define NIG_STATUS_XGXS0_LINK10G \
		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
#define NIG_STATUS_XGXS0_LINK_STATUS \
		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
#define NIG_STATUS_SERDES0_LINK_STATUS \
		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
#define NIG_MASK_MI_INT \
		NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
#define NIG_MASK_XGXS0_LINK10G \
		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
#define NIG_MASK_XGXS0_LINK_STATUS \
		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
#define NIG_MASK_SERDES0_LINK_STATUS \
		NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS

#define MDIO_AN_CL73_OR_37_COMPLETE \
		(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
		 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)

#define XGXS_RESET_BITS \
	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW |   \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ |      \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN |    \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)

#define SERDES_RESET_BITS \
	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)

#define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
				SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
#define AUTONEG_REMOTE_PHY	SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY

#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
#define GP_STATUS_SPEED_MASK \
			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
#define GP_STATUS_10M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
#define GP_STATUS_100M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
#define GP_STATUS_1G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
#define GP_STATUS_2_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
#define GP_STATUS_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
#define GP_STATUS_6G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
#define GP_STATUS_10G_HIG \
			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
#define GP_STATUS_10G_CX4 \
			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
#define GP_STATUS_10G_KX4 \
			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
#define GP_STATUS_10G_KR	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
#define GP_STATUS_10G_XFI	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
#define GP_STATUS_20G_DXGXS	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
#define GP_STATUS_10G_SFI	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
#define GP_STATUS_20G_KR2	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
#define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
#define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
#define LINK_100T4		LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD		LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD		LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD		LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
#define LINK_1000XFD		LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
#define LINK_2500THD		LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD		LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
#define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
#define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
#define LINK_20GTFD		LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
#define LINK_20GXFD		LINK_STATUS_SPEED_AND_DUPLEX_20GXFD

#define LINK_UPDATE_MASK \
			(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
			 LINK_STATUS_LINK_UP | \
			 LINK_STATUS_PHYSICAL_LINK_FLAG | \
			 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
			 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
			 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
			 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
			 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
			 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)

#define SFP_EEPROM_CON_TYPE_ADDR		0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC		0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER		0x21
#define SFP_EEPROM_CON_TYPE_VAL_RJ45		0x22
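
/* The SFP EEPROM addresses below follow the standard SFP module ID map
 * (connector type at byte 2, transceiver compliance codes at byte 3, cable
 * technology bits at byte 8, options at byte 64), read from the module's
 * ID page over I2C.
 */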

#define SFP_EEPROM_COMP_CODE_ADDR		0x3
#define SFP_EEPROM_COMP_CODE_SR_MASK		(1<<4)
#define SFP_EEPROM_COMP_CODE_LR_MASK		(1<<5)
#define SFP_EEPROM_COMP_CODE_LRM_MASK		(1<<6)

#define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE	0x4
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE	0x8

#define SFP_EEPROM_OPTIONS_ADDR			0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK	0x1
#define SFP_EEPROM_OPTIONS_SIZE			2

#define EDC_MODE_LINEAR				0x0022
#define EDC_MODE_LIMITING			0x0044
#define EDC_MODE_PASSIVE_DAC			0x0055
#define EDC_MODE_ACTIVE_DAC			0x0066

/* ETS defines*/
#define DCBX_INVALID_COS			(0xFF)

#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
#define ETS_BW_LIMIT_CREDIT_WEIGHT		(0x5000)
#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS	(1360)
#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS		(2720)
#define ETS_E3B0_PBF_MIN_W_VAL			(10000)

#define MAX_PACKET_SIZE				(9700)
#define MAX_KR_LINK_RETRY			4

/**********************************************************/
/*                     INTERFACE                          */
/**********************************************************/

#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_write(_bp, _phy, \
		(_phy)->def_md_devad, \
		(_bank + (_addr & 0xf)), \
		_val)

#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_read(_bp, _phy, \
		(_phy)->def_md_devad, \
		(_bank + (_addr & 0xf)), \
		_val)

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val |= bits;
	REG_WR(bp, reg, val);
	return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val &= ~bits;
	REG_WR(bp, reg, val);
	return val;
}

/*
 * bnx2x_check_lfa - This function checks if link reinitialization is required,
 *                   or link flap can be avoided.
 *
 * @params:	link parameters
 * Returns 0 if the Link Flap Avoidance conditions are met; otherwise, the
 * failed condition code.
 */
static int bnx2x_check_lfa(struct link_params *params)
{
	u32 link_status, cfg_idx, lfa_mask, cfg_size;
	u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
	u32 saved_val, req_val, eee_status;
	struct bnx2x *bp = params->bp;

	additional_config =
		REG_RD(bp, params->lfa_base +
			   offsetof(struct shmem_lfa, additional_config));

	/* NOTE: must be first condition checked -
	 * to verify DCC bit is cleared in any case!
	 */
	if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
		DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
		REG_WR(bp, params->lfa_base +
		       offsetof(struct shmem_lfa, additional_config),
		       additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
		return LFA_DCC_LFA_DISABLED;
	}

	/* Verify that link is up */
	link_status = REG_RD(bp, params->shmem_base +
			     offsetof(struct shmem_region,
				      port_mb[params->port].link_status));
	if (!(link_status & LINK_STATUS_LINK_UP))
		return LFA_LINK_DOWN;

	/* if loaded after BOOT from SAN, don't flap the link in any case and
	 * rely on link set by preboot driver
	 */
	if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
		return 0;

	/* Verify that loopback mode is not set */
	if (params->loopback_mode)
		return LFA_LOOPBACK_ENABLED;

	/* Verify that MFW supports LFA */
	if (!params->lfa_base)
		return LFA_MFW_IS_TOO_OLD;

	if (params->num_phys == 3) {
		cfg_size = 2;
		lfa_mask = 0xffffffff;
	} else {
		cfg_size = 1;
		lfa_mask = 0xffff;
	}

	/* Compare Duplex */
	saved_val = REG_RD(bp, params->lfa_base +
			   offsetof(struct shmem_lfa, req_duplex));
	req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
		DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
		   (saved_val & lfa_mask), (req_val & lfa_mask));
		return LFA_DUPLEX_MISMATCH;
	}
	/* Compare Flow Control */
	saved_val = REG_RD(bp, params->lfa_base +
			   offsetof(struct shmem_lfa, req_flow_ctrl));
	req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
		DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
		   (saved_val & lfa_mask), (req_val & lfa_mask));
		return LFA_FLOW_CTRL_MISMATCH;
	}
	/* Compare Link Speed */
	saved_val = REG_RD(bp, params->lfa_base +
			   offsetof(struct shmem_lfa, req_line_speed));
	req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
		DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
		   (saved_val & lfa_mask), (req_val & lfa_mask));
		return LFA_LINK_SPEED_MISMATCH;
	}

	for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
		cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
					    offsetof(struct shmem_lfa,
						     speed_cap_mask[cfg_idx]));

		if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
			DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
			   cur_speed_cap_mask,
			   params->speed_cap_mask[cfg_idx]);
			return LFA_SPEED_CAP_MISMATCH;
		}
	}

	cur_req_fc_auto_adv =
		REG_RD(bp, params->lfa_base +
		       offsetof(struct shmem_lfa, additional_config)) &
		REQ_FC_AUTO_ADV_MASK;

	if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
		DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
		   cur_req_fc_auto_adv, params->req_fc_auto_adv);
		return LFA_FLOW_CTRL_MISMATCH;
	}

	eee_status = REG_RD(bp, params->shmem2_base +
			    offsetof(struct shmem2_region,
				     eee_status[params->port]));

	if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
	     (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
	    ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
	     (params->eee_mode & EEE_MODE_ADV_LPI))) {
		DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
		   eee_status);
		return LFA_EEE_MISMATCH;
	}

	/* LFA conditions are met */
	return 0;
}
/******************************************************************/
/*			EPIO/GPIO section			  */
/******************************************************************/
static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
{
	u32 epio_mask, gp_oenable;
	*en = 0;
	/* Sanity check */
	if (epio_pin > 31) {
		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
		return;
	}

	epio_mask = 1 << epio_pin;
	/* Set this EPIO to input (clear its output-enable bit) */
	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);

	*en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
}
static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
{
	u32 epio_mask, gp_output, gp_oenable;

	/* Sanity check */
	if (epio_pin > 31) {
		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
		return;
	}
	DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
	epio_mask = 1 << epio_pin;
	/* Set the value for this EPIO */
	gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
	if (en)
		gp_output |= epio_mask;
	else
		gp_output &= ~epio_mask;

	REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);

	/* Set this EPIO to output */
	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
}

static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
{
	if (pin_cfg == PIN_CFG_NA)
		return;
	if (pin_cfg >= PIN_CFG_EPIO0) {
		bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
	} else {
		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
		bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
	}
}

static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
{
	if (pin_cfg == PIN_CFG_NA)
		return -EINVAL;
	if (pin_cfg >= PIN_CFG_EPIO0) {
		bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
	} else {
		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
		*val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
	}
	return 0;

}
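
/* For the GPIO-mapped pin_cfg values, the two low bits of
 * (pin_cfg - PIN_CFG_GPIO0_P0) select the GPIO number and the next bit
 * selects the port: e.g. an offset of 6 (illustrative value) decodes to
 * gpio_num 2 on gpio_port 1.
 */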
/******************************************************************/
/*			ETS section				  */
/******************************************************************/
static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
{
	/* ETS disabled configuration*/
	struct bnx2x *bp = params->bp;

	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");

	/* mapping between entry priority to client number (0,1,2 -debug and
	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
	 * 3bits client num.
	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
	 *  cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
	 */

	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
	 * COS0 entry, 4 - COS1 entry.
	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
	 *  bit4   bit3	  bit2     bit1	    bit0
	 * MCP and debug are strict
	 */

	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
	/* defines which entries (clients) are subjected to WFQ arbitration */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
	/* For strict priority entries defines the number of consecutive
	 * slots for the highest priority.
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
	/* mapping between the CREDIT_WEIGHT registers and actual client
	 * numbers
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);

	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
	/* ETS mode disable */
	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
	 * weight for COS0/COS1.
	 */
	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
	REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
	/* Defines the number of consecutive slots for the strict priority */
	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
/******************************************************************************
 * Description:
 *	min_w_val is set according to the current line speed.
 ******************************************************************************/
static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
{
	u32 min_w_val = 0;
	/* Calculate min_w_val.*/
	if (vars->link_up) {
		if (vars->line_speed == SPEED_20000)
			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
		else
			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
	} else
		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
	/* If the link isn't up (static configuration, for example), the
	 * 20GBPS value is used.
	 */
	return min_w_val;
}
/******************************************************************************
 * Description:
 *	Get the credit upper bound from min_w_val.
 ******************************************************************************/
static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
{
	const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
						   MAX_PACKET_SIZE);
	return credit_upper_bound;
}
/******************************************************************************
 * Description:
 *	Set credit upper bound for NIG.
 ******************************************************************************/
static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
	const struct link_params *params,
	const u32 min_w_val)
{
	struct bnx2x *bp = params->bp;
	const u8 port = params->port;
	const u32 credit_upper_bound =
		bnx2x_ets_get_credit_upper_bound(min_w_val);

	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);

	if (!port) {
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
		       credit_upper_bound);
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
		       credit_upper_bound);
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
		       credit_upper_bound);
	}
}
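
/* Worked example for the upper bound written above: with
 * min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) the value is
 * max(150 * 1360, 9700) = 204000, and with the PBF minimum of 10000 it is
 * max(150 * 10000, 9700) = 1500000.
 */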
/******************************************************************************
 * Description:
 *	Return the NIG ETS registers to their init values, except for
 *	credit_upper_bound, which isn't used in this configuration (no WFQ
 *	is enabled) and is configured according to the spec.
 ******************************************************************************/
static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
					const struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	const u8 port = params->port;
	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
	/* Mapping between entry priority to client number (0,1,2 -debug and
	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
	 * COS5)(HIGHEST) 4bits client num. TODO_ETS - Should be done by
	 * reset value or init tool
	 */
	if (port) {
		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
	} else {
		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
	}
	/* For strict priority entries defines the number of consecutive
	 * slots for the highest priority.
	 */
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
	       NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
	/* Mapping between the CREDIT_WEIGHT registers and actual client
	 * numbers
	 */
	if (port) {
		/*Port 1 has 6 COS*/
		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
	} else {
		/*Port 0 has 9 COS*/
		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
		       0x43210876);
		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
	}

	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
	 * COS0 entry, 4 - COS1 entry.
	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
	 *  bit4   bit3	  bit2     bit1	    bit0
	 * MCP and debug are strict
	 */
	if (port)
		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
	else
		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
	/* defines which entries (clients) are subjected to WFQ arbitration */
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);

	/* Note that the register addresses are not contiguous, so a plain
	 * for loop is not appropriate here. In 2 port mode, port0 only
	 * COS0-5 can be used; DEBUG0, DEBUG1 and MGMT are never used for
	 * WFQ. In 4 port mode, port1 only COS0-2 can be used; DEBUG0,
	 * DEBUG1 and MGMT are never used for WFQ.
	 */
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
	if (!port) {
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
	}

	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
}
/******************************************************************************
 * Description:
 *	Set credit upper bound for PBF.
 ******************************************************************************/
static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
	const struct link_params *params,
	const u32 min_w_val)
{
	struct bnx2x *bp = params->bp;
	const u32 credit_upper_bound =
		bnx2x_ets_get_credit_upper_bound(min_w_val);
	const u8 port = params->port;
	u32 base_upper_bound = 0;
	u8 max_cos = 0;
	u8 i = 0;
	/* In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
	 * port mode port1 has COS0-2 that can be used for WFQ.
	 */
	if (!port) {
		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
	} else {
		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
	}

	for (i = 0; i < max_cos; i++)
		REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
}

/******************************************************************************
 * Description:
 *	Return the PBF ETS registers to their init values, except for
 *	credit_upper_bound, which isn't used in this configuration (no WFQ
 *	is enabled) and is configured according to the spec.
 ******************************************************************************/
static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	const u8 port = params->port;
	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
	u8 i = 0;
	u32 base_weight = 0;
	u8 max_cos = 0;

	/* Mapping between entry priority to client number 0 - COS0
	 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
	 * TODO_ETS - Should be done by reset value or init tool
	 */
	if (port)
		/*  0x688 (|011|0 10|00 1|000) */
		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
	else
		/*  (10 1|100 |011|0 10|00 1|000) */
		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);

	/* TODO_ETS - Should be done by reset value or init tool */
	if (port)
		/* 0x688 (|011|0 10|00 1|000)*/
		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
	else
		/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);

	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
	       PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);


	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);

	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
	 */
	if (!port) {
		base_weight = PBF_REG_COS0_WEIGHT_P0;
		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
	} else {
		base_weight = PBF_REG_COS0_WEIGHT_P1;
		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
	}

	for (i = 0; i < max_cos; i++)
		REG_WR(bp, base_weight + (0x4 * i), 0);

	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
}
/******************************************************************************
 * Description:
 *	E3B0 disable returns the registers basically to their init values.
 ******************************************************************************/
static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
				   const struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;

	if (!CHIP_IS_E3B0(bp)) {
		DP(NETIF_MSG_LINK,
		   "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
		return -EINVAL;
	}

	bnx2x_ets_e3b0_nig_disabled(params, vars);

	bnx2x_ets_e3b0_pbf_disabled(params);

	return 0;
}

/******************************************************************************
 * Description:
 *	Disable returns the registers basically to their init values.
 ******************************************************************************/
int bnx2x_ets_disabled(struct link_params *params,
		       struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	int bnx2x_status = 0;

	if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
		bnx2x_ets_e2e3a0_disabled(params);
	else if (CHIP_IS_E3B0(bp))
		bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
	else {
		DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
		return -EINVAL;
	}

	return bnx2x_status;
}

/******************************************************************************
 * Description:
 *	Set the COS mapping to SP and BW; until this point none of the COS
 *	entries are set as SP or BW.
 ******************************************************************************/
static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
				  const struct bnx2x_ets_params *ets_params,
				  const u8 cos_sp_bitmap,
				  const u8 cos_bw_bitmap)
{
	struct bnx2x *bp = params->bp;
	const u8 port = params->port;
	const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
	const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
	const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
	const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;

	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);

	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);

	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
	       nig_cli_subject2wfq_bitmap);

	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
	       pbf_cli_subject2wfq_bitmap);

	return 0;
}
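
/* For example, with cos_sp_bitmap = 0x1 and cos_bw_bitmap = 0x6, the NIG
 * gets strict bitmap 0x7 | (0x1 << 3) = 0x0f and WFQ bitmap 0x6 << 3 = 0x30,
 * while the PBF gets 0x1 and 0x6 respectively (the PBF has no MCP/debug
 * clients in front of the COS entries).
 */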

/******************************************************************************
 * Description:
 *	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
 *	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset cannot be used.
 ******************************************************************************/
static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
				     const u8 cos_entry,
				     const u32 min_w_val_nig,
				     const u32 min_w_val_pbf,
				     const u16 total_bw,
				     const u8 bw,
				     const u8 port)
{
	u32 nig_reg_adress_crd_weight = 0;
	u32 pbf_reg_adress_crd_weight = 0;
	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
	const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;

	switch (cos_entry) {
	case 0:
		nig_reg_adress_crd_weight =
			(port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
		pbf_reg_adress_crd_weight = (port) ?
			PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
		break;
	case 1:
		nig_reg_adress_crd_weight = (port) ?
			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
		pbf_reg_adress_crd_weight = (port) ?
			PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
		break;
	case 2:
		nig_reg_adress_crd_weight = (port) ?
			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;

		pbf_reg_adress_crd_weight = (port) ?
			PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
		break;
	case 3:
		if (port)
			return -EINVAL;
		nig_reg_adress_crd_weight =
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
		pbf_reg_adress_crd_weight =
			PBF_REG_COS3_WEIGHT_P0;
		break;
	case 4:
		if (port)
			return -EINVAL;
		nig_reg_adress_crd_weight =
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
		pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
		break;
	case 5:
		if (port)
			return -EINVAL;
		nig_reg_adress_crd_weight =
			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
		pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
		break;
	}

	REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);

	REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);

	return 0;
}
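
/* For example, a COS with bw = 25 out of total_bw = 100 at up-to-10G link
 * speed (min_w_val_nig = 1360, min_w_val_pbf = 10000) is programmed with a
 * NIG credit weight of 25 * 1360 / 100 = 340 and a PBF weight of 2500.
 */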
/******************************************************************************
 * Description:
 *	Calculate the total BW. A value of 0 isn't legal.
 ******************************************************************************/
static int bnx2x_ets_e3b0_get_total_bw(
	const struct link_params *params,
	struct bnx2x_ets_params *ets_params,
	u16 *total_bw)
{
	struct bnx2x *bp = params->bp;
	u8 cos_idx = 0;
	u8 is_bw_cos_exist = 0;

	*total_bw = 0;
	/* Calculate total BW requested */
	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
		if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
			is_bw_cos_exist = 1;
			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
						   "was set to 0\n");
				/* This is to prevent a state when ramrods
				 * can't be sent
				 */
				ets_params->cos[cos_idx].params.bw_params.bw
					= 1;
			}
			*total_bw +=
				ets_params->cos[cos_idx].params.bw_params.bw;
		}
	}

	/* Check total BW is valid */
	if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
		if (*total_bw == 0) {
			DP(NETIF_MSG_LINK,
			   "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
			return -EINVAL;
		}
		DP(NETIF_MSG_LINK,
		   "bnx2x_ets_E3B0_config total BW should be 100\n");
		/* We can handle a case where the BW isn't 100; this can
		 * happen if the TCs are joined.
		 */
	}
	return 0;
}

/******************************************************************************
 * Description:
 *	Invalidate all the sp_pri_to_cos.
 ******************************************************************************/
static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
{
	u8 pri = 0;
	for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
}
/******************************************************************************
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos.
 ******************************************************************************/
static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
					    u8 *sp_pri_to_cos, const u8 pri,
					    const u8 cos_entry)
{
	struct bnx2x *bp = params->bp;
	const u8 port = params->port;
	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
		DCBX_E3B0_MAX_NUM_COS_PORT0;

	if (pri >= max_num_of_cos) {
		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
		   "parameter Illegal strict priority\n");
		return -EINVAL;
	}

	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
		   "parameter There can't be two COS's with "
		   "the same strict pri\n");
		return -EINVAL;
	}

	sp_pri_to_cos[pri] = cos_entry;
	return 0;

}

/******************************************************************************
 * Description:
 *	Returns the correct value according to COS and priority in
 *	the sp_pri_cli register.
 ******************************************************************************/
static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
					 const u8 pri_set,
					 const u8 pri_offset,
					 const u8 entry_size)
{
	u64 pri_cli_nig = 0;
	pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
						    (pri_set + pri_offset));

	return pri_cli_nig;
}
/******************************************************************************
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for NIG.
 ******************************************************************************/
static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
{
	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
	const u8 nig_cos_offset = 3;
	const u8 nig_pri_offset = 3;

	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
					     nig_pri_offset, 4);

}
/******************************************************************************
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for PBF.
 ******************************************************************************/
static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
{
	const u8 pbf_cos_offset = 0;
	const u8 pbf_pri_offset = 0;

	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
					     pbf_pri_offset, 3);

}
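
/* Worked example: COS 2 placed at strict-priority slot pri_set = 0
 * contributes (2 + 3) << (4 * (0 + 3)) = 0x5000 to the NIG value (client 5
 * in the fourth nibble) and 2 << (3 * 0) = 0x2 to the PBF value.
 */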

/******************************************************************************
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos (which COS has higher priority).
 ******************************************************************************/
static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
					     u8 *sp_pri_to_cos)
{
	struct bnx2x *bp = params->bp;
	u8 i = 0;
	const u8 port = params->port;
	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
	u64 pri_cli_nig = 0x210;
	u32 pri_cli_pbf = 0x0;
	u8 pri_set = 0;
	u8 pri_bitmask = 0;
	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
		DCBX_E3B0_MAX_NUM_COS_PORT0;

	u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;

	/* Set all the strict priority first */
	for (i = 0; i < max_num_of_cos; i++) {
		if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
			if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
				DP(NETIF_MSG_LINK,
				   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
				   "invalid cos entry\n");
				return -EINVAL;
			}

			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
				sp_pri_to_cos[i], pri_set);

			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
				sp_pri_to_cos[i], pri_set);
			pri_bitmask = 1 << sp_pri_to_cos[i];
			/* COS is used, remove it from the bitmap.*/
			if (!(pri_bitmask & cos_bit_to_set)) {
				DP(NETIF_MSG_LINK,
				   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
				   "invalid There can't be two COS's with"
				   " the same strict pri\n");
				return -EINVAL;
			}
			cos_bit_to_set &= ~pri_bitmask;
			pri_set++;
		}
	}

	/* Set all the non-strict priority entries, i.e. the COS */
	for (i = 0; i < max_num_of_cos; i++) {
		pri_bitmask = 1 << i;
		/* Check if COS was already used for SP */
		if (pri_bitmask & cos_bit_to_set) {
			/* COS wasn't used for SP */
			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
				i, pri_set);

			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
				i, pri_set);
			/* COS is used, remove it from the bitmap.*/
			cos_bit_to_set &= ~pri_bitmask;
			pri_set++;
		}
	}

	if (pri_set != max_num_of_cos) {
		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
		   "entries were set\n");
		return -EINVAL;
	}

	if (port) {
		/* Only 6 usable clients*/
		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
		       (u32)pri_cli_nig);

		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
	} else {
		/* Only 9 usable clients*/
		const u32 pri_cli_nig_lsb = (u32)(pri_cli_nig);
		const u32 pri_cli_nig_msb = (u32)((pri_cli_nig >> 32) & 0xF);

		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
		       pri_cli_nig_lsb);
		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
		       pri_cli_nig_msb);

		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
	}
	return 0;
}

/******************************************************************************
 * Description:
 *	Configure the COS to ETS according to BW and SP settings.
 ******************************************************************************/
int bnx2x_ets_e3b0_config(const struct link_params *params,
			  const struct link_vars *vars,
			  struct bnx2x_ets_params *ets_params)
{
	struct bnx2x *bp = params->bp;
	int bnx2x_status = 0;
	const u8 port = params->port;
	u16 total_bw = 0;
	const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
	u8 cos_bw_bitmap = 0;
	u8 cos_sp_bitmap = 0;
	u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
		DCBX_E3B0_MAX_NUM_COS_PORT0;
	u8 cos_entry = 0;

	if (!CHIP_IS_E3B0(bp)) {
		DP(NETIF_MSG_LINK,
		   "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
		return -EINVAL;
	}

	if (ets_params->num_of_cos > max_num_of_cos) {
		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
				   "isn't supported\n");
		return -EINVAL;
	}

	/* Prepare sp strict priority parameters*/
	bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);

	/* Prepare BW parameters*/
	bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
						   &total_bw);
	if (bnx2x_status) {
		DP(NETIF_MSG_LINK,
		   "bnx2x_ets_E3B0_config get_total_bw failed\n");
		return -EINVAL;
	}

	/* Upper bound is set according to current link speed (min_w_val
	 * should be the same for upper bound and COS credit val).
	 */
	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);


	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
			cos_bw_bitmap |= (1 << cos_entry);
			/* The function also sets the BW in HW (not the
			 * mapping yet)
			 */
			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
				bp, cos_entry, min_w_val_nig, min_w_val_pbf,
				total_bw,
				ets_params->cos[cos_entry].params.bw_params.bw,
				port);
		} else if (bnx2x_cos_state_strict ==
			   ets_params->cos[cos_entry].state) {
			cos_sp_bitmap |= (1 << cos_entry);

			bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
				params,
				sp_pri_to_cos,
				ets_params->cos[cos_entry].params.sp_params.pri,
				cos_entry);

		} else {
			DP(NETIF_MSG_LINK,
			   "bnx2x_ets_e3b0_config cos state not valid\n");
			return -EINVAL;
		}
		if (bnx2x_status) {
			DP(NETIF_MSG_LINK,
			   "bnx2x_ets_e3b0_config set cos bw failed\n");
			return bnx2x_status;
		}
	}

	/* Set SP register (which COS has higher priority) */
	bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
							 sp_pri_to_cos);

	if (bnx2x_status) {
		DP(NETIF_MSG_LINK,
		   "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
		return bnx2x_status;
	}

	/* Set client mapping of BW and strict */
	bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
					      cos_sp_bitmap,
					      cos_bw_bitmap);

	if (bnx2x_status) {
		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
		return bnx2x_status;
	}
	return 0;
}
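
/* Illustrative-only example of how a caller might fill bnx2x_ets_params for
 * the function above: split bandwidth 60/40 between COS0 and COS1 and make
 * COS2 strict at priority 0 (field names taken from the code above):
 *	ets.num_of_cos = 3;
 *	ets.cos[0].state = bnx2x_cos_state_bw;
 *	ets.cos[0].params.bw_params.bw = 60;
 *	ets.cos[1].state = bnx2x_cos_state_bw;
 *	ets.cos[1].params.bw_params.bw = 40;
 *	ets.cos[2].state = bnx2x_cos_state_strict;
 *	ets.cos[2].params.sp_params.pri = 0;
 * and then call bnx2x_ets_e3b0_config(params, vars, &ets).
 */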
static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
	/* ETS disabled configuration */
	struct bnx2x *bp = params->bp;
	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
	/* Defines which entries (clients) are subjected to WFQ arbitration
	 * COS0 0x8
	 * COS1 0x10
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
	 * client numbers (WEIGHT_0 does not actually have to represent
	 * client 0)
	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);

	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);

	/* ETS mode enabled*/
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);

	/* Defines the number of consecutive slots for the strict priority */
	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
	 * entry, 4 - COS1 entry.
	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
	 *  bit4   bit3	  bit2     bit1	    bit0
	 * MCP and debug are strict
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);

	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
}

void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
			const u32 cos1_bw)
{
	/* ETS disabled configuration*/
	struct bnx2x *bp = params->bp;
	const u32 total_bw = cos0_bw + cos1_bw;
	u32 cos0_credit_weight = 0;
	u32 cos1_credit_weight = 0;

	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");

	if ((!total_bw) ||
	    (!cos0_bw) ||
	    (!cos1_bw)) {
		DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
		return;
	}

	cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
			     total_bw;
	cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
			     total_bw;

	bnx2x_ets_bw_limit_common(params);

	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);

	REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
	REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
}

int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
{
	/* ETS disabled configuration*/
	struct bnx2x *bp = params->bp;
	u32 val = 0;

	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
	 * as strict.  Bits 0,1,2 - debug and management entries,
	 * 3 - COS0 entry, 4 - COS1 entry.
	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
	 *  bit4   bit3	  bit2     bit1	    bit0
	 * MCP and debug are strict
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
	/* For strict priority entries defines the number of consecutive slots
	 * for the highest priority.
	 */
	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
	/* ETS mode disable */
	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
	/* Defines the number of consecutive slots for the strict priority */
	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);

	/* Defines the number of consecutive slots for the strict priority */
	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);

	/* Mapping between entry priority to client number (0,1,2 -debug and
	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
	 * 3bits client num.
	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
	 *  dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
	 *  dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
	 */
	val = (!strict_cos) ? 0x2318 : 0x22E0;
	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);

	return 0;
}

/******************************************************************/
/*			PFC section				  */
/******************************************************************/
static void bnx2x_update_pfc_xmac(struct link_params *params,
				  struct link_vars *vars,
				  u8 is_lb)
{
	struct bnx2x *bp = params->bp;
	u32 xmac_base;
	u32 pause_val, pfc0_val, pfc1_val;

	/* XMAC base addr */
	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;

	/* Initialize pause and pfc registers */
	pause_val = 0x18000;
	pfc0_val = 0xFFFF8000;
	pfc1_val = 0x2;

	/* No PFC support */
	if (!(params->feature_config_flags &
	      FEATURE_CONFIG_PFC_ENABLED)) {

		/* RX flow control - Process pause frame in receive direction
		 */
		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;

		/* TX flow control - Send pause packet when buffer is full */
		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
	} else {/* PFC support */
		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
			XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
		/* Write pause and PFC registers */
		REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
		pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;

	}

	/* Write pause and PFC registers */
	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);


	/* Set MAC address for source TX Pause/PFC frames */
	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
	       ((params->mac_addr[2] << 24) |
		(params->mac_addr[3] << 16) |
		(params->mac_addr[4] << 8) |
		(params->mac_addr[5])));
	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
	       ((params->mac_addr[0] << 8) |
		(params->mac_addr[1])));

	udelay(30);
}
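
/* Note on the PFC branch above: FORCE_PFC_XON is included only in the first
 * write of XMAC_REG_PFC_CTRL_HI and then cleared from pfc1_val, so the
 * second write (after the if/else) releases it - effectively a one-shot
 * forced-XON pulse.
 */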


static void bnx2x_emac_get_pfc_stat(struct link_params *params,
				    u32 pfc_frames_sent[2],
				    u32 pfc_frames_received[2])
{
	/* Read pfc statistic */
	struct bnx2x *bp = params->bp;
	u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val_xon = 0;
	u32 val_xoff = 0;

	DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");

	/* PFC received frames */
	val_xoff = REG_RD(bp, emac_base +
			  EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;

	pfc_frames_received[0] = val_xon + val_xoff;

	/* PFC sent frames */
	val_xoff = REG_RD(bp, emac_base +
			  EMAC_REG_RX_PFC_STATS_XOFF_SENT);
	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;

	pfc_frames_sent[0] = val_xon + val_xoff;
}

/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
			 u32 pfc_frames_sent[2],
			 u32 pfc_frames_received[2])
{
	/* Read pfc statistic */
	struct bnx2x *bp = params->bp;

	DP(NETIF_MSG_LINK, "pfc statistic\n");

	if (!vars->link_up)
		return;

	if (vars->mac_type == MAC_TYPE_EMAC) {
		DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
		bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
					pfc_frames_received);
	}
}
/******************************************************************/
/*			MAC/PBF section				  */
/******************************************************************/
static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
			       u32 emac_base)
{
	u32 new_mode, cur_mode;
	u32 clc_cnt;
	/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);

	if (USES_WARPCORE(bp))
		clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
	else
		clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;

	if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
	    (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
		return;

	new_mode = cur_mode &
		~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	new_mode |= clc_cnt;
	new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);

	DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
	   cur_mode, new_mode);
	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
	udelay(40);
}

static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
					struct link_params *params)
{
	u8 phy_index;
	/* Set mdio clock per phy */
	for (phy_index = INT_PHY; phy_index < params->num_phys;
	     phy_index++)
		bnx2x_set_mdio_clk(bp, params->chip_id,
				   params->phy[phy_index].mdio_ctrl);
}

static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
{
	u32 port4mode_ovwr_val;
	/* Check 4-port override enabled */
	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
	if (port4mode_ovwr_val & (1<<0)) {
		/* Return 4-port mode override value */
		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
	}
	/* Return 4-port mode from input pin */
	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
}

static void bnx2x_emac_init(struct link_params *params,
			    struct link_vars *vars)
{
	/* reset and unreset the emac core */
	struct bnx2x *bp = params->bp;
	u8 port = params->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val;
	u16 timeout;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
	udelay(5);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));

	/* init emac - use read-modify-write */
	/* self clear reset */
	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
	EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));

	timeout = 200;
	do {
		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
		if (!timeout) {
			DP(NETIF_MSG_LINK, "EMAC timeout!\n");
			return;
		}
		timeout--;
	} while (val & EMAC_MODE_RESET);

	bnx2x_set_mdio_emac_per_phy(bp, params);
	/* Set mac address */
	val = ((params->mac_addr[0] << 8) |
		params->mac_addr[1]);
	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);

	val = ((params->mac_addr[2] << 24) |
	       (params->mac_addr[3] << 16) |
	       (params->mac_addr[4] << 8) |
		params->mac_addr[5]);
	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
}

static void bnx2x_set_xumac_nig(struct link_params *params,
				u16 tx_pause_en,
				u8 enable)
{
	struct bnx2x *bp = params->bp;

	REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
	       enable);
	REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
	       enable);
	REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
	       NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
}

static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
{
	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
	u32 val;
	struct bnx2x *bp = params->bp;
	if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
	      (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
		return;
	val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
	if (en)
		val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
			UMAC_COMMAND_CONFIG_REG_RX_ENA);
	else
		val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
			 UMAC_COMMAND_CONFIG_REG_RX_ENA);
	/* Enable/disable RX and TX */
	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
}

static void bnx2x_umac_enable(struct link_params *params,
			      struct link_vars *vars, u8 lb)
{
	u32 val;
	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
	struct bnx2x *bp = params->bp;
	/* Reset UMAC */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
	usleep_range(1000, 2000);

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));

	DP(NETIF_MSG_LINK, "enabling UMAC\n");

	/* This register opens the gate for the UMAC despite its name */
	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);

	val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
	      UMAC_COMMAND_CONFIG_REG_PAD_EN |
	      UMAC_COMMAND_CONFIG_REG_SW_RESET |
	      UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
	switch (vars->line_speed) {
	case SPEED_10:
		val |= (0<<2);
		break;
	case SPEED_100:
		val |= (1<<2);
		break;
	case SPEED_1000:
		val |= (2<<2);
		break;
	case SPEED_2500:
		val |= (3<<2);
		break;
	default:
		DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
		   vars->line_speed);
		break;
	}
	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
		val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;

	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
		val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;

	if (vars->duplex == DUPLEX_HALF)
		val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;

	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
	udelay(50);

	/* Configure UMAC for EEE */
	if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
		DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
		REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
		       UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
		REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
	} else {
		REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
	}

	/* Set MAC address for source TX Pause/PFC frames (under SW reset) */
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
	       ((params->mac_addr[2] << 24) |
		(params->mac_addr[3] << 16) |
		(params->mac_addr[4] << 8) |
		(params->mac_addr[5])));
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
	       ((params->mac_addr[0] << 8) |
		(params->mac_addr[1])));

	/* Enable RX and TX */
	val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
	       UMAC_COMMAND_CONFIG_REG_RX_ENA;
	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
	udelay(50);

	/* Remove SW Reset */
	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;

	/* Check loopback mode */
	if (lb)
		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);

	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
	 * length used by the MAC receive logic to check frames.
	 */
	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
	bnx2x_set_xumac_nig(params,
			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
	vars->mac_type = MAC_TYPE_UMAC;

}
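
/* Note: 0x2710 is 10000 decimal, so the UMAC accepts frames up to 10000
 * bytes - comfortably above the MAX_PACKET_SIZE (9700) used elsewhere in
 * this file.
 */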
1678 */ 1679 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 1680 bnx2x_set_xumac_nig(params, 1681 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); 1682 vars->mac_type = MAC_TYPE_UMAC; 1683 1684 } 1685 1686 /* Define the XMAC mode */ 1687 static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) 1688 { 1689 struct bnx2x *bp = params->bp; 1690 u32 is_port4mode = bnx2x_is_4_port_mode(bp); 1691 1692 /* In 4-port mode, need to set the mode only once, so if XMAC is 1693 * already out of reset, it means the mode has already been set, 1694 * and it must not* reset the XMAC again, since it controls both 1695 * ports of the path 1696 */ 1697 1698 if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || 1699 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || 1700 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) && 1701 is_port4mode && 1702 (REG_RD(bp, MISC_REG_RESET_REG_2) & 1703 MISC_REGISTERS_RESET_REG_2_XMAC)) { 1704 DP(NETIF_MSG_LINK, 1705 "XMAC already out of reset in 4-port mode\n"); 1706 return; 1707 } 1708 1709 /* Hard reset */ 1710 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1711 MISC_REGISTERS_RESET_REG_2_XMAC); 1712 usleep_range(1000, 2000); 1713 1714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1715 MISC_REGISTERS_RESET_REG_2_XMAC); 1716 if (is_port4mode) { 1717 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); 1718 1719 /* Set the number of ports on the system side to up to 2 */ 1720 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); 1721 1722 /* Set the number of ports on the Warp Core to 10G */ 1723 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); 1724 } else { 1725 /* Set the number of ports on the system side to 1 */ 1726 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); 1727 if (max_speed == SPEED_10000) { 1728 DP(NETIF_MSG_LINK, 1729 "Init XMAC to 10G x 1 port per path\n"); 1730 /* Set the number of ports on the Warp Core to 10G */ 1731 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); 1732 } else { 1733 DP(NETIF_MSG_LINK, 1734 "Init XMAC to 20G x 2 ports per path\n"); 1735 /* Set the number of ports on the Warp Core to 20G */ 1736 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); 1737 } 1738 } 1739 /* Soft reset */ 1740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1741 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1742 usleep_range(1000, 2000); 1743 1744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1745 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1746 1747 } 1748 1749 static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en) 1750 { 1751 u8 port = params->port; 1752 struct bnx2x *bp = params->bp; 1753 u32 pfc_ctrl, xmac_base = (port) ? 
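	/* XMAC port-mode settings programmed by bnx2x_xmac_init() above:
	 *   4-port mode:           MISC_REG_XMAC_CORE_PORT_MODE = 1,
	 *                          MISC_REG_XMAC_PHY_PORT_MODE  = 3
	 *   2-port mode, 10G link: CORE_PORT_MODE = 0, PHY_PORT_MODE = 3
	 *   2-port mode, 20G link: CORE_PORT_MODE = 0, PHY_PORT_MODE = 1
	 * In 4-port mode the XMAC block serves both ports of the path, so
	 * it is reset and programmed only once.
	 */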
GRCBASE_XMAC1 : GRCBASE_XMAC0; 1754 u32 val; 1755 1756 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1757 MISC_REGISTERS_RESET_REG_2_XMAC) { 1758 /* Send an indication to change the state in the NIG back to XON 1759 * Clearing this bit enables the next set of this bit to get 1760 * rising edge 1761 */ 1762 pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); 1763 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, 1764 (pfc_ctrl & ~(1<<1))); 1765 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, 1766 (pfc_ctrl | (1<<1))); 1767 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); 1768 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); 1769 if (en) 1770 val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); 1771 else 1772 val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); 1773 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1774 } 1775 } 1776 1777 static int bnx2x_xmac_enable(struct link_params *params, 1778 struct link_vars *vars, u8 lb) 1779 { 1780 u32 val, xmac_base; 1781 struct bnx2x *bp = params->bp; 1782 DP(NETIF_MSG_LINK, "enabling XMAC\n"); 1783 1784 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 1785 1786 bnx2x_xmac_init(params, vars->line_speed); 1787 1788 /* This register determines on which events the MAC will assert 1789 * error on the i/f to the NIG along w/ EOP. 1790 */ 1791 1792 /* This register tells the NIG whether to send traffic to UMAC 1793 * or XMAC 1794 */ 1795 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); 1796 1797 /* When XMAC is in XLGMII mode, disable sending idles for fault 1798 * detection. 1799 */ 1800 if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) { 1801 REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL, 1802 (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE | 1803 XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE)); 1804 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); 1805 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 1806 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | 1807 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); 1808 } 1809 /* Set Max packet size */ 1810 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); 1811 1812 /* CRC append for Tx packets */ 1813 REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); 1814 1815 /* update PFC */ 1816 bnx2x_update_pfc_xmac(params, vars, 0); 1817 1818 if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { 1819 DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n"); 1820 REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); 1821 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1); 1822 } else { 1823 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); 1824 } 1825 1826 /* Enable TX and RX */ 1827 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; 1828 1829 /* Set MAC in XLGMII mode for dual-mode */ 1830 if ((vars->line_speed == SPEED_20000) && 1831 (params->phy[INT_PHY].supported & 1832 SUPPORTED_20000baseKR2_Full)) 1833 val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB; 1834 1835 /* Check loopback mode */ 1836 if (lb) 1837 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; 1838 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1839 bnx2x_set_xumac_nig(params, 1840 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); 1841 1842 vars->mac_type = MAC_TYPE_XMAC; 1843 1844 return 0; 1845 } 1846 1847 static int bnx2x_emac_enable(struct link_params *params, 1848 struct link_vars *vars, u8 lb) 1849 { 1850 struct bnx2x *bp = params->bp; 1851 u8 port = params->port; 1852 u32 emac_base = port ? 
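	/* EMAC enable overview: the BMAC is held in reset and the NIG
	 * egress path is switched to the EMAC, the XGXS master lane (or
	 * SerDes) is selected, RX/TX flow control is derived from
	 * vars->flow_ctrl unless PFC is enabled, and the RX MTU is raised
	 * to the jumbo limit (ETH_MAX_JUMBO_PACKET_SIZE plus the L2
	 * overhead). The 0x810 mask in EMAC_REG_EMAC_MODE selects MAC
	 * loopback when lb is set.
	 */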
GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val;

	DP(NETIF_MSG_LINK, "enabling EMAC\n");

	/* Disable BMAC */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* enable emac and not bmac */
	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);

	/* ASIC */
	if (vars->phy_flags & PHY_XGXS_FLAG) {
		u32 ser_lane = ((params->lane_config &
				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);

		DP(NETIF_MSG_LINK, "XGXS\n");
		/* select the master lanes (out of 0-3) */
		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
		/* select XGXS */
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	} else { /* SerDes */
		DP(NETIF_MSG_LINK, "SerDes\n");
		/* select SerDes */
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
	}

	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
		      EMAC_RX_MODE_RESET);
	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
		      EMAC_TX_MODE_RESET);

	/* pause enable/disable */
	bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
		       EMAC_RX_MODE_FLOW_EN);

	bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
		       (EMAC_TX_MODE_EXT_PAUSE_EN |
			EMAC_TX_MODE_FLOW_EN));
	if (!(params->feature_config_flags &
	      FEATURE_CONFIG_PFC_ENABLED)) {
		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
			bnx2x_bits_en(bp, emac_base +
				      EMAC_REG_EMAC_RX_MODE,
				      EMAC_RX_MODE_FLOW_EN);

		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
			bnx2x_bits_en(bp, emac_base +
				      EMAC_REG_EMAC_TX_MODE,
				      (EMAC_TX_MODE_EXT_PAUSE_EN |
				       EMAC_TX_MODE_FLOW_EN));
	} else
		bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
			      EMAC_TX_MODE_FLOW_EN);

	/* KEEP_VLAN_TAG, promiscuous */
	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;

	/* Setting this bit causes MAC control frames (except for pause
	 * frames) to be passed on for processing. This setting has no
	 * effect on the operation of the pause frames. This bit affects
	 * all packets regardless of RX Parser packet sorting logic.
	 * Turn the PFC off to make sure we are in Xon state before
	 * enabling it.
1920 */ 1921 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); 1922 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { 1923 DP(NETIF_MSG_LINK, "PFC is enabled\n"); 1924 /* Enable PFC again */ 1925 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 1926 EMAC_REG_RX_PFC_MODE_RX_EN | 1927 EMAC_REG_RX_PFC_MODE_TX_EN | 1928 EMAC_REG_RX_PFC_MODE_PRIORITIES); 1929 1930 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, 1931 ((0x0101 << 1932 EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | 1933 (0x00ff << 1934 EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); 1935 val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; 1936 } 1937 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); 1938 1939 /* Set Loopback */ 1940 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 1941 if (lb) 1942 val |= 0x810; 1943 else 1944 val &= ~0x810; 1945 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 1946 1947 /* Enable emac */ 1948 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); 1949 1950 /* Enable emac for jumbo packets */ 1951 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, 1952 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 1953 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 1954 1955 /* Strip CRC */ 1956 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); 1957 1958 /* Disable the NIG in/out to the bmac */ 1959 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); 1960 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); 1961 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); 1962 1963 /* Enable the NIG in/out to the emac */ 1964 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 1965 val = 0; 1966 if ((params->feature_config_flags & 1967 FEATURE_CONFIG_PFC_ENABLED) || 1968 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) 1969 val = 1; 1970 1971 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 1972 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); 1973 1974 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); 1975 1976 vars->mac_type = MAC_TYPE_EMAC; 1977 return 0; 1978 } 1979 1980 static void bnx2x_update_pfc_bmac1(struct link_params *params, 1981 struct link_vars *vars) 1982 { 1983 u32 wb_data[2]; 1984 struct bnx2x *bp = params->bp; 1985 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : 1986 NIG_REG_INGRESS_BMAC0_MEM; 1987 1988 u32 val = 0x14; 1989 if ((!(params->feature_config_flags & 1990 FEATURE_CONFIG_PFC_ENABLED)) && 1991 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) 1992 /* Enable BigMAC to react on received Pause packets */ 1993 val |= (1<<5); 1994 wb_data[0] = val; 1995 wb_data[1] = 0; 1996 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); 1997 1998 /* TX control */ 1999 val = 0xc0; 2000 if (!(params->feature_config_flags & 2001 FEATURE_CONFIG_PFC_ENABLED) && 2002 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) 2003 val |= 0x800000; 2004 wb_data[0] = val; 2005 wb_data[1] = 0; 2006 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); 2007 } 2008 2009 static void bnx2x_update_pfc_bmac2(struct link_params *params, 2010 struct link_vars *vars, 2011 u8 is_lb) 2012 { 2013 /* Set rx control: Strip CRC and enable BigMAC to relay 2014 * control packets to the system as well 2015 */ 2016 u32 wb_data[2]; 2017 struct bnx2x *bp = params->bp; 2018 u32 bmac_addr = params->port ? 
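	/* The BigMAC registers are written through DMAE as a pair of
	 * 32-bit words (wb_data[0] carries the value, wb_data[1] stays
	 * zero here). In BIGMAC2_REGISTER_RX_CONTROL, bit 5 makes the MAC
	 * react to received pause frames and is set only when PFC is off
	 * but RX flow control was negotiated; BIGMAC2_REGISTER_PFC_CONTROL
	 * uses bit 0 for PFC RX, bit 1 for PFC TX, bit 2 to force an
	 * initial Xon, bit 3 for 8 COS and bit 5 for statistics, as
	 * annotated below.
	 */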
NIG_REG_INGRESS_BMAC1_MEM :
			 NIG_REG_INGRESS_BMAC0_MEM;
	u32 val = 0x14;

	if ((!(params->feature_config_flags &
	       FEATURE_CONFIG_PFC_ENABLED)) &&
	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
		/* Enable BigMAC to react on received Pause packets */
		val |= (1<<5);
	wb_data[0] = val;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
	udelay(30);

	/* Tx control */
	val = 0xc0;
	if (!(params->feature_config_flags &
	      FEATURE_CONFIG_PFC_ENABLED) &&
	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
		val |= 0x800000;
	wb_data[0] = val;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);

	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
		DP(NETIF_MSG_LINK, "PFC is enabled\n");
		/* Enable PFC RX & TX & STATS and set 8 COS */
		wb_data[0] = 0x0;
		wb_data[0] |= (1<<0); /* RX */
		wb_data[0] |= (1<<1); /* TX */
		wb_data[0] |= (1<<2); /* Force initial Xon */
		wb_data[0] |= (1<<3); /* 8 cos */
		wb_data[0] |= (1<<5); /* STATS */
		wb_data[1] = 0;
		REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
			    wb_data, 2);
		/* Clear the force Xon */
		wb_data[0] &= ~(1<<2);
	} else {
		DP(NETIF_MSG_LINK, "PFC is disabled\n");
		/* Disable PFC RX & TX & STATS and set 8 COS */
		wb_data[0] = 0x8;
		wb_data[1] = 0;
	}

	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);

	/* Set time (base unit is 512 bit times) between automatic
	 * re-sending of PP packets, and enable automatic re-send of
	 * Per-Priority Packets as long as pp_gen is asserted and
	 * pp_disable is low.
	 */
	val = 0x8000;
	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
		val |= (1<<16); /* enable automatic re-send */

	wb_data[0] = val;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
		    wb_data, 2);

	/* mac control */
	val = 0x3; /* Enable RX and TX */
	if (is_lb) {
		val |= 0x4; /* Local loopback */
		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
	}
	/* When PFC enabled, Pass pause frames towards the NIG. */
	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
		val |= ((1<<6)|(1<<5));

	wb_data[0] = val;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}

/******************************************************************************
 * Description:
 *	This function is needed because the per-COS NIG RX priority-mask
 *	registers are not laid out contiguously, so a simple
 *	"base register + offset" computation is not suitable; the register
 *	address must be looked up per COS entry.
 ******************************************************************************/
static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
					  u8 cos_entry,
					  u32 priority_mask, u8 port)
{
	u32 nig_reg_rx_priority_mask_add = 0;

	switch (cos_entry) {
	case 0:
		nig_reg_rx_priority_mask_add = (port) ?
			NIG_REG_P1_RX_COS0_PRIORITY_MASK :
			NIG_REG_P0_RX_COS0_PRIORITY_MASK;
		break;
	case 1:
		nig_reg_rx_priority_mask_add = (port) ?
			NIG_REG_P1_RX_COS1_PRIORITY_MASK :
			NIG_REG_P0_RX_COS1_PRIORITY_MASK;
		break;
	case 2:
		nig_reg_rx_priority_mask_add = (port) ?
2118 NIG_REG_P1_RX_COS2_PRIORITY_MASK : 2119 NIG_REG_P0_RX_COS2_PRIORITY_MASK; 2120 break; 2121 case 3: 2122 if (port) 2123 return -EINVAL; 2124 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; 2125 break; 2126 case 4: 2127 if (port) 2128 return -EINVAL; 2129 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; 2130 break; 2131 case 5: 2132 if (port) 2133 return -EINVAL; 2134 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; 2135 break; 2136 } 2137 2138 REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); 2139 2140 return 0; 2141 } 2142 static void bnx2x_update_mng(struct link_params *params, u32 link_status) 2143 { 2144 struct bnx2x *bp = params->bp; 2145 2146 REG_WR(bp, params->shmem_base + 2147 offsetof(struct shmem_region, 2148 port_mb[params->port].link_status), link_status); 2149 } 2150 2151 static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr) 2152 { 2153 struct bnx2x *bp = params->bp; 2154 2155 if (SHMEM2_HAS(bp, link_attr_sync)) 2156 REG_WR(bp, params->shmem2_base + 2157 offsetof(struct shmem2_region, 2158 link_attr_sync[params->port]), link_attr); 2159 } 2160 2161 static void bnx2x_update_pfc_nig(struct link_params *params, 2162 struct link_vars *vars, 2163 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2164 { 2165 u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; 2166 u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0; 2167 u32 pkt_priority_to_cos = 0; 2168 struct bnx2x *bp = params->bp; 2169 u8 port = params->port; 2170 2171 int set_pfc = params->feature_config_flags & 2172 FEATURE_CONFIG_PFC_ENABLED; 2173 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 2174 2175 /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set 2176 * MAC control frames (that are not pause packets) 2177 * will be forwarded to the XCM. 2178 */ 2179 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : 2180 NIG_REG_LLH0_XCM_MASK); 2181 /* NIG params will override non PFC params, since it's possible to 2182 * do transition from PFC to SAFC 2183 */ 2184 if (set_pfc) { 2185 pause_enable = 0; 2186 llfc_out_en = 0; 2187 llfc_enable = 0; 2188 if (CHIP_IS_E3(bp)) 2189 ppp_enable = 0; 2190 else 2191 ppp_enable = 1; 2192 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2193 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); 2194 xcm_out_en = 0; 2195 hwpfc_enable = 1; 2196 } else { 2197 if (nig_params) { 2198 llfc_out_en = nig_params->llfc_out_en; 2199 llfc_enable = nig_params->llfc_enable; 2200 pause_enable = nig_params->pause_enable; 2201 } else /* Default non PFC mode - PAUSE */ 2202 pause_enable = 1; 2203 2204 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2205 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); 2206 xcm_out_en = 1; 2207 } 2208 2209 if (CHIP_IS_E3(bp)) 2210 REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : 2211 NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); 2212 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : 2213 NIG_REG_LLFC_OUT_EN_0, llfc_out_en); 2214 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : 2215 NIG_REG_LLFC_ENABLE_0, llfc_enable); 2216 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 : 2217 NIG_REG_PAUSE_ENABLE_0, pause_enable); 2218 2219 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 : 2220 NIG_REG_PPP_ENABLE_0, ppp_enable); 2221 2222 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK : 2223 NIG_REG_LLH0_XCM_MASK, xcm_mask); 2224 2225 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : 2226 NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); 2227 2228 /* Output enable for RX_XCM # IF */ 2229 REG_WR(bp, port ? 
NIG_REG_XCM1_OUT_EN : 2230 NIG_REG_XCM0_OUT_EN, xcm_out_en); 2231 2232 /* HW PFC TX enable */ 2233 REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE : 2234 NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable); 2235 2236 if (nig_params) { 2237 u8 i = 0; 2238 pkt_priority_to_cos = nig_params->pkt_priority_to_cos; 2239 2240 for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) 2241 bnx2x_pfc_nig_rx_priority_mask(bp, i, 2242 nig_params->rx_cos_priority_mask[i], port); 2243 2244 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : 2245 NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, 2246 nig_params->llfc_high_priority_classes); 2247 2248 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : 2249 NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0, 2250 nig_params->llfc_low_priority_classes); 2251 } 2252 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : 2253 NIG_REG_P0_PKT_PRIORITY_TO_COS, 2254 pkt_priority_to_cos); 2255 } 2256 2257 int bnx2x_update_pfc(struct link_params *params, 2258 struct link_vars *vars, 2259 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 2260 { 2261 /* The PFC and pause are orthogonal to one another, meaning when 2262 * PFC is enabled, the pause are disabled, and when PFC is 2263 * disabled, pause are set according to the pause result. 2264 */ 2265 u32 val; 2266 struct bnx2x *bp = params->bp; 2267 int bnx2x_status = 0; 2268 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); 2269 2270 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 2271 vars->link_status |= LINK_STATUS_PFC_ENABLED; 2272 else 2273 vars->link_status &= ~LINK_STATUS_PFC_ENABLED; 2274 2275 bnx2x_update_mng(params, vars->link_status); 2276 2277 /* Update NIG params */ 2278 bnx2x_update_pfc_nig(params, vars, pfc_params); 2279 2280 if (!vars->link_up) 2281 return bnx2x_status; 2282 2283 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); 2284 2285 if (CHIP_IS_E3(bp)) { 2286 if (vars->mac_type == MAC_TYPE_XMAC) 2287 bnx2x_update_pfc_xmac(params, vars, 0); 2288 } else { 2289 val = REG_RD(bp, MISC_REG_RESET_REG_2); 2290 if ((val & 2291 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) 2292 == 0) { 2293 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); 2294 bnx2x_emac_enable(params, vars, 0); 2295 return bnx2x_status; 2296 } 2297 if (CHIP_IS_E2(bp)) 2298 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); 2299 else 2300 bnx2x_update_pfc_bmac1(params, vars); 2301 2302 val = 0; 2303 if ((params->feature_config_flags & 2304 FEATURE_CONFIG_PFC_ENABLED) || 2305 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) 2306 val = 1; 2307 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); 2308 } 2309 return bnx2x_status; 2310 } 2311 2312 static int bnx2x_bmac1_enable(struct link_params *params, 2313 struct link_vars *vars, 2314 u8 is_lb) 2315 { 2316 struct bnx2x *bp = params->bp; 2317 u8 port = params->port; 2318 u32 bmac_addr = port ? 
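	/* The TX source address used for pause/PFC frames is packed the
	 * same way in the BMAC, UMAC and EMAC paths: the last four MAC
	 * bytes go into the low word and the first two into the high
	 * word. For example, with a hypothetical MAC 00:11:22:33:44:55:
	 *   wb_data[0] = 0x22334455;	(mac_addr[2..5])
	 *   wb_data[1] = 0x00000011;	(mac_addr[0..1])
	 */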
NIG_REG_INGRESS_BMAC1_MEM : 2319 NIG_REG_INGRESS_BMAC0_MEM; 2320 u32 wb_data[2]; 2321 u32 val; 2322 2323 DP(NETIF_MSG_LINK, "Enabling BigMAC1\n"); 2324 2325 /* XGXS control */ 2326 wb_data[0] = 0x3c; 2327 wb_data[1] = 0; 2328 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 2329 wb_data, 2); 2330 2331 /* TX MAC SA */ 2332 wb_data[0] = ((params->mac_addr[2] << 24) | 2333 (params->mac_addr[3] << 16) | 2334 (params->mac_addr[4] << 8) | 2335 params->mac_addr[5]); 2336 wb_data[1] = ((params->mac_addr[0] << 8) | 2337 params->mac_addr[1]); 2338 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); 2339 2340 /* MAC control */ 2341 val = 0x3; 2342 if (is_lb) { 2343 val |= 0x4; 2344 DP(NETIF_MSG_LINK, "enable bmac loopback\n"); 2345 } 2346 wb_data[0] = val; 2347 wb_data[1] = 0; 2348 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); 2349 2350 /* Set rx mtu */ 2351 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2352 wb_data[1] = 0; 2353 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); 2354 2355 bnx2x_update_pfc_bmac1(params, vars); 2356 2357 /* Set tx mtu */ 2358 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2359 wb_data[1] = 0; 2360 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); 2361 2362 /* Set cnt max size */ 2363 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2364 wb_data[1] = 0; 2365 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2366 2367 /* Configure SAFC */ 2368 wb_data[0] = 0x1000200; 2369 wb_data[1] = 0; 2370 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 2371 wb_data, 2); 2372 2373 return 0; 2374 } 2375 2376 static int bnx2x_bmac2_enable(struct link_params *params, 2377 struct link_vars *vars, 2378 u8 is_lb) 2379 { 2380 struct bnx2x *bp = params->bp; 2381 u8 port = params->port; 2382 u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : 2383 NIG_REG_INGRESS_BMAC0_MEM; 2384 u32 wb_data[2]; 2385 2386 DP(NETIF_MSG_LINK, "Enabling BigMAC2\n"); 2387 2388 wb_data[0] = 0; 2389 wb_data[1] = 0; 2390 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); 2391 udelay(30); 2392 2393 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ 2394 wb_data[0] = 0x3c; 2395 wb_data[1] = 0; 2396 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, 2397 wb_data, 2); 2398 2399 udelay(30); 2400 2401 /* TX MAC SA */ 2402 wb_data[0] = ((params->mac_addr[2] << 24) | 2403 (params->mac_addr[3] << 16) | 2404 (params->mac_addr[4] << 8) | 2405 params->mac_addr[5]); 2406 wb_data[1] = ((params->mac_addr[0] << 8) | 2407 params->mac_addr[1]); 2408 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, 2409 wb_data, 2); 2410 2411 udelay(30); 2412 2413 /* Configure SAFC */ 2414 wb_data[0] = 0x1000200; 2415 wb_data[1] = 0; 2416 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, 2417 wb_data, 2); 2418 udelay(30); 2419 2420 /* Set RX MTU */ 2421 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2422 wb_data[1] = 0; 2423 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); 2424 udelay(30); 2425 2426 /* Set TX MTU */ 2427 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2428 wb_data[1] = 0; 2429 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); 2430 udelay(30); 2431 /* Set cnt max size */ 2432 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 2433 wb_data[1] = 0; 2434 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2435 udelay(30); 2436 bnx2x_update_pfc_bmac2(params, vars, is_lb); 2437 2438 return 0; 2439 } 2440 2441 static int bnx2x_bmac_enable(struct link_params *params, 2442 struct link_vars *vars, 2443 u8 is_lb, u8 reset_bmac) 2444 { 2445 int rc = 0; 2446 u8 port = params->port; 2447 struct bnx2x *bp = params->bp; 2448 u32 val; 2449 /* Reset and unreset the BigMac */ 2450 if (reset_bmac) { 2451 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2452 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2453 usleep_range(1000, 2000); 2454 } 2455 2456 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 2457 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2458 2459 /* Enable access for bmac registers */ 2460 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 2461 2462 /* Enable BMAC according to BMAC type*/ 2463 if (CHIP_IS_E2(bp)) 2464 rc = bnx2x_bmac2_enable(params, vars, is_lb); 2465 else 2466 rc = bnx2x_bmac1_enable(params, vars, is_lb); 2467 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); 2468 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); 2469 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); 2470 val = 0; 2471 if ((params->feature_config_flags & 2472 FEATURE_CONFIG_PFC_ENABLED) || 2473 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) 2474 val = 1; 2475 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); 2476 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); 2477 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); 2478 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); 2479 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); 2480 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); 2481 2482 vars->mac_type = MAC_TYPE_BMAC; 2483 return rc; 2484 } 2485 2486 static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) 2487 { 2488 u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : 2489 NIG_REG_INGRESS_BMAC0_MEM; 2490 u32 wb_data[2]; 2491 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 2492 2493 if (CHIP_IS_E2(bp)) 2494 bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; 2495 else 2496 bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; 2497 /* Only if the bmac is out of reset */ 2498 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 2499 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 2500 nig_bmac_enable) { 2501 /* Clear Rx Enable bit in BMAC_CONTROL register */ 2502 REG_RD_DMAE(bp, bmac_addr, wb_data, 2); 2503 if (en) 2504 wb_data[0] |= BMAC_CONTROL_RX_ENABLE; 2505 else 2506 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 2507 REG_WR_DMAE(bp, bmac_addr, wb_data, 2); 2508 usleep_range(1000, 2000); 2509 } 2510 } 2511 2512 static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 2513 u32 line_speed) 2514 { 2515 struct bnx2x *bp = params->bp; 2516 u8 port = params->port; 2517 u32 init_crd, crd; 2518 u32 count = 1000; 2519 2520 /* Disable port */ 2521 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 2522 2523 /* Wait for init credit */ 2524 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); 2525 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2526 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); 2527 2528 while ((init_crd != crd) && count) { 2529 usleep_range(5000, 10000); 2530 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2531 count--; 2532 } 2533 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2534 if (init_crd != crd) { 2535 DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n", 2536 init_crd, crd); 2537 return -EINVAL; 2538 } 2539 2540 if (flow_ctrl & BNX2X_FLOW_CTRL_RX || 2541 line_speed == SPEED_10 || 2542 line_speed == SPEED_100 || 2543 line_speed == SPEED_1000 || 2544 line_speed == SPEED_2500) { 2545 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); 2546 /* Update threshold */ 2547 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 2548 /* Update init credit */ 2549 init_crd = 778; /* (800-18-4) */ 2550 2551 } else { 2552 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 2553 ETH_OVREHEAD)/16; 2554 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 2555 /* Update threshold */ 2556 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 2557 /* Update init credit */ 2558 switch (line_speed) { 2559 case SPEED_10000: 2560 init_crd = thresh + 553 - 22; 2561 break; 2562 default: 2563 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", 2564 line_speed); 2565 return -EINVAL; 2566 } 2567 } 2568 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd); 2569 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", 2570 line_speed, init_crd); 2571 2572 /* Probe the credit changes */ 2573 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); 2574 usleep_range(5000, 10000); 2575 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); 2576 2577 /* Enable port */ 2578 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); 2579 return 0; 2580 } 2581 2582 /** 2583 * bnx2x_get_emac_base - retrive emac base address 2584 * 2585 * @bp: driver handle 2586 * @mdc_mdio_access: access type 2587 * @port: port id 2588 * 2589 * This function selects the MDC/MDIO access (through emac0 or 2590 * emac1) depend on the mdc_mdio_access, port, port swapped. Each 2591 * phy has a default access mode, which could also be overridden 2592 * by nvram configuration. 
This parameter, whether this is the 2593 * default phy configuration, or the nvram overrun 2594 * configuration, is passed here as mdc_mdio_access and selects 2595 * the emac_base for the CL45 read/writes operations 2596 */ 2597 static u32 bnx2x_get_emac_base(struct bnx2x *bp, 2598 u32 mdc_mdio_access, u8 port) 2599 { 2600 u32 emac_base = 0; 2601 switch (mdc_mdio_access) { 2602 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: 2603 break; 2604 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: 2605 if (REG_RD(bp, NIG_REG_PORT_SWAP)) 2606 emac_base = GRCBASE_EMAC1; 2607 else 2608 emac_base = GRCBASE_EMAC0; 2609 break; 2610 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: 2611 if (REG_RD(bp, NIG_REG_PORT_SWAP)) 2612 emac_base = GRCBASE_EMAC0; 2613 else 2614 emac_base = GRCBASE_EMAC1; 2615 break; 2616 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: 2617 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 2618 break; 2619 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: 2620 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 2621 break; 2622 default: 2623 break; 2624 } 2625 return emac_base; 2626 2627 } 2628 2629 /******************************************************************/ 2630 /* CL22 access functions */ 2631 /******************************************************************/ 2632 static int bnx2x_cl22_write(struct bnx2x *bp, 2633 struct bnx2x_phy *phy, 2634 u16 reg, u16 val) 2635 { 2636 u32 tmp, mode; 2637 u8 i; 2638 int rc = 0; 2639 /* Switch to CL22 */ 2640 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 2641 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 2642 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 2643 2644 /* Address */ 2645 tmp = ((phy->addr << 21) | (reg << 16) | val | 2646 EMAC_MDIO_COMM_COMMAND_WRITE_22 | 2647 EMAC_MDIO_COMM_START_BUSY); 2648 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 2649 2650 for (i = 0; i < 50; i++) { 2651 udelay(10); 2652 2653 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 2654 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 2655 udelay(5); 2656 break; 2657 } 2658 } 2659 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 2660 DP(NETIF_MSG_LINK, "write phy register failed\n"); 2661 rc = -EFAULT; 2662 } 2663 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); 2664 return rc; 2665 } 2666 2667 static int bnx2x_cl22_read(struct bnx2x *bp, 2668 struct bnx2x_phy *phy, 2669 u16 reg, u16 *ret_val) 2670 { 2671 u32 val, mode; 2672 u16 i; 2673 int rc = 0; 2674 2675 /* Switch to CL22 */ 2676 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 2677 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 2678 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 2679 2680 /* Address */ 2681 val = ((phy->addr << 21) | (reg << 16) | 2682 EMAC_MDIO_COMM_COMMAND_READ_22 | 2683 EMAC_MDIO_COMM_START_BUSY); 2684 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 2685 2686 for (i = 0; i < 50; i++) { 2687 udelay(10); 2688 2689 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 2690 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 2691 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 2692 udelay(5); 2693 break; 2694 } 2695 } 2696 if (val & EMAC_MDIO_COMM_START_BUSY) { 2697 DP(NETIF_MSG_LINK, "read phy register failed\n"); 2698 2699 *ret_val = 0; 2700 rc = -EFAULT; 2701 } 2702 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); 2703 return rc; 2704 } 2705 2706 /******************************************************************/ 2707 /* CL45 access functions */ 2708 /******************************************************************/ 2709 static int bnx2x_cl45_read(struct 
bnx2x *bp, struct bnx2x_phy *phy, 2710 u8 devad, u16 reg, u16 *ret_val) 2711 { 2712 u32 val; 2713 u16 i; 2714 int rc = 0; 2715 u32 chip_id; 2716 if (phy->flags & FLAGS_MDC_MDIO_WA_G) { 2717 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | 2718 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); 2719 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); 2720 } 2721 2722 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2723 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2724 EMAC_MDIO_STATUS_10MB); 2725 /* Address */ 2726 val = ((phy->addr << 21) | (devad << 16) | reg | 2727 EMAC_MDIO_COMM_COMMAND_ADDRESS | 2728 EMAC_MDIO_COMM_START_BUSY); 2729 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 2730 2731 for (i = 0; i < 50; i++) { 2732 udelay(10); 2733 2734 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 2735 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 2736 udelay(5); 2737 break; 2738 } 2739 } 2740 if (val & EMAC_MDIO_COMM_START_BUSY) { 2741 DP(NETIF_MSG_LINK, "read phy register failed\n"); 2742 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 2743 *ret_val = 0; 2744 rc = -EFAULT; 2745 } else { 2746 /* Data */ 2747 val = ((phy->addr << 21) | (devad << 16) | 2748 EMAC_MDIO_COMM_COMMAND_READ_45 | 2749 EMAC_MDIO_COMM_START_BUSY); 2750 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 2751 2752 for (i = 0; i < 50; i++) { 2753 udelay(10); 2754 2755 val = REG_RD(bp, phy->mdio_ctrl + 2756 EMAC_REG_EMAC_MDIO_COMM); 2757 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 2758 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 2759 break; 2760 } 2761 } 2762 if (val & EMAC_MDIO_COMM_START_BUSY) { 2763 DP(NETIF_MSG_LINK, "read phy register failed\n"); 2764 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 2765 *ret_val = 0; 2766 rc = -EFAULT; 2767 } 2768 } 2769 /* Work around for E3 A0 */ 2770 if (phy->flags & FLAGS_MDC_MDIO_WA) { 2771 phy->flags ^= FLAGS_DUMMY_READ; 2772 if (phy->flags & FLAGS_DUMMY_READ) { 2773 u16 temp_val; 2774 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); 2775 } 2776 } 2777 2778 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2779 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2780 EMAC_MDIO_STATUS_10MB); 2781 return rc; 2782 } 2783 2784 static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, 2785 u8 devad, u16 reg, u16 val) 2786 { 2787 u32 tmp; 2788 u8 i; 2789 int rc = 0; 2790 u32 chip_id; 2791 if (phy->flags & FLAGS_MDC_MDIO_WA_G) { 2792 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | 2793 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); 2794 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); 2795 } 2796 2797 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2798 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2799 EMAC_MDIO_STATUS_10MB); 2800 2801 /* Address */ 2802 tmp = ((phy->addr << 21) | (devad << 16) | reg | 2803 EMAC_MDIO_COMM_COMMAND_ADDRESS | 2804 EMAC_MDIO_COMM_START_BUSY); 2805 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 2806 2807 for (i = 0; i < 50; i++) { 2808 udelay(10); 2809 2810 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 2811 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 2812 udelay(5); 2813 break; 2814 } 2815 } 2816 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 2817 DP(NETIF_MSG_LINK, "write phy register failed\n"); 2818 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 2819 rc = -EFAULT; 2820 } else { 2821 /* Data */ 2822 tmp = ((phy->addr << 21) | (devad << 16) | val | 2823 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 2824 EMAC_MDIO_COMM_START_BUSY); 2825 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 2826 
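	/* Clause 45 accesses on this MDIO controller are two-phased: an
	 * address frame (EMAC_MDIO_COMM_COMMAND_ADDRESS) latches the target
	 * register, then a separate read/write frame moves the data, and
	 * each phase is polled until EMAC_MDIO_COMM_START_BUSY clears.
	 * From the shifts used above, the COMM word carries the PHY address
	 * in bits 25:21, the DEVAD in bits 20:16 and the register offset or
	 * data in bits 15:0. The block below is only an illustrative sketch
	 * of that encoding and is not used by the driver.
	 */
#if 0
	{
		u32 addr_frame = ((u32)phy->addr << 21) | ((u32)devad << 16) |
				 reg | EMAC_MDIO_COMM_COMMAND_ADDRESS |
				 EMAC_MDIO_COMM_START_BUSY;
		u32 data_frame = ((u32)phy->addr << 21) | ((u32)devad << 16) |
				 val | EMAC_MDIO_COMM_COMMAND_WRITE_45 |
				 EMAC_MDIO_COMM_START_BUSY;
	}
#endif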
2827 for (i = 0; i < 50; i++) { 2828 udelay(10); 2829 2830 tmp = REG_RD(bp, phy->mdio_ctrl + 2831 EMAC_REG_EMAC_MDIO_COMM); 2832 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 2833 udelay(5); 2834 break; 2835 } 2836 } 2837 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 2838 DP(NETIF_MSG_LINK, "write phy register failed\n"); 2839 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 2840 rc = -EFAULT; 2841 } 2842 } 2843 /* Work around for E3 A0 */ 2844 if (phy->flags & FLAGS_MDC_MDIO_WA) { 2845 phy->flags ^= FLAGS_DUMMY_READ; 2846 if (phy->flags & FLAGS_DUMMY_READ) { 2847 u16 temp_val; 2848 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); 2849 } 2850 } 2851 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2852 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2853 EMAC_MDIO_STATUS_10MB); 2854 return rc; 2855 } 2856 2857 /******************************************************************/ 2858 /* EEE section */ 2859 /******************************************************************/ 2860 static u8 bnx2x_eee_has_cap(struct link_params *params) 2861 { 2862 struct bnx2x *bp = params->bp; 2863 2864 if (REG_RD(bp, params->shmem2_base) <= 2865 offsetof(struct shmem2_region, eee_status[params->port])) 2866 return 0; 2867 2868 return 1; 2869 } 2870 2871 static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) 2872 { 2873 switch (nvram_mode) { 2874 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: 2875 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; 2876 break; 2877 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: 2878 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; 2879 break; 2880 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: 2881 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; 2882 break; 2883 default: 2884 *idle_timer = 0; 2885 break; 2886 } 2887 2888 return 0; 2889 } 2890 2891 static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) 2892 { 2893 switch (idle_timer) { 2894 case EEE_MODE_NVRAM_BALANCED_TIME: 2895 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; 2896 break; 2897 case EEE_MODE_NVRAM_AGGRESSIVE_TIME: 2898 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; 2899 break; 2900 case EEE_MODE_NVRAM_LATENCY_TIME: 2901 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; 2902 break; 2903 default: 2904 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; 2905 break; 2906 } 2907 2908 return 0; 2909 } 2910 2911 static u32 bnx2x_eee_calc_timer(struct link_params *params) 2912 { 2913 u32 eee_mode, eee_idle; 2914 struct bnx2x *bp = params->bp; 2915 2916 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { 2917 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { 2918 /* time value in eee_mode --> used directly*/ 2919 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; 2920 } else { 2921 /* hsi value in eee_mode --> time */ 2922 if (bnx2x_eee_nvram_to_time(params->eee_mode & 2923 EEE_MODE_NVRAM_MASK, 2924 &eee_idle)) 2925 return 0; 2926 } 2927 } else { 2928 /* hsi values in nvram --> time*/ 2929 eee_mode = ((REG_RD(bp, params->shmem_base + 2930 offsetof(struct shmem_region, dev_info. 2931 port_feature_config[params->port]. 
2932 eee_power_mode)) & 2933 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 2934 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 2935 2936 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) 2937 return 0; 2938 } 2939 2940 return eee_idle; 2941 } 2942 2943 static int bnx2x_eee_set_timers(struct link_params *params, 2944 struct link_vars *vars) 2945 { 2946 u32 eee_idle = 0, eee_mode; 2947 struct bnx2x *bp = params->bp; 2948 2949 eee_idle = bnx2x_eee_calc_timer(params); 2950 2951 if (eee_idle) { 2952 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), 2953 eee_idle); 2954 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && 2955 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && 2956 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { 2957 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); 2958 return -EINVAL; 2959 } 2960 2961 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); 2962 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { 2963 /* eee_idle in 1u --> eee_status in 16u */ 2964 eee_idle >>= 4; 2965 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | 2966 SHMEM_EEE_TIME_OUTPUT_BIT; 2967 } else { 2968 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) 2969 return -EINVAL; 2970 vars->eee_status |= eee_mode; 2971 } 2972 2973 return 0; 2974 } 2975 2976 static int bnx2x_eee_initial_config(struct link_params *params, 2977 struct link_vars *vars, u8 mode) 2978 { 2979 vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; 2980 2981 /* Propogate params' bits --> vars (for migration exposure) */ 2982 if (params->eee_mode & EEE_MODE_ENABLE_LPI) 2983 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; 2984 else 2985 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; 2986 2987 if (params->eee_mode & EEE_MODE_ADV_LPI) 2988 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; 2989 else 2990 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; 2991 2992 return bnx2x_eee_set_timers(params, vars); 2993 } 2994 2995 static int bnx2x_eee_disable(struct bnx2x_phy *phy, 2996 struct link_params *params, 2997 struct link_vars *vars) 2998 { 2999 struct bnx2x *bp = params->bp; 3000 3001 /* Make Certain LPI is disabled */ 3002 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); 3003 3004 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); 3005 3006 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; 3007 3008 return 0; 3009 } 3010 3011 static int bnx2x_eee_advertise(struct bnx2x_phy *phy, 3012 struct link_params *params, 3013 struct link_vars *vars, u8 modes) 3014 { 3015 struct bnx2x *bp = params->bp; 3016 u16 val = 0; 3017 3018 /* Mask events preventing LPI generation */ 3019 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); 3020 3021 if (modes & SHMEM_EEE_10G_ADV) { 3022 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); 3023 val |= 0x8; 3024 } 3025 if (modes & SHMEM_EEE_1G_ADV) { 3026 DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n"); 3027 val |= 0x4; 3028 } 3029 3030 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); 3031 3032 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; 3033 vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); 3034 3035 return 0; 3036 } 3037 3038 static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) 3039 { 3040 struct bnx2x *bp = params->bp; 3041 3042 if (bnx2x_eee_has_cap(params)) 3043 REG_WR(bp, params->shmem2_base + 3044 offsetof(struct shmem2_region, 3045 eee_status[params->port]), eee_status); 3046 } 3047 3048 static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy, 3049 struct link_params 
*params, 3050 struct link_vars *vars) 3051 { 3052 struct bnx2x *bp = params->bp; 3053 u16 adv = 0, lp = 0; 3054 u32 lp_adv = 0; 3055 u8 neg = 0; 3056 3057 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); 3058 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); 3059 3060 if (lp & 0x2) { 3061 lp_adv |= SHMEM_EEE_100M_ADV; 3062 if (adv & 0x2) { 3063 if (vars->line_speed == SPEED_100) 3064 neg = 1; 3065 DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n"); 3066 } 3067 } 3068 if (lp & 0x14) { 3069 lp_adv |= SHMEM_EEE_1G_ADV; 3070 if (adv & 0x14) { 3071 if (vars->line_speed == SPEED_1000) 3072 neg = 1; 3073 DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n"); 3074 } 3075 } 3076 if (lp & 0x68) { 3077 lp_adv |= SHMEM_EEE_10G_ADV; 3078 if (adv & 0x68) { 3079 if (vars->line_speed == SPEED_10000) 3080 neg = 1; 3081 DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n"); 3082 } 3083 } 3084 3085 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; 3086 vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); 3087 3088 if (neg) { 3089 DP(NETIF_MSG_LINK, "EEE is active\n"); 3090 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; 3091 } 3092 3093 } 3094 3095 /******************************************************************/ 3096 /* BSC access functions from E3 */ 3097 /******************************************************************/ 3098 static void bnx2x_bsc_module_sel(struct link_params *params) 3099 { 3100 int idx; 3101 u32 board_cfg, sfp_ctrl; 3102 u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; 3103 struct bnx2x *bp = params->bp; 3104 u8 port = params->port; 3105 /* Read I2C output PINs */ 3106 board_cfg = REG_RD(bp, params->shmem_base + 3107 offsetof(struct shmem_region, 3108 dev_info.shared_hw_config.board)); 3109 i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; 3110 i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> 3111 SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; 3112 3113 /* Read I2C output value */ 3114 sfp_ctrl = REG_RD(bp, params->shmem_base + 3115 offsetof(struct shmem_region, 3116 dev_info.port_hw_config[port].e3_cmn_pin_cfg)); 3117 i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; 3118 i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; 3119 DP(NETIF_MSG_LINK, "Setting BSC switch\n"); 3120 for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) 3121 bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); 3122 } 3123 3124 static int bnx2x_bsc_read(struct link_params *params, 3125 struct bnx2x *bp, 3126 u8 sl_devid, 3127 u16 sl_addr, 3128 u8 lc_addr, 3129 u8 xfer_cnt, 3130 u32 *data_array) 3131 { 3132 u32 val, i; 3133 int rc = 0; 3134 3135 if (xfer_cnt > 16) { 3136 DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. 
Max is 16 bytes\n", 3137 xfer_cnt); 3138 return -EINVAL; 3139 } 3140 bnx2x_bsc_module_sel(params); 3141 3142 xfer_cnt = 16 - lc_addr; 3143 3144 /* Enable the engine */ 3145 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3146 val |= MCPR_IMC_COMMAND_ENABLE; 3147 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3148 3149 /* Program slave device ID */ 3150 val = (sl_devid << 16) | sl_addr; 3151 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); 3152 3153 /* Start xfer with 0 byte to update the address pointer ???*/ 3154 val = (MCPR_IMC_COMMAND_ENABLE) | 3155 (MCPR_IMC_COMMAND_WRITE_OP << 3156 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3157 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); 3158 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3159 3160 /* Poll for completion */ 3161 i = 0; 3162 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3163 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3164 udelay(10); 3165 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3166 if (i++ > 1000) { 3167 DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", 3168 i); 3169 rc = -EFAULT; 3170 break; 3171 } 3172 } 3173 if (rc == -EFAULT) 3174 return rc; 3175 3176 /* Start xfer with read op */ 3177 val = (MCPR_IMC_COMMAND_ENABLE) | 3178 (MCPR_IMC_COMMAND_READ_OP << 3179 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3180 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | 3181 (xfer_cnt); 3182 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3183 3184 /* Poll for completion */ 3185 i = 0; 3186 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3187 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3188 udelay(10); 3189 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3190 if (i++ > 1000) { 3191 DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); 3192 rc = -EFAULT; 3193 break; 3194 } 3195 } 3196 if (rc == -EFAULT) 3197 return rc; 3198 3199 for (i = (lc_addr >> 2); i < 4; i++) { 3200 data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); 3201 #ifdef __BIG_ENDIAN 3202 data_array[i] = ((data_array[i] & 0x000000ff) << 24) | 3203 ((data_array[i] & 0x0000ff00) << 8) | 3204 ((data_array[i] & 0x00ff0000) >> 8) | 3205 ((data_array[i] & 0xff000000) >> 24); 3206 #endif 3207 } 3208 return rc; 3209 } 3210 3211 static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, 3212 u8 devad, u16 reg, u16 or_val) 3213 { 3214 u16 val; 3215 bnx2x_cl45_read(bp, phy, devad, reg, &val); 3216 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); 3217 } 3218 3219 static void bnx2x_cl45_read_and_write(struct bnx2x *bp, 3220 struct bnx2x_phy *phy, 3221 u8 devad, u16 reg, u16 and_val) 3222 { 3223 u16 val; 3224 bnx2x_cl45_read(bp, phy, devad, reg, &val); 3225 bnx2x_cl45_write(bp, phy, devad, reg, val & and_val); 3226 } 3227 3228 int bnx2x_phy_read(struct link_params *params, u8 phy_addr, 3229 u8 devad, u16 reg, u16 *ret_val) 3230 { 3231 u8 phy_index; 3232 /* Probe for the phy according to the given phy_addr, and execute 3233 * the read request on it 3234 */ 3235 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3236 if (params->phy[phy_index].addr == phy_addr) { 3237 return bnx2x_cl45_read(params->bp, 3238 ¶ms->phy[phy_index], devad, 3239 reg, ret_val); 3240 } 3241 } 3242 return -EINVAL; 3243 } 3244 3245 int bnx2x_phy_write(struct link_params *params, u8 phy_addr, 3246 u8 devad, u16 reg, u16 val) 3247 { 3248 u8 phy_index; 3249 /* Probe for the phy according to the given phy_addr, and execute 3250 * the write request on it 3251 */ 3252 for (phy_index = 0; phy_index < params->num_phys; 
phy_index++) { 3253 if (params->phy[phy_index].addr == phy_addr) { 3254 return bnx2x_cl45_write(params->bp, 3255 ¶ms->phy[phy_index], devad, 3256 reg, val); 3257 } 3258 } 3259 return -EINVAL; 3260 } 3261 static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, 3262 struct link_params *params) 3263 { 3264 u8 lane = 0; 3265 struct bnx2x *bp = params->bp; 3266 u32 path_swap, path_swap_ovr; 3267 u8 path, port; 3268 3269 path = BP_PATH(bp); 3270 port = params->port; 3271 3272 if (bnx2x_is_4_port_mode(bp)) { 3273 u32 port_swap, port_swap_ovr; 3274 3275 /* Figure out path swap value */ 3276 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); 3277 if (path_swap_ovr & 0x1) 3278 path_swap = (path_swap_ovr & 0x2); 3279 else 3280 path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); 3281 3282 if (path_swap) 3283 path = path ^ 1; 3284 3285 /* Figure out port swap value */ 3286 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); 3287 if (port_swap_ovr & 0x1) 3288 port_swap = (port_swap_ovr & 0x2); 3289 else 3290 port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); 3291 3292 if (port_swap) 3293 port = port ^ 1; 3294 3295 lane = (port<<1) + path; 3296 } else { /* Two port mode - no port swap */ 3297 3298 /* Figure out path swap value */ 3299 path_swap_ovr = 3300 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); 3301 if (path_swap_ovr & 0x1) { 3302 path_swap = (path_swap_ovr & 0x2); 3303 } else { 3304 path_swap = 3305 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); 3306 } 3307 if (path_swap) 3308 path = path ^ 1; 3309 3310 lane = path << 1 ; 3311 } 3312 return lane; 3313 } 3314 3315 static void bnx2x_set_aer_mmd(struct link_params *params, 3316 struct bnx2x_phy *phy) 3317 { 3318 u32 ser_lane; 3319 u16 offset, aer_val; 3320 struct bnx2x *bp = params->bp; 3321 ser_lane = ((params->lane_config & 3322 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 3323 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 3324 3325 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? 3326 (phy->addr + ser_lane) : 0; 3327 3328 if (USES_WARPCORE(bp)) { 3329 aer_val = bnx2x_get_warpcore_lane(phy, params); 3330 /* In Dual-lane mode, two lanes are joined together, 3331 * so in order to configure them, the AER broadcast method is 3332 * used here. 3333 * 0x200 is the broadcast address for lanes 0,1 3334 * 0x201 is the broadcast address for lanes 2,3 3335 */ 3336 if (phy->flags & FLAGS_WC_DUAL_MODE) 3337 aer_val = (aer_val >> 1) | 0x200; 3338 } else if (CHIP_IS_E2(bp)) 3339 aer_val = 0x3800 + offset - 1; 3340 else 3341 aer_val = 0x3800 + offset; 3342 3343 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3344 MDIO_AER_BLOCK_AER_REG, aer_val); 3345 3346 } 3347 3348 /******************************************************************/ 3349 /* Internal phy section */ 3350 /******************************************************************/ 3351 3352 static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port) 3353 { 3354 u32 emac_base = (port) ? 
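	/* bnx2x_set_serdes_access() below briefly switches the SerDes MDIO
	 * interface to Clause 22 (NIG_REG_SERDES0_CTRL_MD_ST = 1), pushes
	 * two fixed command words (0x245f8000 and 0x245d000f) through
	 * EMAC_REG_EMAC_MDIO_COMM with 500us settling delays, and then
	 * switches the interface back to Clause 45. The meaning of the two
	 * magic command words is not documented here.
	 */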
GRCBASE_EMAC1 : GRCBASE_EMAC0; 3355 3356 /* Set Clause 22 */ 3357 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); 3358 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); 3359 udelay(500); 3360 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); 3361 udelay(500); 3362 /* Set Clause 45 */ 3363 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); 3364 } 3365 3366 static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) 3367 { 3368 u32 val; 3369 3370 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n"); 3371 3372 val = SERDES_RESET_BITS << (port*16); 3373 3374 /* Reset and unreset the SerDes/XGXS */ 3375 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3376 udelay(500); 3377 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3378 3379 bnx2x_set_serdes_access(bp, port); 3380 3381 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, 3382 DEFAULT_PHY_DEV_ADDR); 3383 } 3384 3385 static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, 3386 struct link_params *params, 3387 u32 action) 3388 { 3389 struct bnx2x *bp = params->bp; 3390 switch (action) { 3391 case PHY_INIT: 3392 /* Set correct devad */ 3393 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); 3394 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, 3395 phy->def_md_devad); 3396 break; 3397 } 3398 } 3399 3400 static void bnx2x_xgxs_deassert(struct link_params *params) 3401 { 3402 struct bnx2x *bp = params->bp; 3403 u8 port; 3404 u32 val; 3405 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n"); 3406 port = params->port; 3407 3408 val = XGXS_RESET_BITS << (port*16); 3409 3410 /* Reset and unreset the SerDes/XGXS */ 3411 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3412 udelay(500); 3413 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3414 bnx2x_xgxs_specific_func(¶ms->phy[INT_PHY], params, 3415 PHY_INIT); 3416 } 3417 3418 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 3419 struct link_params *params, u16 *ieee_fc) 3420 { 3421 struct bnx2x *bp = params->bp; 3422 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3423 /* Resolve pause mode and advertisement Please refer to Table 3424 * 28B-3 of the 802.3ab-1999 spec 3425 */ 3426 3427 switch (phy->req_flow_ctrl) { 3428 case BNX2X_FLOW_CTRL_AUTO: 3429 switch (params->req_fc_auto_adv) { 3430 case BNX2X_FLOW_CTRL_BOTH: 3431 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3432 break; 3433 case BNX2X_FLOW_CTRL_RX: 3434 case BNX2X_FLOW_CTRL_TX: 3435 *ieee_fc |= 3436 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3437 break; 3438 default: 3439 break; 3440 } 3441 break; 3442 case BNX2X_FLOW_CTRL_TX: 3443 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3444 break; 3445 3446 case BNX2X_FLOW_CTRL_RX: 3447 case BNX2X_FLOW_CTRL_BOTH: 3448 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3449 break; 3450 3451 case BNX2X_FLOW_CTRL_NONE: 3452 default: 3453 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 3454 break; 3455 } 3456 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); 3457 } 3458 3459 static void set_phy_vars(struct link_params *params, 3460 struct link_vars *vars) 3461 { 3462 struct bnx2x *bp = params->bp; 3463 u8 actual_phy_idx, phy_index, link_cfg_idx; 3464 u8 phy_config_swapped = params->multi_phy_config & 3465 PORT_HW_CFG_PHY_SWAPPED_ENABLED; 3466 for (phy_index = INT_PHY; phy_index < params->num_phys; 3467 phy_index++) { 3468 link_cfg_idx = LINK_CONFIG_IDX(phy_index); 3469 actual_phy_idx = phy_index; 3470 if (phy_config_swapped) { 3471 if 
(phy_index == EXT_PHY1) 3472 actual_phy_idx = EXT_PHY2; 3473 else if (phy_index == EXT_PHY2) 3474 actual_phy_idx = EXT_PHY1; 3475 } 3476 params->phy[actual_phy_idx].req_flow_ctrl = 3477 params->req_flow_ctrl[link_cfg_idx]; 3478 3479 params->phy[actual_phy_idx].req_line_speed = 3480 params->req_line_speed[link_cfg_idx]; 3481 3482 params->phy[actual_phy_idx].speed_cap_mask = 3483 params->speed_cap_mask[link_cfg_idx]; 3484 3485 params->phy[actual_phy_idx].req_duplex = 3486 params->req_duplex[link_cfg_idx]; 3487 3488 if (params->req_line_speed[link_cfg_idx] == 3489 SPEED_AUTO_NEG) 3490 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; 3491 3492 DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," 3493 " speed_cap_mask %x\n", 3494 params->phy[actual_phy_idx].req_flow_ctrl, 3495 params->phy[actual_phy_idx].req_line_speed, 3496 params->phy[actual_phy_idx].speed_cap_mask); 3497 } 3498 } 3499 3500 static void bnx2x_ext_phy_set_pause(struct link_params *params, 3501 struct bnx2x_phy *phy, 3502 struct link_vars *vars) 3503 { 3504 u16 val; 3505 struct bnx2x *bp = params->bp; 3506 /* Read modify write pause advertizing */ 3507 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); 3508 3509 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 3510 3511 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ 3512 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 3513 if ((vars->ieee_fc & 3514 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 3515 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 3516 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 3517 } 3518 if ((vars->ieee_fc & 3519 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 3520 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 3521 val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; 3522 } 3523 DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); 3524 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); 3525 } 3526 3527 static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 3528 { /* LD LP */ 3529 switch (pause_result) { /* ASYM P ASYM P */ 3530 case 0xb: /* 1 0 1 1 */ 3531 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 3532 break; 3533 3534 case 0xe: /* 1 1 1 0 */ 3535 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 3536 break; 3537 3538 case 0x5: /* 0 1 0 1 */ 3539 case 0x7: /* 0 1 1 1 */ 3540 case 0xd: /* 1 1 0 1 */ 3541 case 0xf: /* 1 1 1 1 */ 3542 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 3543 break; 3544 3545 default: 3546 break; 3547 } 3548 if (pause_result & (1<<0)) 3549 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; 3550 if (pause_result & (1<<1)) 3551 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; 3552 3553 } 3554 3555 static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, 3556 struct link_params *params, 3557 struct link_vars *vars) 3558 { 3559 u16 ld_pause; /* local */ 3560 u16 lp_pause; /* link partner */ 3561 u16 pause_result; 3562 struct bnx2x *bp = params->bp; 3563 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { 3564 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause); 3565 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); 3566 } else if (CHIP_IS_E3(bp) && 3567 SINGLE_MEDIA_DIRECT(params)) { 3568 u8 lane = bnx2x_get_warpcore_lane(phy, params); 3569 u16 gp_status, gp_mask; 3570 bnx2x_cl45_read(bp, phy, 3571 MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4, 3572 &gp_status); 3573 gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL | 3574 MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) << 3575 lane; 3576 if ((gp_status & gp_mask) == gp_mask) { 3577 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 3578 
MDIO_AN_REG_ADV_PAUSE, &ld_pause); 3579 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 3580 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); 3581 } else { 3582 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 3583 MDIO_AN_REG_CL37_FC_LD, &ld_pause); 3584 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 3585 MDIO_AN_REG_CL37_FC_LP, &lp_pause); 3586 ld_pause = ((ld_pause & 3587 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) 3588 << 3); 3589 lp_pause = ((lp_pause & 3590 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) 3591 << 3); 3592 } 3593 } else { 3594 bnx2x_cl45_read(bp, phy, 3595 MDIO_AN_DEVAD, 3596 MDIO_AN_REG_ADV_PAUSE, &ld_pause); 3597 bnx2x_cl45_read(bp, phy, 3598 MDIO_AN_DEVAD, 3599 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); 3600 } 3601 pause_result = (ld_pause & 3602 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 3603 pause_result |= (lp_pause & 3604 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; 3605 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); 3606 bnx2x_pause_resolve(vars, pause_result); 3607 3608 } 3609 3610 static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, 3611 struct link_params *params, 3612 struct link_vars *vars) 3613 { 3614 u8 ret = 0; 3615 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 3616 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { 3617 /* Update the advertised flow-controled of LD/LP in AN */ 3618 if (phy->req_line_speed == SPEED_AUTO_NEG) 3619 bnx2x_ext_phy_update_adv_fc(phy, params, vars); 3620 /* But set the flow-control result as the requested one */ 3621 vars->flow_ctrl = phy->req_flow_ctrl; 3622 } else if (phy->req_line_speed != SPEED_AUTO_NEG) 3623 vars->flow_ctrl = params->req_fc_auto_adv; 3624 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 3625 ret = 1; 3626 bnx2x_ext_phy_update_adv_fc(phy, params, vars); 3627 } 3628 return ret; 3629 } 3630 /******************************************************************/ 3631 /* Warpcore section */ 3632 /******************************************************************/ 3633 /* The init_internal_warpcore should mirror the xgxs, 3634 * i.e. reset the lane (if needed), set aer for the 3635 * init configuration, and set/clear SGMII flag. Internal 3636 * phy init is done purely in phy_init stage. 
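 * Note: the WC_TX_DRIVER() and WC_TX_FIR() helpers defined below only
 * pack their arguments into the POST2/IDRIVER/IPRE (respectively
 * POST/MAIN/PRE) fields of a single TX driver / TX FIR register value;
 * for example, WC_TX_DRIVER(0x02, 0x06, 0x09) builds the value written
 * to MDIO_WC_REG_TX0_TX_DRIVER in bnx2x_warpcore_enable_AN_KR() below.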
3637 */ 3638 #define WC_TX_DRIVER(post2, idriver, ipre) \ 3639 ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ 3640 (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ 3641 (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)) 3642 3643 #define WC_TX_FIR(post, main, pre) \ 3644 ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ 3645 (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ 3646 (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) 3647 3648 static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, 3649 struct link_params *params, 3650 struct link_vars *vars) 3651 { 3652 struct bnx2x *bp = params->bp; 3653 u16 i; 3654 static struct bnx2x_reg_set reg_set[] = { 3655 /* Step 1 - Program the TX/RX alignment markers */ 3656 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157}, 3657 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2}, 3658 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537}, 3659 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157}, 3660 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2}, 3661 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537}, 3662 /* Step 2 - Configure the NP registers */ 3663 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a}, 3664 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400}, 3665 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620}, 3666 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157}, 3667 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464}, 3668 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150}, 3669 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150}, 3670 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157}, 3671 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620} 3672 }; 3673 DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n"); 3674 3675 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3676 MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6)); 3677 3678 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 3679 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3680 reg_set[i].val); 3681 3682 /* Start KR2 work-around timer which handles BCM8073 link-parner */ 3683 vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; 3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 } 3686 3687 static void bnx2x_disable_kr2(struct link_params *params, 3688 struct link_vars *vars, 3689 struct bnx2x_phy *phy) 3690 { 3691 struct bnx2x *bp = params->bp; 3692 int i; 3693 static struct bnx2x_reg_set reg_set[] = { 3694 /* Step 1 - Program the TX/RX alignment markers */ 3695 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, 3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, 3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, 3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, 3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, 3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, 3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, 3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, 3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, 3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, 3705 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, 3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, 3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, 3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, 3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} 3710 }; 3711 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); 3712 3713 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 3714 
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3715 reg_set[i].val); 3716 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; 3717 bnx2x_update_link_attr(params, vars->link_attr_sync); 3718 3719 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; 3720 } 3721 3722 static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723 struct link_params *params) 3724 { 3725 struct bnx2x *bp = params->bp; 3726 3727 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); 3728 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3729 MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); 3730 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3731 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); 3732 } 3733 3734 static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy, 3735 struct link_params *params) 3736 { 3737 /* Restart autoneg on the leading lane only */ 3738 struct bnx2x *bp = params->bp; 3739 u16 lane = bnx2x_get_warpcore_lane(phy, params); 3740 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3741 MDIO_AER_BLOCK_AER_REG, lane); 3742 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3743 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); 3744 3745 /* Restore AER */ 3746 bnx2x_set_aer_mmd(params, phy); 3747 } 3748 3749 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3750 struct link_params *params, 3751 struct link_vars *vars) { 3752 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 struct bnx2x *bp = params->bp; 3754 static struct bnx2x_reg_set reg_set[] = { 3755 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, 3757 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, 3758 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, 3759 /* Disable Autoneg: re-enable it after adv is done. */ 3760 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}, 3761 {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}, 3762 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0}, 3763 }; 3764 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); 3765 /* Set to default registers that may be overriden by 10G force */ 3766 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 3767 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3768 reg_set[i].val); 3769 3770 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3771 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); 3772 cl72_ctrl &= 0x08ff; 3773 cl72_ctrl |= 0x3800; 3774 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3775 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); 3776 3777 /* Check adding advertisement for 1G KX */ 3778 if (((vars->line_speed == SPEED_AUTO_NEG) && 3779 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3780 (vars->line_speed == SPEED_1000)) { 3781 u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; 3782 an_adv |= (1<<5); 3783 3784 /* Enable CL37 1G Parallel Detect */ 3785 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); 3786 DP(NETIF_MSG_LINK, "Advertize 1G\n"); 3787 } 3788 if (((vars->line_speed == SPEED_AUTO_NEG) && 3789 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || 3790 (vars->line_speed == SPEED_10000)) { 3791 /* Check adding advertisement for 10G KR */ 3792 an_adv |= (1<<7); 3793 /* Enable 10G Parallel Detect */ 3794 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3795 MDIO_AER_BLOCK_AER_REG, 0); 3796 3797 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3798 MDIO_WC_REG_PAR_DET_10G_CTRL, 1); 3799 bnx2x_set_aer_mmd(params, phy); 3800 DP(NETIF_MSG_LINK, "Advertize 10G\n"); 3801 } 3802 3803 /* Set Transmit PMD settings */ 3804 lane = 
bnx2x_get_warpcore_lane(phy, params); 3805 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3806 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 3807 WC_TX_DRIVER(0x02, 0x06, 0x09)); 3808 /* Configure the next lane if dual mode */ 3809 if (phy->flags & FLAGS_WC_DUAL_MODE) 3810 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3811 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), 3812 WC_TX_DRIVER(0x02, 0x06, 0x09)); 3813 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3814 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 3815 0x03f0); 3816 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3817 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, 3818 0x03f0); 3819 3820 /* Advertised speeds */ 3821 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3822 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); 3823 3824 /* Advertised and set FEC (Forward Error Correction) */ 3825 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3826 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, 3827 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | 3828 MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); 3829 3830 /* Enable CL37 BAM */ 3831 if (REG_RD(bp, params->shmem_base + 3832 offsetof(struct shmem_region, dev_info. 3833 port_hw_config[params->port].default_cfg)) & 3834 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { 3835 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3836 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, 3837 1); 3838 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); 3839 } 3840 3841 /* Advertise pause */ 3842 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3844 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 3847 /* Over 1G - AN local device user page 1 */ 3848 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3849 MDIO_WC_REG_DIGITAL3_UP1, 0x1f); 3850 3851 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 3852 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || 3853 (phy->req_line_speed == SPEED_20000)) { 3854 3855 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3856 MDIO_AER_BLOCK_AER_REG, lane); 3857 3858 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3859 MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane), 3860 (1<<11)); 3861 3862 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3863 MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7); 3864 bnx2x_set_aer_mmd(params, phy); 3865 3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 } else { 3868 bnx2x_disable_kr2(params, vars, phy); 3869 } 3870 3871 /* Enable Autoneg: only on the main lane */ 3872 bnx2x_warpcore_restart_AN_KR(phy, params); 3873 } 3874 3875 static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, 3876 struct link_params *params, 3877 struct link_vars *vars) 3878 { 3879 struct bnx2x *bp = params->bp; 3880 u16 val16, i, lane; 3881 static struct bnx2x_reg_set reg_set[] = { 3882 /* Disable Autoneg */ 3883 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3884 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3885 0x3f00}, 3886 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, 3887 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, 3888 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, 3889 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, 3890 /* Leave cl72 training enable, needed for KR */ 3891 {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} 3892 }; 3893 3894 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 3895 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3896 reg_set[i].val); 3897 3898 lane = bnx2x_get_warpcore_lane(phy, params); 3899 /* Global registers */ 3900 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3901 
MDIO_AER_BLOCK_AER_REG, 0); 3902 /* Disable CL36 PCS Tx */ 3903 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3904 MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); 3905 val16 &= ~(0x0011 << lane); 3906 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3907 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); 3908 3909 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3910 MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); 3911 val16 |= (0x0303 << (lane << 1)); 3912 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3913 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); 3914 /* Restore AER */ 3915 bnx2x_set_aer_mmd(params, phy); 3916 /* Set speed via PMA/PMD register */ 3917 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3918 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); 3919 3920 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3921 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); 3922 3923 /* Enable encoded forced speed */ 3924 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3925 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); 3926 3927 /* Turn TX scramble payload only the 64/66 scrambler */ 3928 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3929 MDIO_WC_REG_TX66_CONTROL, 0x9); 3930 3931 /* Turn RX scramble payload only the 64/66 scrambler */ 3932 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3933 MDIO_WC_REG_RX66_CONTROL, 0xF9); 3934 3935 /* Set and clear loopback to cause a reset to 64/66 decoder */ 3936 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3937 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); 3938 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3939 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); 3940 3941 } 3942 3943 static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, 3944 struct link_params *params, 3945 u8 is_xfi) 3946 { 3947 struct bnx2x *bp = params->bp; 3948 u16 misc1_val, tap_val, tx_driver_val, lane, val; 3949 u32 cfg_tap_val, tx_drv_brdct, tx_equal; 3950 3951 /* Hold rxSeqStart */ 3952 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3953 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); 3954 3955 /* Hold tx_fifo_reset */ 3956 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3957 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); 3958 3959 /* Disable CL73 AN */ 3960 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); 3961 3962 /* Disable 100FX Enable and Auto-Detect */ 3963 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 3964 MDIO_WC_REG_FX100_CTRL1, 0xFFFA); 3965 3966 /* Disable 100FX Idle detect */ 3967 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3968 MDIO_WC_REG_FX100_CTRL3, 0x0080); 3969 3970 /* Set Block address to Remote PHY & Clear forced_speed[5] */ 3971 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 3972 MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F); 3973 3974 /* Turn off auto-detect & fiber mode */ 3975 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 3976 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 3977 0xFFEE); 3978 3979 /* Set filter_force_link, disable_false_link and parallel_detect */ 3980 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3981 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); 3982 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3983 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 3984 ((val | 0x0006) & 0xFFFE)); 3985 3986 /* Set XFI / SFI */ 3987 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3988 MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); 3989 3990 misc1_val &= ~(0x1f); 3991 3992 if (is_xfi) { 3993 misc1_val |= 0x5; 3994 tap_val = WC_TX_FIR(0x08, 0x37, 0x00); 3995 tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03); 3996 } else { 3997 cfg_tap_val = REG_RD(bp, params->shmem_base + 3998 offsetof(struct shmem_region, dev_info. 3999 port_hw_config[params->port]. 
4000 sfi_tap_values)); 4001 4002 tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; 4003 4004 tx_drv_brdct = (cfg_tap_val & 4005 PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> 4006 PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; 4007 4008 misc1_val |= 0x9; 4009 4010 /* TAP values are controlled by nvram, if value there isn't 0 */ 4011 if (tx_equal) 4012 tap_val = (u16)tx_equal; 4013 else 4014 tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); 4015 4016 if (tx_drv_brdct) 4017 tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct, 4018 0x06); 4019 else 4020 tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06); 4021 } 4022 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4023 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); 4024 4025 /* Set Transmit PMD settings */ 4026 lane = bnx2x_get_warpcore_lane(phy, params); 4027 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4028 MDIO_WC_REG_TX_FIR_TAP, 4029 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); 4030 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4031 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 4032 tx_driver_val); 4033 4034 /* Enable fiber mode, enable and invert sig_det */ 4035 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4036 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); 4037 4038 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ 4039 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4040 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); 4041 4042 bnx2x_warpcore_set_lpi_passthrough(phy, params); 4043 4044 /* 10G XFI Full Duplex */ 4045 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4046 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); 4047 4048 /* Release tx_fifo_reset */ 4049 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4050 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 4051 0xFFFE); 4052 /* Release rxSeqStart */ 4053 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4054 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF); 4055 } 4056 4057 static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy, 4058 struct link_params *params) 4059 { 4060 u16 val; 4061 struct bnx2x *bp = params->bp; 4062 /* Set global registers, so set AER lane to 0 */ 4063 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4064 MDIO_AER_BLOCK_AER_REG, 0); 4065 4066 /* Disable sequencer */ 4067 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4068 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13)); 4069 4070 bnx2x_set_aer_mmd(params, phy); 4071 4072 bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD, 4073 MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1)); 4074 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 4075 MDIO_AN_REG_CTRL, 0); 4076 /* Turn off CL73 */ 4077 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4078 MDIO_WC_REG_CL73_USERB0_CTRL, &val); 4079 val &= ~(1<<5); 4080 val |= (1<<6); 4081 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4082 MDIO_WC_REG_CL73_USERB0_CTRL, val); 4083 4084 /* Set 20G KR2 force speed */ 4085 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4086 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f); 4087 4088 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4089 MDIO_WC_REG_DIGITAL4_MISC3, (1<<7)); 4090 4091 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4092 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val); 4093 val &= ~(3<<14); 4094 val |= (1<<15); 4095 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4096 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val); 4097 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4098 MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A); 4099 4100 /* Enable sequencer (over lane 0) */ 4101 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4102 MDIO_AER_BLOCK_AER_REG, 0); 4103 4104 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4105 
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13)); 4106 4107 bnx2x_set_aer_mmd(params, phy); 4108 } 4109 4110 static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, 4111 struct bnx2x_phy *phy, 4112 u16 lane) 4113 { 4114 /* Rx0 anaRxControl1G */ 4115 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4116 MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); 4117 4118 /* Rx2 anaRxControl1G */ 4119 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4120 MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); 4121 4122 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4123 MDIO_WC_REG_RX66_SCW0, 0xE070); 4124 4125 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4126 MDIO_WC_REG_RX66_SCW1, 0xC0D0); 4127 4128 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4129 MDIO_WC_REG_RX66_SCW2, 0xA0B0); 4130 4131 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4132 MDIO_WC_REG_RX66_SCW3, 0x8090); 4133 4134 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4135 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); 4136 4137 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4138 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); 4139 4140 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4141 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); 4142 4143 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4144 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); 4145 4146 /* Serdes Digital Misc1 */ 4147 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4148 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); 4149 4150 /* Serdes Digital4 Misc3 */ 4151 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4152 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); 4153 4154 /* Set Transmit PMD settings */ 4155 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4156 MDIO_WC_REG_TX_FIR_TAP, 4157 (WC_TX_FIR(0x12, 0x2d, 0x00) | 4158 MDIO_WC_REG_TX_FIR_TAP_ENABLE)); 4159 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4160 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 4161 WC_TX_DRIVER(0x02, 0x02, 0x02)); 4162 } 4163 4164 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, 4165 struct link_params *params, 4166 u8 fiber_mode, 4167 u8 always_autoneg) 4168 { 4169 struct bnx2x *bp = params->bp; 4170 u16 val16, digctrl_kx1, digctrl_kx2; 4171 4172 /* Clear XFI clock comp in non-10G single lane mode. 
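	 * (clears bits 14:13 of MDIO_WC_REG_RX66_CONTROL; the same bits
	 * are set back as the XFI default in bnx2x_warpcore_clear_regs()).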
*/ 4173 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4174 MDIO_WC_REG_RX66_CONTROL, ~(3<<13)); 4175 4176 bnx2x_warpcore_set_lpi_passthrough(phy, params); 4177 4178 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { 4179 /* SGMII Autoneg */ 4180 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4181 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 4182 0x1000); 4183 DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); 4184 } else { 4185 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4186 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4187 val16 &= 0xcebf; 4188 switch (phy->req_line_speed) { 4189 case SPEED_10: 4190 break; 4191 case SPEED_100: 4192 val16 |= 0x2000; 4193 break; 4194 case SPEED_1000: 4195 val16 |= 0x0040; 4196 break; 4197 default: 4198 DP(NETIF_MSG_LINK, 4199 "Speed not supported: 0x%x\n", phy->req_line_speed); 4200 return; 4201 } 4202 4203 if (phy->req_duplex == DUPLEX_FULL) 4204 val16 |= 0x0100; 4205 4206 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4207 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); 4208 4209 DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", 4210 phy->req_line_speed); 4211 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4212 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4213 DP(NETIF_MSG_LINK, " (readback) %x\n", val16); 4214 } 4215 4216 /* SGMII Slave mode and disable signal detect */ 4217 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4218 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); 4219 if (fiber_mode) 4220 digctrl_kx1 = 1; 4221 else 4222 digctrl_kx1 &= 0xff4a; 4223 4224 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4225 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 4226 digctrl_kx1); 4227 4228 /* Turn off parallel detect */ 4229 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4230 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); 4231 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4232 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 4233 (digctrl_kx2 & ~(1<<2))); 4234 4235 /* Re-enable parallel detect */ 4236 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4237 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 4238 (digctrl_kx2 | (1<<2))); 4239 4240 /* Enable autodet */ 4241 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4242 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 4243 (digctrl_kx1 | 0x10)); 4244 } 4245 4246 static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, 4247 struct bnx2x_phy *phy, 4248 u8 reset) 4249 { 4250 u16 val; 4251 /* Take lane out of reset after configuration is finished */ 4252 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4253 MDIO_WC_REG_DIGITAL5_MISC6, &val); 4254 if (reset) 4255 val |= 0xC000; 4256 else 4257 val &= 0x3FFF; 4258 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4259 MDIO_WC_REG_DIGITAL5_MISC6, val); 4260 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4261 MDIO_WC_REG_DIGITAL5_MISC6, &val); 4262 } 4263 /* Clear SFI/XFI link settings registers */ 4264 static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, 4265 struct link_params *params, 4266 u16 lane) 4267 { 4268 struct bnx2x *bp = params->bp; 4269 u16 i; 4270 static struct bnx2x_reg_set wc_regs[] = { 4271 {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, 4272 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, 4273 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, 4274 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, 4275 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 4276 0x0195}, 4277 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 4278 0x0007}, 4279 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 4280 0x0002}, 4281 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, 4282 {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, 4283 
{MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, 4284 {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} 4285 }; 4286 /* Set XFI clock comp as default. */ 4287 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4288 MDIO_WC_REG_RX66_CONTROL, (3<<13)); 4289 4290 for (i = 0; i < ARRAY_SIZE(wc_regs); i++) 4291 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, 4292 wc_regs[i].val); 4293 4294 lane = bnx2x_get_warpcore_lane(phy, params); 4295 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4296 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); 4297 4298 } 4299 4300 static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, 4301 u32 chip_id, 4302 u32 shmem_base, u8 port, 4303 u8 *gpio_num, u8 *gpio_port) 4304 { 4305 u32 cfg_pin; 4306 *gpio_num = 0; 4307 *gpio_port = 0; 4308 if (CHIP_IS_E3(bp)) { 4309 cfg_pin = (REG_RD(bp, shmem_base + 4310 offsetof(struct shmem_region, 4311 dev_info.port_hw_config[port].e3_sfp_ctrl)) & 4312 PORT_HW_CFG_E3_MOD_ABS_MASK) >> 4313 PORT_HW_CFG_E3_MOD_ABS_SHIFT; 4314 4315 /* Should not happen. This function called upon interrupt 4316 * triggered by GPIO ( since EPIO can only generate interrupts 4317 * to MCP). 4318 * So if this function was called and none of the GPIOs was set, 4319 * it means the shit hit the fan. 4320 */ 4321 if ((cfg_pin < PIN_CFG_GPIO0_P0) || 4322 (cfg_pin > PIN_CFG_GPIO3_P1)) { 4323 DP(NETIF_MSG_LINK, 4324 "No cfg pin %x for module detect indication\n", 4325 cfg_pin); 4326 return -EINVAL; 4327 } 4328 4329 *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; 4330 *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; 4331 } else { 4332 *gpio_num = MISC_REGISTERS_GPIO_3; 4333 *gpio_port = port; 4334 } 4335 4336 return 0; 4337 } 4338 4339 static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, 4340 struct link_params *params) 4341 { 4342 struct bnx2x *bp = params->bp; 4343 u8 gpio_num, gpio_port; 4344 u32 gpio_val; 4345 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, 4346 params->shmem_base, params->port, 4347 &gpio_num, &gpio_port) != 0) 4348 return 0; 4349 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); 4350 4351 /* Call the handling function in case module is detected */ 4352 if (gpio_val == 0) 4353 return 1; 4354 else 4355 return 0; 4356 } 4357 static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy, 4358 struct link_params *params) 4359 { 4360 u16 gp2_status_reg0, lane; 4361 struct bnx2x *bp = params->bp; 4362 4363 lane = bnx2x_get_warpcore_lane(phy, params); 4364 4365 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, 4366 &gp2_status_reg0); 4367 4368 return (gp2_status_reg0 >> (8+lane)) & 0x1; 4369 } 4370 4371 static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, 4372 struct link_params *params, 4373 struct link_vars *vars) 4374 { 4375 struct bnx2x *bp = params->bp; 4376 u32 serdes_net_if; 4377 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 4379 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 4381 if (!vars->turn_to_run_wc_rt) 4382 return; 4383 4384 if (vars->rx_tx_asic_rst) { 4385 u16 lane = bnx2x_get_warpcore_lane(phy, params); 4386 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 offsetof(struct shmem_region, dev_info. 4388 port_hw_config[params->port].default_cfg)) & 4389 PORT_HW_CFG_NET_SERDES_IF_MASK); 4390 4391 switch (serdes_net_if) { 4392 case PORT_HW_CFG_NET_SERDES_IF_KR: 4393 /* Do we get link yet? 
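			 * Check the per-lane 1G (bit 8 + lane) and 10G-KR
			 * (bit 12 + lane) link bits in Warpcore status
			 * register 0x81d1; if neither is set, the lane is
			 * reset and autoneg is restarted below.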
*/ 4394 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, 4395 &gp_status1); 4396 lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */ 4397 /*10G KR*/ 4398 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 4400 if (lnkup_kr || lnkup) { 4401 vars->rx_tx_asic_rst = 0; 4402 } else { 4403 /* Reset the lane to see if link comes up.*/ 4404 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 0); 4406 4407 /* Restart Autoneg */ 4408 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 4409 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); 4410 4411 vars->rx_tx_asic_rst--; 4412 DP(NETIF_MSG_LINK, "0x%x retry left\n", 4413 vars->rx_tx_asic_rst); 4414 } 4415 break; 4416 4417 default: 4418 break; 4419 } 4420 4421 } /*params->rx_tx_asic_rst*/ 4422 4423 } 4424 static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy, 4425 struct link_params *params) 4426 { 4427 u16 lane = bnx2x_get_warpcore_lane(phy, params); 4428 struct bnx2x *bp = params->bp; 4429 bnx2x_warpcore_clear_regs(phy, params, lane); 4430 if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] == 4431 SPEED_10000) && 4432 (phy->media_type != ETH_PHY_SFP_1G_FIBER)) { 4433 DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); 4434 bnx2x_warpcore_set_10G_XFI(phy, params, 0); 4435 } else { 4436 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); 4437 bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0); 4438 } 4439 } 4440 4441 static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, 4442 struct bnx2x_phy *phy, 4443 u8 tx_en) 4444 { 4445 struct bnx2x *bp = params->bp; 4446 u32 cfg_pin; 4447 u8 port = params->port; 4448 4449 cfg_pin = REG_RD(bp, params->shmem_base + 4450 offsetof(struct shmem_region, 4451 dev_info.port_hw_config[port].e3_sfp_ctrl)) & 4452 PORT_HW_CFG_E3_TX_LASER_MASK; 4453 /* Set the !tx_en since this pin is DISABLE_TX_LASER */ 4454 DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); 4455 4456 /* For 20G, the expected pin to be used is 3 pins after the current */ 4457 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); 4458 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) 4459 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); 4460 } 4461 4462 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4463 struct link_params *params, 4464 struct link_vars *vars) 4465 { 4466 struct bnx2x *bp = params->bp; 4467 u32 serdes_net_if; 4468 u8 fiber_mode; 4469 u16 lane = bnx2x_get_warpcore_lane(phy, params); 4470 serdes_net_if = (REG_RD(bp, params->shmem_base + 4471 offsetof(struct shmem_region, dev_info. 
4472 port_hw_config[params->port].default_cfg)) & 4473 PORT_HW_CFG_NET_SERDES_IF_MASK); 4474 DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " 4475 "serdes_net_if = 0x%x\n", 4476 vars->line_speed, serdes_net_if); 4477 bnx2x_set_aer_mmd(params, phy); 4478 bnx2x_warpcore_reset_lane(bp, phy, 1); 4479 vars->phy_flags |= PHY_XGXS_FLAG; 4480 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || 4481 (phy->req_line_speed && 4482 ((phy->req_line_speed == SPEED_100) || 4483 (phy->req_line_speed == SPEED_10)))) { 4484 vars->phy_flags |= PHY_SGMII_FLAG; 4485 DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); 4486 bnx2x_warpcore_clear_regs(phy, params, lane); 4487 bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1); 4488 } else { 4489 switch (serdes_net_if) { 4490 case PORT_HW_CFG_NET_SERDES_IF_KR: 4491 /* Enable KR Auto Neg */ 4492 if (params->loopback_mode != LOOPBACK_EXT) 4493 bnx2x_warpcore_enable_AN_KR(phy, params, vars); 4494 else { 4495 DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); 4496 bnx2x_warpcore_set_10G_KR(phy, params, vars); 4497 } 4498 break; 4499 4500 case PORT_HW_CFG_NET_SERDES_IF_XFI: 4501 bnx2x_warpcore_clear_regs(phy, params, lane); 4502 if (vars->line_speed == SPEED_10000) { 4503 DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); 4504 bnx2x_warpcore_set_10G_XFI(phy, params, 1); 4505 } else { 4506 if (SINGLE_MEDIA_DIRECT(params)) { 4507 DP(NETIF_MSG_LINK, "1G Fiber\n"); 4508 fiber_mode = 1; 4509 } else { 4510 DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); 4511 fiber_mode = 0; 4512 } 4513 bnx2x_warpcore_set_sgmii_speed(phy, 4514 params, 4515 fiber_mode, 4516 0); 4517 } 4518 4519 break; 4520 4521 case PORT_HW_CFG_NET_SERDES_IF_SFI: 4522 /* Issue Module detection if module is plugged, or 4523 * enabled transmitter to avoid current leakage in case 4524 * no module is connected 4525 */ 4526 if ((params->loopback_mode == LOOPBACK_NONE) || 4527 (params->loopback_mode == LOOPBACK_EXT)) { 4528 if (bnx2x_is_sfp_module_plugged(phy, params)) 4529 bnx2x_sfp_module_detection(phy, params); 4530 else 4531 bnx2x_sfp_e3_set_transmitter(params, 4532 phy, 1); 4533 } 4534 4535 bnx2x_warpcore_config_sfi(phy, params); 4536 break; 4537 4538 case PORT_HW_CFG_NET_SERDES_IF_DXGXS: 4539 if (vars->line_speed != SPEED_20000) { 4540 DP(NETIF_MSG_LINK, "Speed not supported yet\n"); 4541 return; 4542 } 4543 DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); 4544 bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); 4545 /* Issue Module detection */ 4546 4547 bnx2x_sfp_module_detection(phy, params); 4548 break; 4549 case PORT_HW_CFG_NET_SERDES_IF_KR2: 4550 if (!params->loopback_mode) { 4551 bnx2x_warpcore_enable_AN_KR(phy, params, vars); 4552 } else { 4553 DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n"); 4554 bnx2x_warpcore_set_20G_force_KR2(phy, params); 4555 } 4556 break; 4557 default: 4558 DP(NETIF_MSG_LINK, 4559 "Unsupported Serdes Net Interface 0x%x\n", 4560 serdes_net_if); 4561 return; 4562 } 4563 } 4564 4565 /* Take lane out of reset after configuration is finished */ 4566 bnx2x_warpcore_reset_lane(bp, phy, 0); 4567 DP(NETIF_MSG_LINK, "Exit config init\n"); 4568 } 4569 4570 static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, 4571 struct link_params *params) 4572 { 4573 struct bnx2x *bp = params->bp; 4574 u16 val16, lane; 4575 bnx2x_sfp_e3_set_transmitter(params, phy, 0); 4576 bnx2x_set_mdio_emac_per_phy(bp, params); 4577 bnx2x_set_aer_mmd(params, phy); 4578 /* Global register */ 4579 bnx2x_warpcore_reset_lane(bp, phy, 1); 4580 4581 /* Clear loopback settings (if any) */ 4582 /* 10G & 20G */ 4583 bnx2x_cl45_read_and_write(bp, phy, 
MDIO_WC_DEVAD, 4584 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF); 4585 4586 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4587 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe); 4588 4589 /* Update those 1-copy registers */ 4590 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4591 MDIO_AER_BLOCK_AER_REG, 0); 4592 /* Enable 1G MDIO (1-copy) */ 4593 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4594 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4595 ~0x10); 4596 4597 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, 4598 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00); 4599 lane = bnx2x_get_warpcore_lane(phy, params); 4600 /* Disable CL36 PCS Tx */ 4601 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4602 MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); 4603 val16 |= (0x11 << lane); 4604 if (phy->flags & FLAGS_WC_DUAL_MODE) 4605 val16 |= (0x22 << lane); 4606 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4607 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); 4608 4609 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4610 MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); 4611 val16 &= ~(0x0303 << (lane << 1)); 4612 val16 |= (0x0101 << (lane << 1)); 4613 if (phy->flags & FLAGS_WC_DUAL_MODE) { 4614 val16 &= ~(0x0c0c << (lane << 1)); 4615 val16 |= (0x0404 << (lane << 1)); 4616 } 4617 4618 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4619 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); 4620 /* Restore AER */ 4621 bnx2x_set_aer_mmd(params, phy); 4622 4623 } 4624 4625 static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, 4626 struct link_params *params) 4627 { 4628 struct bnx2x *bp = params->bp; 4629 u16 val16; 4630 u32 lane; 4631 DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", 4632 params->loopback_mode, phy->req_line_speed); 4633 4634 if (phy->req_line_speed < SPEED_10000 || 4635 phy->supported & SUPPORTED_20000baseKR2_Full) { 4636 /* 10/100/1000/20G-KR2 */ 4637 4638 /* Update those 1-copy registers */ 4639 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4640 MDIO_AER_BLOCK_AER_REG, 0); 4641 /* Enable 1G MDIO (1-copy) */ 4642 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4643 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4644 0x10); 4645 /* Set 1G loopback based on lane (1-copy) */ 4646 lane = bnx2x_get_warpcore_lane(phy, params); 4647 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4648 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); 4649 val16 |= (1<<lane); 4650 if (phy->flags & FLAGS_WC_DUAL_MODE) 4651 val16 |= (2<<lane); 4652 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4653 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 4654 val16); 4655 4656 /* Switch back to 4-copy registers */ 4657 bnx2x_set_aer_mmd(params, phy); 4658 } else { 4659 /* 10G / 20G-DXGXS */ 4660 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4661 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 4662 0x4000); 4663 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4664 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); 4665 } 4666 } 4667 4668 4669 4670 static void bnx2x_sync_link(struct link_params *params, 4671 struct link_vars *vars) 4672 { 4673 struct bnx2x *bp = params->bp; 4674 u8 link_10g_plus; 4675 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) 4676 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; 4677 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 4678 if (vars->link_up) { 4679 DP(NETIF_MSG_LINK, "phy link up\n"); 4680 4681 vars->phy_link_up = 1; 4682 vars->duplex = DUPLEX_FULL; 4683 switch (vars->link_status & 4684 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 4685 case LINK_10THD: 4686 vars->duplex = DUPLEX_HALF; 4687 /* Fall thru */ 4688 case LINK_10TFD: 4689 vars->line_speed = SPEED_10; 4690 break; 4691 4692 case LINK_100TXHD: 4693 
vars->duplex = DUPLEX_HALF; 4694 /* Fall thru */ 4695 case LINK_100T4: 4696 case LINK_100TXFD: 4697 vars->line_speed = SPEED_100; 4698 break; 4699 4700 case LINK_1000THD: 4701 vars->duplex = DUPLEX_HALF; 4702 /* Fall thru */ 4703 case LINK_1000TFD: 4704 vars->line_speed = SPEED_1000; 4705 break; 4706 4707 case LINK_2500THD: 4708 vars->duplex = DUPLEX_HALF; 4709 /* Fall thru */ 4710 case LINK_2500TFD: 4711 vars->line_speed = SPEED_2500; 4712 break; 4713 4714 case LINK_10GTFD: 4715 vars->line_speed = SPEED_10000; 4716 break; 4717 case LINK_20GTFD: 4718 vars->line_speed = SPEED_20000; 4719 break; 4720 default: 4721 break; 4722 } 4723 vars->flow_ctrl = 0; 4724 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 4725 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX; 4726 4727 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) 4728 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX; 4729 4730 if (!vars->flow_ctrl) 4731 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 4732 4733 if (vars->line_speed && 4734 ((vars->line_speed == SPEED_10) || 4735 (vars->line_speed == SPEED_100))) { 4736 vars->phy_flags |= PHY_SGMII_FLAG; 4737 } else { 4738 vars->phy_flags &= ~PHY_SGMII_FLAG; 4739 } 4740 if (vars->line_speed && 4741 USES_WARPCORE(bp) && 4742 (vars->line_speed == SPEED_1000)) 4743 vars->phy_flags |= PHY_SGMII_FLAG; 4744 /* Anything 10 and over uses the bmac */ 4745 link_10g_plus = (vars->line_speed >= SPEED_10000); 4746 4747 if (link_10g_plus) { 4748 if (USES_WARPCORE(bp)) 4749 vars->mac_type = MAC_TYPE_XMAC; 4750 else 4751 vars->mac_type = MAC_TYPE_BMAC; 4752 } else { 4753 if (USES_WARPCORE(bp)) 4754 vars->mac_type = MAC_TYPE_UMAC; 4755 else 4756 vars->mac_type = MAC_TYPE_EMAC; 4757 } 4758 } else { /* Link down */ 4759 DP(NETIF_MSG_LINK, "phy link down\n"); 4760 4761 vars->phy_link_up = 0; 4762 4763 vars->line_speed = 0; 4764 vars->duplex = DUPLEX_FULL; 4765 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 4766 4767 /* Indicate no mac active */ 4768 vars->mac_type = MAC_TYPE_NONE; 4769 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) 4770 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 4771 if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) 4772 vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; 4773 } 4774 } 4775 4776 void bnx2x_link_status_update(struct link_params *params, 4777 struct link_vars *vars) 4778 { 4779 struct bnx2x *bp = params->bp; 4780 u8 port = params->port; 4781 u32 sync_offset, media_types; 4782 /* Update PHY configuration */ 4783 set_phy_vars(params, vars); 4784 4785 vars->link_status = REG_RD(bp, params->shmem_base + 4786 offsetof(struct shmem_region, 4787 port_mb[port].link_status)); 4788 4789 /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ 4790 if (params->loopback_mode != LOOPBACK_NONE && 4791 params->loopback_mode != LOOPBACK_EXT) 4792 vars->link_status |= LINK_STATUS_LINK_UP; 4793 4794 if (bnx2x_eee_has_cap(params)) 4795 vars->eee_status = REG_RD(bp, params->shmem2_base + 4796 offsetof(struct shmem2_region, 4797 eee_status[params->port])); 4798 4799 vars->phy_flags = PHY_XGXS_FLAG; 4800 bnx2x_sync_link(params, vars); 4801 /* Sync media type */ 4802 sync_offset = params->shmem_base + 4803 offsetof(struct shmem_region, 4804 dev_info.port_hw_config[port].media_type); 4805 media_types = REG_RD(bp, sync_offset); 4806 4807 params->phy[INT_PHY].media_type = 4808 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> 4809 PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; 4810 params->phy[EXT_PHY1].media_type = 4811 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> 4812 PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; 4813 
params->phy[EXT_PHY2].media_type = 4814 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> 4815 PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; 4816 DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); 4817 4818 /* Sync AEU offset */ 4819 sync_offset = params->shmem_base + 4820 offsetof(struct shmem_region, 4821 dev_info.port_hw_config[port].aeu_int_mask); 4822 4823 vars->aeu_int_mask = REG_RD(bp, sync_offset); 4824 4825 /* Sync PFC status */ 4826 if (vars->link_status & LINK_STATUS_PFC_ENABLED) 4827 params->feature_config_flags |= 4828 FEATURE_CONFIG_PFC_ENABLED; 4829 else 4830 params->feature_config_flags &= 4831 ~FEATURE_CONFIG_PFC_ENABLED; 4832 4833 if (SHMEM2_HAS(bp, link_attr_sync)) 4834 vars->link_attr_sync = SHMEM2_RD(bp, 4835 link_attr_sync[params->port]); 4836 4837 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", 4838 vars->link_status, vars->phy_link_up, vars->aeu_int_mask); 4839 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", 4840 vars->line_speed, vars->duplex, vars->flow_ctrl); 4841 } 4842 4843 static void bnx2x_set_master_ln(struct link_params *params, 4844 struct bnx2x_phy *phy) 4845 { 4846 struct bnx2x *bp = params->bp; 4847 u16 new_master_ln, ser_lane; 4848 ser_lane = ((params->lane_config & 4849 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 4850 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 4851 4852 /* Set the master_ln for AN */ 4853 CL22_RD_OVER_CL45(bp, phy, 4854 MDIO_REG_BANK_XGXS_BLOCK2, 4855 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 4856 &new_master_ln); 4857 4858 CL22_WR_OVER_CL45(bp, phy, 4859 MDIO_REG_BANK_XGXS_BLOCK2 , 4860 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 4861 (new_master_ln | ser_lane)); 4862 } 4863 4864 static int bnx2x_reset_unicore(struct link_params *params, 4865 struct bnx2x_phy *phy, 4866 u8 set_serdes) 4867 { 4868 struct bnx2x *bp = params->bp; 4869 u16 mii_control; 4870 u16 i; 4871 CL22_RD_OVER_CL45(bp, phy, 4872 MDIO_REG_BANK_COMBO_IEEE0, 4873 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); 4874 4875 /* Reset the unicore */ 4876 CL22_WR_OVER_CL45(bp, phy, 4877 MDIO_REG_BANK_COMBO_IEEE0, 4878 MDIO_COMBO_IEEE0_MII_CONTROL, 4879 (mii_control | 4880 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 4881 if (set_serdes) 4882 bnx2x_set_serdes_access(bp, params->port); 4883 4884 /* Wait for the reset to self clear */ 4885 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 4886 udelay(5); 4887 4888 /* The reset erased the previous bank value */ 4889 CL22_RD_OVER_CL45(bp, phy, 4890 MDIO_REG_BANK_COMBO_IEEE0, 4891 MDIO_COMBO_IEEE0_MII_CONTROL, 4892 &mii_control); 4893 4894 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 4895 udelay(5); 4896 return 0; 4897 } 4898 } 4899 4900 netdev_err(bp->dev, "Warning: PHY was not initialized," 4901 " Port %d\n", 4902 params->port); 4903 DP(NETIF_MSG_LINK, "BUG! 
XGXS is still in reset!\n");
	return -EINVAL;

}

static void bnx2x_set_swap_lanes(struct link_params *params,
				 struct bnx2x_phy *phy)
{
	struct bnx2x *bp = params->bp;
	/* Each two bits represents a lane number:
	 * No swap is 0123 => 0x1b no need to enable the swap
	 */
	u16 rx_lane_swap, tx_lane_swap;

	rx_lane_swap = ((params->lane_config &
			 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
			PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
	tx_lane_swap = ((params->lane_config &
			 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
			PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);

	if (rx_lane_swap != 0x1b) {
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_XGXS_BLOCK2,
				  MDIO_XGXS_BLOCK2_RX_LN_SWAP,
				  (rx_lane_swap |
				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
	} else {
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_XGXS_BLOCK2,
				  MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
	}

	if (tx_lane_swap != 0x1b) {
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_XGXS_BLOCK2,
				  MDIO_XGXS_BLOCK2_TX_LN_SWAP,
				  (tx_lane_swap |
				   MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
	} else {
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_XGXS_BLOCK2,
				  MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
	}
}

static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
					 struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u16 control2;
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
			  &control2);
	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
	else
		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
	DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
	   phy->speed_cap_mask, control2);
	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
			  control2);

	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
	    (phy->speed_cap_mask &
	     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
		DP(NETIF_MSG_LINK, "XGXS\n");

		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);

		CL22_RD_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
				  &control2);

		control2 |=
		    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;

		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
				  control2);

		/* Disable parallel detection of HiG */
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_XGXS_BLOCK2,
				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
	}
}

static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
			      struct link_params *params,
			      struct link_vars *vars,
			      u8 enable_cl73)
{
	struct bnx2x *bp = params->bp;
	u16 reg_val;

	/* CL37 Autoneg */
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_COMBO_IEEE0,
			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);

	/* CL37 Autoneg Enabled */
	if (vars->line_speed == SPEED_AUTO_NEG)
		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
	else /* CL37 Autoneg Disabled */
		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_COMBO_IEEE0,
			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);

	/* Enable/Disable Autodetection */

	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
	reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
		     MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
	reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
	if (vars->line_speed == SPEED_AUTO_NEG)
		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
	else
		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);

	/* Enable TetonII and BAM autoneg */
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_BAM_NEXT_PAGE,
			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			  &reg_val);
	if (vars->line_speed == SPEED_AUTO_NEG) {
		/* Enable BAM aneg Mode and TetonII aneg Mode */
		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	} else {
		/* TetonII and BAM Autoneg Disabled */
		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	}
	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_BAM_NEXT_PAGE,
			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			  reg_val);

	if (enable_cl73) {
		/* Enable Cl73 FSM status bits */
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_USERB0,
				  MDIO_CL73_USERB0_CL73_UCTRL,
				  0xe);

		/* Enable BAM Station Manager */
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_USERB0,
				  MDIO_CL73_USERB0_CL73_BAM_CTRL1,
				  MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
				  MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
				  MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);

		/* Advertise CL73 link speeds */
		CL22_RD_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_IEEEB1,
				  MDIO_CL73_IEEEB1_AN_ADV2,
				  &reg_val);
		if (phy->speed_cap_mask &
		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
		if (phy->speed_cap_mask &
		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;

		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_IEEEB1,
				  MDIO_CL73_IEEEB1_AN_ADV2,
				  reg_val);

		/* CL73 Autoneg Enabled */
		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;

	} else /* CL73 Autoneg Disabled */
		reg_val = 0;

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_CL73_IEEEB0,
			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}

/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
				 struct link_params *params,
				 struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	u16 reg_val;

	/* Program duplex, disable autoneg and sgmii */
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_COMBO_IEEE0,
			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
	reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
		     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
		     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
	if (phy->req_duplex == DUPLEX_FULL)
		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_COMBO_IEEE0,
			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);

	/* Program speed
	 * - needed only if the speed is greater than 1G (2.5G or 10G)
	 */
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_MISC1, &reg_val);
	/* Clearing the speed value before setting the right speed */
	DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);

	reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
		     MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);

	if (!((vars->line_speed == SPEED_1000) ||
	      (vars->line_speed == SPEED_100) ||
	      (vars->line_speed == SPEED_10))) {

		reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
			    MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
		if (vars->line_speed == SPEED_10000)
			reg_val |=
				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
	}

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_SERDES_DIGITAL,
			  MDIO_SERDES_DIGITAL_MISC1, reg_val);

}

static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
					      struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u16 val = 0;

	/* Set extended capabilities */
	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
		val |= MDIO_OVER_1G_UP1_2_5G;
	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
		val |= MDIO_OVER_1G_UP1_10G;
	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_OVER_1G,
			  MDIO_OVER_1G_UP1, val);

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_OVER_1G,
			  MDIO_OVER_1G_UP3, 0x400);
}

static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
					      struct link_params *params,
					      u16 ieee_fc)
{
	struct bnx2x *bp = params->bp;
	u16 val;
	/* For AN, we are always publishing full duplex */

	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_COMBO_IEEE0,
			  MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
	CL22_RD_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_CL73_IEEEB1,
			  MDIO_CL73_IEEEB1_AN_ADV1, &val);
	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
	CL22_WR_OVER_CL45(bp, phy,
			  MDIO_REG_BANK_CL73_IEEEB1,
			  MDIO_CL73_IEEEB1_AN_ADV1, val);
}

static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
				  struct link_params *params,
				  u8 enable_cl73)
{
	struct bnx2x *bp = params->bp;
	u16 mii_control;

	DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
	/* Enable and restart BAM/CL37 aneg */

	if (enable_cl73) {
		CL22_RD_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_IEEEB0,
				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
				  &mii_control);

		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_CL73_IEEEB0,
				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
				  (mii_control |
				   MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
				   MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
	} else {

		CL22_RD_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_COMBO_IEEE0,
				  MDIO_COMBO_IEEE0_MII_CONTROL,
				  &mii_control);
		DP(NETIF_MSG_LINK,
		   "bnx2x_restart_autoneg mii_control before = 0x%x\n",
		   mii_control);
		CL22_WR_OVER_CL45(bp, phy,
				  MDIO_REG_BANK_COMBO_IEEE0,
				  MDIO_COMBO_IEEE0_MII_CONTROL,
				  (mii_control |
				   MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
				   MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
	}
}

static void bnx2x_initialize_sgmii_process(struct
bnx2x_phy *phy, 5238 struct link_params *params, 5239 struct link_vars *vars) 5240 { 5241 struct bnx2x *bp = params->bp; 5242 u16 control1; 5243 5244 /* In SGMII mode, the unicore is always slave */ 5245 5246 CL22_RD_OVER_CL45(bp, phy, 5247 MDIO_REG_BANK_SERDES_DIGITAL, 5248 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5249 &control1); 5250 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 5251 /* Set sgmii mode (and not fiber) */ 5252 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 5253 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 5254 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 5255 CL22_WR_OVER_CL45(bp, phy, 5256 MDIO_REG_BANK_SERDES_DIGITAL, 5257 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5258 control1); 5259 5260 /* If forced speed */ 5261 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 5262 /* Set speed, disable autoneg */ 5263 u16 mii_control; 5264 5265 CL22_RD_OVER_CL45(bp, phy, 5266 MDIO_REG_BANK_COMBO_IEEE0, 5267 MDIO_COMBO_IEEE0_MII_CONTROL, 5268 &mii_control); 5269 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 5270 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 5271 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 5272 5273 switch (vars->line_speed) { 5274 case SPEED_100: 5275 mii_control |= 5276 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; 5277 break; 5278 case SPEED_1000: 5279 mii_control |= 5280 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; 5281 break; 5282 case SPEED_10: 5283 /* There is nothing to set for 10M */ 5284 break; 5285 default: 5286 /* Invalid speed for SGMII */ 5287 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", 5288 vars->line_speed); 5289 break; 5290 } 5291 5292 /* Setting the full duplex */ 5293 if (phy->req_duplex == DUPLEX_FULL) 5294 mii_control |= 5295 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 5296 CL22_WR_OVER_CL45(bp, phy, 5297 MDIO_REG_BANK_COMBO_IEEE0, 5298 MDIO_COMBO_IEEE0_MII_CONTROL, 5299 mii_control); 5300 5301 } else { /* AN mode */ 5302 /* Enable and restart AN */ 5303 bnx2x_restart_autoneg(phy, params, 0); 5304 } 5305 } 5306 5307 /* Link management 5308 */ 5309 static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, 5310 struct link_params *params) 5311 { 5312 struct bnx2x *bp = params->bp; 5313 u16 pd_10g, status2_1000x; 5314 if (phy->req_line_speed != SPEED_AUTO_NEG) 5315 return 0; 5316 CL22_RD_OVER_CL45(bp, phy, 5317 MDIO_REG_BANK_SERDES_DIGITAL, 5318 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 5319 &status2_1000x); 5320 CL22_RD_OVER_CL45(bp, phy, 5321 MDIO_REG_BANK_SERDES_DIGITAL, 5322 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 5323 &status2_1000x); 5324 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 5325 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 5326 params->port); 5327 return 1; 5328 } 5329 5330 CL22_RD_OVER_CL45(bp, phy, 5331 MDIO_REG_BANK_10G_PARALLEL_DETECT, 5332 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 5333 &pd_10g); 5334 5335 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 5336 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 5337 params->port); 5338 return 1; 5339 } 5340 return 0; 5341 } 5342 5343 static void bnx2x_update_adv_fc(struct bnx2x_phy *phy, 5344 struct link_params *params, 5345 struct link_vars *vars, 5346 u32 gp_status) 5347 { 5348 u16 ld_pause; /* local driver */ 5349 u16 lp_pause; /* link partner */ 5350 u16 pause_result; 5351 struct bnx2x *bp = params->bp; 5352 if ((gp_status & 5353 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 5354 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == 5355 
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 5356 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 5357 5358 CL22_RD_OVER_CL45(bp, phy, 5359 MDIO_REG_BANK_CL73_IEEEB1, 5360 MDIO_CL73_IEEEB1_AN_ADV1, 5361 &ld_pause); 5362 CL22_RD_OVER_CL45(bp, phy, 5363 MDIO_REG_BANK_CL73_IEEEB1, 5364 MDIO_CL73_IEEEB1_AN_LP_ADV1, 5365 &lp_pause); 5366 pause_result = (ld_pause & 5367 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; 5368 pause_result |= (lp_pause & 5369 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10; 5370 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result); 5371 } else { 5372 CL22_RD_OVER_CL45(bp, phy, 5373 MDIO_REG_BANK_COMBO_IEEE0, 5374 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 5375 &ld_pause); 5376 CL22_RD_OVER_CL45(bp, phy, 5377 MDIO_REG_BANK_COMBO_IEEE0, 5378 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 5379 &lp_pause); 5380 pause_result = (ld_pause & 5381 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 5382 pause_result |= (lp_pause & 5383 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 5384 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); 5385 } 5386 bnx2x_pause_resolve(vars, pause_result); 5387 5388 } 5389 5390 static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, 5391 struct link_params *params, 5392 struct link_vars *vars, 5393 u32 gp_status) 5394 { 5395 struct bnx2x *bp = params->bp; 5396 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 5397 5398 /* Resolve from gp_status in case of AN complete and not sgmii */ 5399 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { 5400 /* Update the advertised flow-controled of LD/LP in AN */ 5401 if (phy->req_line_speed == SPEED_AUTO_NEG) 5402 bnx2x_update_adv_fc(phy, params, vars, gp_status); 5403 /* But set the flow-control result as the requested one */ 5404 vars->flow_ctrl = phy->req_flow_ctrl; 5405 } else if (phy->req_line_speed != SPEED_AUTO_NEG) 5406 vars->flow_ctrl = params->req_fc_auto_adv; 5407 else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 5408 (!(vars->phy_flags & PHY_SGMII_FLAG))) { 5409 if (bnx2x_direct_parallel_detect_used(phy, params)) { 5410 vars->flow_ctrl = params->req_fc_auto_adv; 5411 return; 5412 } 5413 bnx2x_update_adv_fc(phy, params, vars, gp_status); 5414 } 5415 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 5416 } 5417 5418 static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, 5419 struct link_params *params) 5420 { 5421 struct bnx2x *bp = params->bp; 5422 u16 rx_status, ustat_val, cl37_fsm_received; 5423 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 5424 /* Step 1: Make sure signal is detected */ 5425 CL22_RD_OVER_CL45(bp, phy, 5426 MDIO_REG_BANK_RX0, 5427 MDIO_RX0_RX_STATUS, 5428 &rx_status); 5429 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 5430 (MDIO_RX0_RX_STATUS_SIGDET)) { 5431 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 5432 "rx_status(0x80b0) = 0x%x\n", rx_status); 5433 CL22_WR_OVER_CL45(bp, phy, 5434 MDIO_REG_BANK_CL73_IEEEB0, 5435 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 5436 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 5437 return; 5438 } 5439 /* Step 2: Check CL73 state machine */ 5440 CL22_RD_OVER_CL45(bp, phy, 5441 MDIO_REG_BANK_CL73_USERB0, 5442 MDIO_CL73_USERB0_CL73_USTAT1, 5443 &ustat_val); 5444 if ((ustat_val & 5445 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 5446 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 5447 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 5448 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { 5449 DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. 
" 5450 "ustat_val(0x8371) = 0x%x\n", ustat_val); 5451 return; 5452 } 5453 /* Step 3: Check CL37 Message Pages received to indicate LP 5454 * supports only CL37 5455 */ 5456 CL22_RD_OVER_CL45(bp, phy, 5457 MDIO_REG_BANK_REMOTE_PHY, 5458 MDIO_REMOTE_PHY_MISC_RX_STATUS, 5459 &cl37_fsm_received); 5460 if ((cl37_fsm_received & 5461 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 5462 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 5463 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 5464 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { 5465 DP(NETIF_MSG_LINK, "No CL37 FSM were received. " 5466 "misc_rx_status(0x8330) = 0x%x\n", 5467 cl37_fsm_received); 5468 return; 5469 } 5470 /* The combined cl37/cl73 fsm state information indicating that 5471 * we are connected to a device which does not support cl73, but 5472 * does support cl37 BAM. In this case we disable cl73 and 5473 * restart cl37 auto-neg 5474 */ 5475 5476 /* Disable CL73 */ 5477 CL22_WR_OVER_CL45(bp, phy, 5478 MDIO_REG_BANK_CL73_IEEEB0, 5479 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 5480 0); 5481 /* Restart CL37 autoneg */ 5482 bnx2x_restart_autoneg(phy, params, 0); 5483 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 5484 } 5485 5486 static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, 5487 struct link_params *params, 5488 struct link_vars *vars, 5489 u32 gp_status) 5490 { 5491 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) 5492 vars->link_status |= 5493 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; 5494 5495 if (bnx2x_direct_parallel_detect_used(phy, params)) 5496 vars->link_status |= 5497 LINK_STATUS_PARALLEL_DETECTION_USED; 5498 } 5499 static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, 5500 struct link_params *params, 5501 struct link_vars *vars, 5502 u16 is_link_up, 5503 u16 speed_mask, 5504 u16 is_duplex) 5505 { 5506 struct bnx2x *bp = params->bp; 5507 if (phy->req_line_speed == SPEED_AUTO_NEG) 5508 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; 5509 if (is_link_up) { 5510 DP(NETIF_MSG_LINK, "phy link up\n"); 5511 5512 vars->phy_link_up = 1; 5513 vars->link_status |= LINK_STATUS_LINK_UP; 5514 5515 switch (speed_mask) { 5516 case GP_STATUS_10M: 5517 vars->line_speed = SPEED_10; 5518 if (is_duplex == DUPLEX_FULL) 5519 vars->link_status |= LINK_10TFD; 5520 else 5521 vars->link_status |= LINK_10THD; 5522 break; 5523 5524 case GP_STATUS_100M: 5525 vars->line_speed = SPEED_100; 5526 if (is_duplex == DUPLEX_FULL) 5527 vars->link_status |= LINK_100TXFD; 5528 else 5529 vars->link_status |= LINK_100TXHD; 5530 break; 5531 5532 case GP_STATUS_1G: 5533 case GP_STATUS_1G_KX: 5534 vars->line_speed = SPEED_1000; 5535 if (is_duplex == DUPLEX_FULL) 5536 vars->link_status |= LINK_1000TFD; 5537 else 5538 vars->link_status |= LINK_1000THD; 5539 break; 5540 5541 case GP_STATUS_2_5G: 5542 vars->line_speed = SPEED_2500; 5543 if (is_duplex == DUPLEX_FULL) 5544 vars->link_status |= LINK_2500TFD; 5545 else 5546 vars->link_status |= LINK_2500THD; 5547 break; 5548 5549 case GP_STATUS_5G: 5550 case GP_STATUS_6G: 5551 DP(NETIF_MSG_LINK, 5552 "link speed unsupported gp_status 0x%x\n", 5553 speed_mask); 5554 return -EINVAL; 5555 5556 case GP_STATUS_10G_KX4: 5557 case GP_STATUS_10G_HIG: 5558 case GP_STATUS_10G_CX4: 5559 case GP_STATUS_10G_KR: 5560 case GP_STATUS_10G_SFI: 5561 case GP_STATUS_10G_XFI: 5562 vars->line_speed = SPEED_10000; 5563 vars->link_status |= LINK_10GTFD; 5564 break; 5565 case GP_STATUS_20G_DXGXS: 5566 case GP_STATUS_20G_KR2: 5567 vars->line_speed = 
SPEED_20000; 5568 vars->link_status |= LINK_20GTFD; 5569 break; 5570 default: 5571 DP(NETIF_MSG_LINK, 5572 "link speed unsupported gp_status 0x%x\n", 5573 speed_mask); 5574 return -EINVAL; 5575 } 5576 } else { /* link_down */ 5577 DP(NETIF_MSG_LINK, "phy link down\n"); 5578 5579 vars->phy_link_up = 0; 5580 5581 vars->duplex = DUPLEX_FULL; 5582 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 5583 vars->mac_type = MAC_TYPE_NONE; 5584 } 5585 DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", 5586 vars->phy_link_up, vars->line_speed); 5587 return 0; 5588 } 5589 5590 static int bnx2x_link_settings_status(struct bnx2x_phy *phy, 5591 struct link_params *params, 5592 struct link_vars *vars) 5593 { 5594 struct bnx2x *bp = params->bp; 5595 5596 u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; 5597 int rc = 0; 5598 5599 /* Read gp_status */ 5600 CL22_RD_OVER_CL45(bp, phy, 5601 MDIO_REG_BANK_GP_STATUS, 5602 MDIO_GP_STATUS_TOP_AN_STATUS1, 5603 &gp_status); 5604 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) 5605 duplex = DUPLEX_FULL; 5606 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) 5607 link_up = 1; 5608 speed_mask = gp_status & GP_STATUS_SPEED_MASK; 5609 DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", 5610 gp_status, link_up, speed_mask); 5611 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, 5612 duplex); 5613 if (rc == -EINVAL) 5614 return rc; 5615 5616 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 5617 if (SINGLE_MEDIA_DIRECT(params)) { 5618 vars->duplex = duplex; 5619 bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); 5620 if (phy->req_line_speed == SPEED_AUTO_NEG) 5621 bnx2x_xgxs_an_resolve(phy, params, vars, 5622 gp_status); 5623 } 5624 } else { /* Link_down */ 5625 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 5626 SINGLE_MEDIA_DIRECT(params)) { 5627 /* Check signal is detected */ 5628 bnx2x_check_fallback_to_cl37(phy, params); 5629 } 5630 } 5631 5632 /* Read LP advertised speeds*/ 5633 if (SINGLE_MEDIA_DIRECT(params) && 5634 (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) { 5635 u16 val; 5636 5637 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1, 5638 MDIO_CL73_IEEEB1_AN_LP_ADV2, &val); 5639 5640 if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) 5641 vars->link_status |= 5642 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 5643 if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | 5644 MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) 5645 vars->link_status |= 5646 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 5647 5648 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, 5649 MDIO_OVER_1G_LP_UP1, &val); 5650 5651 if (val & MDIO_OVER_1G_UP1_2_5G) 5652 vars->link_status |= 5653 LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; 5654 if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) 5655 vars->link_status |= 5656 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 5657 } 5658 5659 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5660 vars->duplex, vars->flow_ctrl, vars->link_status); 5661 return rc; 5662 } 5663 5664 static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, 5665 struct link_params *params, 5666 struct link_vars *vars) 5667 { 5668 struct bnx2x *bp = params->bp; 5669 u8 lane; 5670 u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; 5671 int rc = 0; 5672 lane = bnx2x_get_warpcore_lane(phy, params); 5673 /* Read gp_status */ 5674 if ((params->loopback_mode) && 5675 (phy->flags & FLAGS_WC_DUAL_MODE)) { 5676 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5677 
MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); 5678 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5679 MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); 5680 link_up &= 0x1; 5681 } else if ((phy->req_line_speed > SPEED_10000) && 5682 (phy->supported & SUPPORTED_20000baseMLD2_Full)) { 5683 u16 temp_link_up; 5684 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5685 1, &temp_link_up); 5686 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5687 1, &link_up); 5688 DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", 5689 temp_link_up, link_up); 5690 link_up &= (1<<2); 5691 if (link_up) 5692 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5693 } else { 5694 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5695 MDIO_WC_REG_GP2_STATUS_GP_2_1, 5696 &gp_status1); 5697 DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); 5698 /* Check for either KR, 1G, or AN up. */ 5699 link_up = ((gp_status1 >> 8) | 5700 (gp_status1 >> 12) | 5701 (gp_status1)) & 5702 (1 << lane); 5703 if (phy->supported & SUPPORTED_20000baseKR2_Full) { 5704 u16 an_link; 5705 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 5706 MDIO_AN_REG_STATUS, &an_link); 5707 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 5708 MDIO_AN_REG_STATUS, &an_link); 5709 link_up |= (an_link & (1<<2)); 5710 } 5711 if (link_up && SINGLE_MEDIA_DIRECT(params)) { 5712 u16 pd, gp_status4; 5713 if (phy->req_line_speed == SPEED_AUTO_NEG) { 5714 /* Check Autoneg complete */ 5715 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5716 MDIO_WC_REG_GP2_STATUS_GP_2_4, 5717 &gp_status4); 5718 if (gp_status4 & ((1<<12)<<lane)) 5719 vars->link_status |= 5720 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; 5721 5722 /* Check parallel detect used */ 5723 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5724 MDIO_WC_REG_PAR_DET_10G_STATUS, 5725 &pd); 5726 if (pd & (1<<15)) 5727 vars->link_status |= 5728 LINK_STATUS_PARALLEL_DETECTION_USED; 5729 } 5730 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5731 vars->duplex = duplex; 5732 } 5733 } 5734 5735 if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) && 5736 SINGLE_MEDIA_DIRECT(params)) { 5737 u16 val; 5738 5739 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 5740 MDIO_AN_REG_LP_AUTO_NEG2, &val); 5741 5742 if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) 5743 vars->link_status |= 5744 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 5745 if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | 5746 MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) 5747 vars->link_status |= 5748 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 5749 5750 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5751 MDIO_WC_REG_DIGITAL3_LP_UP1, &val); 5752 5753 if (val & MDIO_OVER_1G_UP1_2_5G) 5754 vars->link_status |= 5755 LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; 5756 if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) 5757 vars->link_status |= 5758 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 5759 5760 } 5761 5762 5763 if (lane < 2) { 5764 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5765 MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); 5766 } else { 5767 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5768 MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); 5769 } 5770 DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); 5771 5772 if ((lane & 1) == 0) 5773 gp_speed <<= 8; 5774 gp_speed &= 0x3f00; 5775 link_up = !!link_up; 5776 5777 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 duplex); 5779 5780 /* In case of KR link down, start up the recovering procedure */ 5781 if ((!link_up) && (phy->media_type == ETH_PHY_KR) && 5782 (!(phy->flags & FLAGS_WC_DUAL_MODE))) 5783 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 5784 5785 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x 
link_status 0x%x\n", 5786 vars->duplex, vars->flow_ctrl, vars->link_status); 5787 return rc; 5788 } 5789 static void bnx2x_set_gmii_tx_driver(struct link_params *params) 5790 { 5791 struct bnx2x *bp = params->bp; 5792 struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; 5793 u16 lp_up2; 5794 u16 tx_driver; 5795 u16 bank; 5796 5797 /* Read precomp */ 5798 CL22_RD_OVER_CL45(bp, phy, 5799 MDIO_REG_BANK_OVER_1G, 5800 MDIO_OVER_1G_LP_UP2, &lp_up2); 5801 5802 /* Bits [10:7] at lp_up2, positioned at [15:12] */ 5803 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 5804 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << 5805 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); 5806 5807 if (lp_up2 == 0) 5808 return; 5809 5810 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 5811 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 5812 CL22_RD_OVER_CL45(bp, phy, 5813 bank, 5814 MDIO_TX0_TX_DRIVER, &tx_driver); 5815 5816 /* Replace tx_driver bits [15:12] */ 5817 if (lp_up2 != 5818 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 5819 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 5820 tx_driver |= lp_up2; 5821 CL22_WR_OVER_CL45(bp, phy, 5822 bank, 5823 MDIO_TX0_TX_DRIVER, tx_driver); 5824 } 5825 } 5826 } 5827 5828 static int bnx2x_emac_program(struct link_params *params, 5829 struct link_vars *vars) 5830 { 5831 struct bnx2x *bp = params->bp; 5832 u8 port = params->port; 5833 u16 mode = 0; 5834 5835 DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); 5836 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + 5837 EMAC_REG_EMAC_MODE, 5838 (EMAC_MODE_25G_MODE | 5839 EMAC_MODE_PORT_MII_10M | 5840 EMAC_MODE_HALF_DUPLEX)); 5841 switch (vars->line_speed) { 5842 case SPEED_10: 5843 mode |= EMAC_MODE_PORT_MII_10M; 5844 break; 5845 5846 case SPEED_100: 5847 mode |= EMAC_MODE_PORT_MII; 5848 break; 5849 5850 case SPEED_1000: 5851 mode |= EMAC_MODE_PORT_GMII; 5852 break; 5853 5854 case SPEED_2500: 5855 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); 5856 break; 5857 5858 default: 5859 /* 10G not valid for EMAC */ 5860 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", 5861 vars->line_speed); 5862 return -EINVAL; 5863 } 5864 5865 if (vars->duplex == DUPLEX_HALF) 5866 mode |= EMAC_MODE_HALF_DUPLEX; 5867 bnx2x_bits_en(bp, 5868 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 5869 mode); 5870 5871 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 5872 return 0; 5873 } 5874 5875 static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, 5876 struct link_params *params) 5877 { 5878 5879 u16 bank, i = 0; 5880 struct bnx2x *bp = params->bp; 5881 5882 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 5883 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 5884 CL22_WR_OVER_CL45(bp, phy, 5885 bank, 5886 MDIO_RX0_RX_EQ_BOOST, 5887 phy->rx_preemphasis[i]); 5888 } 5889 5890 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; 5891 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { 5892 CL22_WR_OVER_CL45(bp, phy, 5893 bank, 5894 MDIO_TX0_TX_DRIVER, 5895 phy->tx_preemphasis[i]); 5896 } 5897 } 5898 5899 static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, 5900 struct link_params *params, 5901 struct link_vars *vars) 5902 { 5903 struct bnx2x *bp = params->bp; 5904 u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || 5905 (params->loopback_mode == LOOPBACK_XGXS)); 5906 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 5907 if (SINGLE_MEDIA_DIRECT(params) && 5908 (params->feature_config_flags & 5909 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) 5910 bnx2x_set_preemphasis(phy, params); 5911 5912 /* 
Forced speed requested? */ 5913 if (vars->line_speed != SPEED_AUTO_NEG || 5914 (SINGLE_MEDIA_DIRECT(params) && 5915 params->loopback_mode == LOOPBACK_EXT)) { 5916 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 5917 5918 /* Disable autoneg */ 5919 bnx2x_set_autoneg(phy, params, vars, 0); 5920 5921 /* Program speed and duplex */ 5922 bnx2x_program_serdes(phy, params, vars); 5923 5924 } else { /* AN_mode */ 5925 DP(NETIF_MSG_LINK, "not SGMII, AN\n"); 5926 5927 /* AN enabled */ 5928 bnx2x_set_brcm_cl37_advertisement(phy, params); 5929 5930 /* Program duplex & pause advertisement (for aneg) */ 5931 bnx2x_set_ieee_aneg_advertisement(phy, params, 5932 vars->ieee_fc); 5933 5934 /* Enable autoneg */ 5935 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 5936 5937 /* Enable and restart AN */ 5938 bnx2x_restart_autoneg(phy, params, enable_cl73); 5939 } 5940 5941 } else { /* SGMII mode */ 5942 DP(NETIF_MSG_LINK, "SGMII\n"); 5943 5944 bnx2x_initialize_sgmii_process(phy, params, vars); 5945 } 5946 } 5947 5948 static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, 5949 struct link_params *params, 5950 struct link_vars *vars) 5951 { 5952 int rc; 5953 vars->phy_flags |= PHY_XGXS_FLAG; 5954 if ((phy->req_line_speed && 5955 ((phy->req_line_speed == SPEED_100) || 5956 (phy->req_line_speed == SPEED_10))) || 5957 (!phy->req_line_speed && 5958 (phy->speed_cap_mask >= 5959 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && 5960 (phy->speed_cap_mask < 5961 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 5962 (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) 5963 vars->phy_flags |= PHY_SGMII_FLAG; 5964 else 5965 vars->phy_flags &= ~PHY_SGMII_FLAG; 5966 5967 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 5968 bnx2x_set_aer_mmd(params, phy); 5969 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 5970 bnx2x_set_master_ln(params, phy); 5971 5972 rc = bnx2x_reset_unicore(params, phy, 0); 5973 /* Reset the SerDes and wait for reset bit return low */ 5974 if (rc) 5975 return rc; 5976 5977 bnx2x_set_aer_mmd(params, phy); 5978 /* Setting the masterLn_def again after the reset */ 5979 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { 5980 bnx2x_set_master_ln(params, phy); 5981 bnx2x_set_swap_lanes(params, phy); 5982 } 5983 5984 return rc; 5985 } 5986 5987 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, 5988 struct bnx2x_phy *phy, 5989 struct link_params *params) 5990 { 5991 u16 cnt, ctrl; 5992 /* Wait for soft reset to get cleared up to 1 sec */ 5993 for (cnt = 0; cnt < 1000; cnt++) { 5994 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) 5995 bnx2x_cl22_read(bp, phy, 5996 MDIO_PMA_REG_CTRL, &ctrl); 5997 else 5998 bnx2x_cl45_read(bp, phy, 5999 MDIO_PMA_DEVAD, 6000 MDIO_PMA_REG_CTRL, &ctrl); 6001 if (!(ctrl & (1<<15))) 6002 break; 6003 usleep_range(1000, 2000); 6004 } 6005 6006 if (cnt == 1000) 6007 netdev_err(bp->dev, "Warning: PHY was not initialized," 6008 " Port %d\n", 6009 params->port); 6010 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); 6011 return cnt; 6012 } 6013 6014 static void bnx2x_link_int_enable(struct link_params *params) 6015 { 6016 u8 port = params->port; 6017 u32 mask; 6018 struct bnx2x *bp = params->bp; 6019 6020 /* Setting the status to report on link up for either XGXS or SerDes */ 6021 if (CHIP_IS_E3(bp)) { 6022 mask = NIG_MASK_XGXS0_LINK_STATUS; 6023 if (!(SINGLE_MEDIA_DIRECT(params))) 6024 mask |= NIG_MASK_MI_INT; 6025 } else if (params->switch_cfg == SWITCH_CFG_10G) { 6026 mask = (NIG_MASK_XGXS0_LINK10G | 6027 NIG_MASK_XGXS0_LINK_STATUS); 6028 
DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); 6029 if (!(SINGLE_MEDIA_DIRECT(params)) && 6030 params->phy[INT_PHY].type != 6031 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { 6032 mask |= NIG_MASK_MI_INT; 6033 DP(NETIF_MSG_LINK, "enabled external phy int\n"); 6034 } 6035 6036 } else { /* SerDes */ 6037 mask = NIG_MASK_SERDES0_LINK_STATUS; 6038 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); 6039 if (!(SINGLE_MEDIA_DIRECT(params)) && 6040 params->phy[INT_PHY].type != 6041 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { 6042 mask |= NIG_MASK_MI_INT; 6043 DP(NETIF_MSG_LINK, "enabled external phy int\n"); 6044 } 6045 } 6046 bnx2x_bits_en(bp, 6047 NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 6048 mask); 6049 6050 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, 6051 (params->switch_cfg == SWITCH_CFG_10G), 6052 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 6053 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", 6054 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), 6055 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), 6056 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); 6057 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 6058 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 6059 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 6060 } 6061 6062 static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, 6063 u8 exp_mi_int) 6064 { 6065 u32 latch_status = 0; 6066 6067 /* Disable the MI INT ( external phy int ) by writing 1 to the 6068 * status register. Link down indication is high-active-signal, 6069 * so in this case we need to write the status to clear the XOR 6070 */ 6071 /* Read Latched signals */ 6072 latch_status = REG_RD(bp, 6073 NIG_REG_LATCH_STATUS_0 + port*8); 6074 DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status); 6075 /* Handle only those with latched-signal=up.*/ 6076 if (exp_mi_int) 6077 bnx2x_bits_en(bp, 6078 NIG_REG_STATUS_INTERRUPT_PORT0 6079 + port*4, 6080 NIG_STATUS_EMAC0_MI_INT); 6081 else 6082 bnx2x_bits_dis(bp, 6083 NIG_REG_STATUS_INTERRUPT_PORT0 6084 + port*4, 6085 NIG_STATUS_EMAC0_MI_INT); 6086 6087 if (latch_status & 1) { 6088 6089 /* For all latched-signal=up : Re-Arm Latch signals */ 6090 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, 6091 (latch_status & 0xfffe) | (latch_status & 1)); 6092 } 6093 /* For all latched-signal=up,Write original_signal to status */ 6094 } 6095 6096 static void bnx2x_link_int_ack(struct link_params *params, 6097 struct link_vars *vars, u8 is_10g_plus) 6098 { 6099 struct bnx2x *bp = params->bp; 6100 u8 port = params->port; 6101 u32 mask; 6102 /* First reset all status we assume only one line will be 6103 * change at a time 6104 */ 6105 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 6106 (NIG_STATUS_XGXS0_LINK10G | 6107 NIG_STATUS_XGXS0_LINK_STATUS | 6108 NIG_STATUS_SERDES0_LINK_STATUS)); 6109 if (vars->phy_link_up) { 6110 if (USES_WARPCORE(bp)) 6111 mask = NIG_STATUS_XGXS0_LINK_STATUS; 6112 else { 6113 if (is_10g_plus) 6114 mask = NIG_STATUS_XGXS0_LINK10G; 6115 else if (params->switch_cfg == SWITCH_CFG_10G) { 6116 /* Disable the link interrupt by writing 1 to 6117 * the relevant lane in the status register 6118 */ 6119 u32 ser_lane = 6120 ((params->lane_config & 6121 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 6122 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 6123 mask = ((1 << ser_lane) << 6124 NIG_STATUS_XGXS0_LINK_STATUS_SIZE); 6125 } else 6126 mask = NIG_STATUS_SERDES0_LINK_STATUS; 6127 } 6128 DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", 6129 
mask); 6130 bnx2x_bits_en(bp, 6131 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 6132 mask); 6133 } 6134 } 6135 6136 static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) 6137 { 6138 u8 *str_ptr = str; 6139 u32 mask = 0xf0000000; 6140 u8 shift = 8*4; 6141 u8 digit; 6142 u8 remove_leading_zeros = 1; 6143 if (*len < 10) { 6144 /* Need more than 10chars for this format */ 6145 *str_ptr = '\0'; 6146 (*len)--; 6147 return -EINVAL; 6148 } 6149 while (shift > 0) { 6150 6151 shift -= 4; 6152 digit = ((num & mask) >> shift); 6153 if (digit == 0 && remove_leading_zeros) { 6154 mask = mask >> 4; 6155 continue; 6156 } else if (digit < 0xa) 6157 *str_ptr = digit + '0'; 6158 else 6159 *str_ptr = digit - 0xa + 'a'; 6160 remove_leading_zeros = 0; 6161 str_ptr++; 6162 (*len)--; 6163 mask = mask >> 4; 6164 if (shift == 4*4) { 6165 *str_ptr = '.'; 6166 str_ptr++; 6167 (*len)--; 6168 remove_leading_zeros = 1; 6169 } 6170 } 6171 return 0; 6172 } 6173 6174 6175 static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6176 { 6177 str[0] = '\0'; 6178 (*len)--; 6179 return 0; 6180 } 6181 6182 int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version, 6183 u16 len) 6184 { 6185 struct bnx2x *bp; 6186 u32 spirom_ver = 0; 6187 int status = 0; 6188 u8 *ver_p = version; 6189 u16 remain_len = len; 6190 if (version == NULL || params == NULL) 6191 return -EINVAL; 6192 bp = params->bp; 6193 6194 /* Extract first external phy*/ 6195 version[0] = '\0'; 6196 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); 6197 6198 if (params->phy[EXT_PHY1].format_fw_ver) { 6199 status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver, 6200 ver_p, 6201 &remain_len); 6202 ver_p += (len - remain_len); 6203 } 6204 if ((params->num_phys == MAX_PHYS) && 6205 (params->phy[EXT_PHY2].ver_addr != 0)) { 6206 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); 6207 if (params->phy[EXT_PHY2].format_fw_ver) { 6208 *ver_p = '/'; 6209 ver_p++; 6210 remain_len--; 6211 status |= params->phy[EXT_PHY2].format_fw_ver( 6212 spirom_ver, 6213 ver_p, 6214 &remain_len); 6215 ver_p = version + (len - remain_len); 6216 } 6217 } 6218 *ver_p = '\0'; 6219 return status; 6220 } 6221 6222 static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, 6223 struct link_params *params) 6224 { 6225 u8 port = params->port; 6226 struct bnx2x *bp = params->bp; 6227 6228 if (phy->req_line_speed != SPEED_1000) { 6229 u32 md_devad = 0; 6230 6231 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 6232 6233 if (!CHIP_IS_E3(bp)) { 6234 /* Change the uni_phy_addr in the nig */ 6235 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 6236 port*0x18)); 6237 6238 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 6239 0x5); 6240 } 6241 6242 bnx2x_cl45_write(bp, phy, 6243 5, 6244 (MDIO_REG_BANK_AER_BLOCK + 6245 (MDIO_AER_BLOCK_AER_REG & 0xf)), 6246 0x2800); 6247 6248 bnx2x_cl45_write(bp, phy, 6249 5, 6250 (MDIO_REG_BANK_CL73_IEEEB0 + 6251 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 6252 0x6041); 6253 msleep(200); 6254 /* Set aer mmd back */ 6255 bnx2x_set_aer_mmd(params, phy); 6256 6257 if (!CHIP_IS_E3(bp)) { 6258 /* And md_devad */ 6259 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 6260 md_devad); 6261 } 6262 } else { 6263 u16 mii_ctrl; 6264 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 6265 bnx2x_cl45_read(bp, phy, 5, 6266 (MDIO_REG_BANK_COMBO_IEEE0 + 6267 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), 6268 &mii_ctrl); 6269 bnx2x_cl45_write(bp, phy, 5, 6270 (MDIO_REG_BANK_COMBO_IEEE0 + 6271 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), 6272 mii_ctrl | 6273 
MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); 6274 } 6275 } 6276 6277 int bnx2x_set_led(struct link_params *params, 6278 struct link_vars *vars, u8 mode, u32 speed) 6279 { 6280 u8 port = params->port; 6281 u16 hw_led_mode = params->hw_led_mode; 6282 int rc = 0; 6283 u8 phy_idx; 6284 u32 tmp; 6285 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6286 struct bnx2x *bp = params->bp; 6287 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 6288 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 6289 speed, hw_led_mode); 6290 /* In case */ 6291 for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) { 6292 if (params->phy[phy_idx].set_link_led) { 6293 params->phy[phy_idx].set_link_led( 6294 ¶ms->phy[phy_idx], params, mode); 6295 } 6296 } 6297 6298 switch (mode) { 6299 case LED_MODE_FRONT_PANEL_OFF: 6300 case LED_MODE_OFF: 6301 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 6302 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 6303 SHARED_HW_CFG_LED_MAC1); 6304 6305 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6306 if (params->phy[EXT_PHY1].type == 6307 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) 6308 tmp &= ~(EMAC_LED_1000MB_OVERRIDE | 6309 EMAC_LED_100MB_OVERRIDE | 6310 EMAC_LED_10MB_OVERRIDE); 6311 else 6312 tmp |= EMAC_LED_OVERRIDE; 6313 6314 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp); 6315 break; 6316 6317 case LED_MODE_OPER: 6318 /* For all other phys, OPER mode is same as ON, so in case 6319 * link is down, do nothing 6320 */ 6321 if (!vars->link_up) 6322 break; 6323 case LED_MODE_ON: 6324 if (((params->phy[EXT_PHY1].type == 6325 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || 6326 (params->phy[EXT_PHY1].type == 6327 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && 6328 CHIP_IS_E2(bp) && params->num_phys == 2) { 6329 /* This is a work-around for E2+8727 Configurations */ 6330 if (mode == LED_MODE_ON || 6331 speed == SPEED_10000){ 6332 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6333 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 6334 6335 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6336 EMAC_WR(bp, EMAC_REG_EMAC_LED, 6337 (tmp | EMAC_LED_OVERRIDE)); 6338 /* Return here without enabling traffic 6339 * LED blink and setting rate in ON mode. 6340 * In oper mode, enabling LED blink 6341 * and setting rate is needed. 6342 */ 6343 if (mode == LED_MODE_ON) 6344 return rc; 6345 } 6346 } else if (SINGLE_MEDIA_DIRECT(params)) { 6347 /* This is a work-around for HW issue found when link 6348 * is up in CL73 6349 */ 6350 if ((!CHIP_IS_E3(bp)) || 6351 (CHIP_IS_E3(bp) && 6352 mode == LED_MODE_ON)) 6353 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 6354 6355 if (CHIP_IS_E1x(bp) || 6356 CHIP_IS_E2(bp) || 6357 (mode == LED_MODE_ON)) 6358 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6359 else 6360 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 6361 hw_led_mode); 6362 } else if ((params->phy[EXT_PHY1].type == 6363 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && 6364 (mode == LED_MODE_ON)) { 6365 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6366 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6367 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 6368 EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE); 6369 /* Break here; otherwise, it'll disable the 6370 * intended override. 6371 */ 6372 break; 6373 } else { 6374 u32 nig_led_mode = ((params->hw_led_mode << 6375 SHARED_HW_CFG_LED_MODE_SHIFT) == 6376 SHARED_HW_CFG_LED_EXTPHY2) ? 
				(SHARED_HW_CFG_LED_PHY1 >>
				 SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
			       nig_led_mode);
		}

		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
		/* Set blinking rate to ~15.9Hz */
		if (CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
			       LED_BLINK_RATE_VAL_E3);
		else
			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
			       LED_BLINK_RATE_VAL_E1X_E2);
		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
		       port*4, 1);
		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
		EMAC_WR(bp, EMAC_REG_EMAC_LED,
			(tmp & (~EMAC_LED_OVERRIDE)));

		if (CHIP_IS_E1(bp) &&
		    ((speed == SPEED_2500) ||
		     (speed == SPEED_1000) ||
		     (speed == SPEED_100) ||
		     (speed == SPEED_10))) {
			/* For speeds less than 10G LED scheme is different */
			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
			       + port*4, 1);
			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
			       port*4, 0);
			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
			       port*4, 1);
		}
		break;

	default:
		rc = -EINVAL;
		DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
		   mode);
		break;
	}
	return rc;

}

/* This function comes to reflect the actual link state read DIRECTLY from the
 * HW
 */
int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
		    u8 is_serdes)
{
	struct bnx2x *bp = params->bp;
	u16 gp_status = 0, phy_index = 0;
	u8 ext_phy_link_up = 0, serdes_phy_type;
	struct link_vars temp_vars;
	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];

	if (CHIP_IS_E3(bp)) {
		u16 link_up;
		if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
		    > SPEED_10000) {
			/* Check 20G link */
			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
					1, &link_up);
			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
					1, &link_up);
			link_up &= (1<<2);
		} else {
			/* Check 10G link and below */
			u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
					MDIO_WC_REG_GP2_STATUS_GP_2_1,
					&gp_status);
			gp_status = ((gp_status >> 8) & 0xf) |
				    ((gp_status >> 12) & 0xf);
			link_up = gp_status & (1 << lane);
		}
		if (!link_up)
			return -ESRCH;
	} else {
		CL22_RD_OVER_CL45(bp, int_phy,
				  MDIO_REG_BANK_GP_STATUS,
				  MDIO_GP_STATUS_TOP_AN_STATUS1,
				  &gp_status);
		/* Link is up only if both local phy and external phy are up */
		if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
			return -ESRCH;
	}
	/* In XGXS loopback mode, do not check external PHY */
	if (params->loopback_mode == LOOPBACK_XGXS)
		return 0;

	switch (params->num_phys) {
	case 1:
		/* No external PHY */
		return 0;
	case 2:
		ext_phy_link_up = params->phy[EXT_PHY1].read_status(
			&params->phy[EXT_PHY1],
			params, &temp_vars);
		break;
	case 3: /* Dual Media */
		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
		     phy_index++) {
			serdes_phy_type = ((params->phy[phy_index].media_type ==
					    ETH_PHY_SFPP_10G_FIBER) ||
					   (params->phy[phy_index].media_type ==
					    ETH_PHY_SFP_1G_FIBER) ||
					   (params->phy[phy_index].media_type ==
					    ETH_PHY_XFP_FIBER) ||
					   (params->phy[phy_index].media_type ==
					    ETH_PHY_DA_TWINAX));

			if (is_serdes != serdes_phy_type)
				continue;
			if (params->phy[phy_index].read_status) {
				ext_phy_link_up |=
					params->phy[phy_index].read_status(
						&params->phy[phy_index],
						params, &temp_vars);
			}
		}
		break;
	}
	if (ext_phy_link_up)
		return 0;
	return -ESRCH;
}

static int bnx2x_link_initialize(struct link_params *params,
				 struct link_vars *vars)
{
	int rc = 0;
	u8 phy_index, non_ext_phy;
	struct bnx2x *bp = params->bp;
	/* In case of external phy existence, the line speed would be the
	 * line speed linked up by the external phy. In case it is direct
	 * only, then the line_speed during initialization will be
	 * equal to the req_line_speed
	 */
	vars->line_speed = params->phy[INT_PHY].req_line_speed;

	/* Initialize the internal phy in case this is a direct board
	 * (no external phys), or this board has an external phy which
	 * requires the internal phy to be initialized first.
	 */
	if (!USES_WARPCORE(bp))
		bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
	/* init ext phy and enable link state int */
	non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
		       (params->loopback_mode == LOOPBACK_XGXS));

	if (non_ext_phy ||
	    (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
		struct bnx2x_phy *phy = &params->phy[INT_PHY];
		if (vars->line_speed == SPEED_AUTO_NEG &&
		    (CHIP_IS_E1x(bp) ||
		     CHIP_IS_E2(bp)))
			bnx2x_set_parallel_detection(phy, params);
		if (params->phy[INT_PHY].config_init)
			params->phy[INT_PHY].config_init(phy, params, vars);
	}

	/* Re-read this value in case it was changed inside config_init due to
	 * limitations of optic module
	 */
	vars->line_speed = params->phy[INT_PHY].req_line_speed;

	/* Init external phy */
	if (non_ext_phy) {
		if (params->phy[INT_PHY].supported &
		    SUPPORTED_FIBRE)
			vars->link_status |= LINK_STATUS_SERDES_LINK;
	} else {
		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
		      phy_index++) {
			/* No need to initialize second phy in case of first
			 * phy only selection. In case of second phy, we do
			 * need to initialize the first phy, since they are
			 * connected.
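			 * (Illustrative note: the two external PHYs are
			 * chained, so even when only the second PHY is
			 * selected, EXT_PHY1 is still brought up; only the
			 * EXT_PHY2 init is skipped below, and only when
			 * FIRST_PHY selection is in force.)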
6558 */ 6559 if (params->phy[phy_index].supported & 6560 SUPPORTED_FIBRE) 6561 vars->link_status |= LINK_STATUS_SERDES_LINK; 6562 6563 if (phy_index == EXT_PHY2 && 6564 (bnx2x_phy_selection(params) == 6565 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { 6566 DP(NETIF_MSG_LINK, 6567 "Not initializing second phy\n"); 6568 continue; 6569 } 6570 params->phy[phy_index].config_init( 6571 ¶ms->phy[phy_index], 6572 params, vars); 6573 } 6574 } 6575 /* Reset the interrupt indication after phy was initialized */ 6576 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + 6577 params->port*4, 6578 (NIG_STATUS_XGXS0_LINK10G | 6579 NIG_STATUS_XGXS0_LINK_STATUS | 6580 NIG_STATUS_SERDES0_LINK_STATUS | 6581 NIG_MASK_MI_INT)); 6582 return rc; 6583 } 6584 6585 static void bnx2x_int_link_reset(struct bnx2x_phy *phy, 6586 struct link_params *params) 6587 { 6588 /* Reset the SerDes/XGXS */ 6589 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, 6590 (0x1ff << (params->port*16))); 6591 } 6592 6593 static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, 6594 struct link_params *params) 6595 { 6596 struct bnx2x *bp = params->bp; 6597 u8 gpio_port; 6598 /* HW reset */ 6599 if (CHIP_IS_E2(bp)) 6600 gpio_port = BP_PATH(bp); 6601 else 6602 gpio_port = params->port; 6603 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6604 MISC_REGISTERS_GPIO_OUTPUT_LOW, 6605 gpio_port); 6606 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6607 MISC_REGISTERS_GPIO_OUTPUT_LOW, 6608 gpio_port); 6609 DP(NETIF_MSG_LINK, "reset external PHY\n"); 6610 } 6611 6612 static int bnx2x_update_link_down(struct link_params *params, 6613 struct link_vars *vars) 6614 { 6615 struct bnx2x *bp = params->bp; 6616 u8 port = params->port; 6617 6618 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6619 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 6620 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; 6621 /* Indicate no mac active */ 6622 vars->mac_type = MAC_TYPE_NONE; 6623 6624 /* Update shared memory */ 6625 vars->link_status &= ~LINK_UPDATE_MASK; 6626 vars->line_speed = 0; 6627 bnx2x_update_mng(params, vars->link_status); 6628 6629 /* Activate nig drain */ 6630 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 6631 6632 /* Disable emac */ 6633 if (!CHIP_IS_E3(bp)) 6634 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6635 6636 usleep_range(10000, 20000); 6637 /* Reset BigMac/Xmac */ 6638 if (CHIP_IS_E1x(bp) || 6639 CHIP_IS_E2(bp)) 6640 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); 6641 6642 if (CHIP_IS_E3(bp)) { 6643 /* Prevent LPI Generation by chip */ 6644 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 6645 0); 6646 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), 6647 0); 6648 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | 6649 SHMEM_EEE_ACTIVE_BIT); 6650 6651 bnx2x_update_mng_eee(params, vars->eee_status); 6652 bnx2x_set_xmac_rxtx(params, 0); 6653 bnx2x_set_umac_rxtx(params, 0); 6654 } 6655 6656 return 0; 6657 } 6658 6659 static int bnx2x_update_link_up(struct link_params *params, 6660 struct link_vars *vars, 6661 u8 link_10g) 6662 { 6663 struct bnx2x *bp = params->bp; 6664 u8 phy_idx, port = params->port; 6665 int rc = 0; 6666 6667 vars->link_status |= (LINK_STATUS_LINK_UP | 6668 LINK_STATUS_PHYSICAL_LINK_FLAG); 6669 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; 6670 6671 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 6672 vars->link_status |= 6673 LINK_STATUS_TX_FLOW_CONTROL_ENABLED; 6674 6675 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) 6676 vars->link_status |= 6677 LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 6678 if 
(USES_WARPCORE(bp)) { 6679 if (link_10g) { 6680 if (bnx2x_xmac_enable(params, vars, 0) == 6681 -ESRCH) { 6682 DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); 6683 vars->link_up = 0; 6684 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 6685 vars->link_status &= ~LINK_STATUS_LINK_UP; 6686 } 6687 } else 6688 bnx2x_umac_enable(params, vars, 0); 6689 bnx2x_set_led(params, vars, 6690 LED_MODE_OPER, vars->line_speed); 6691 6692 if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && 6693 (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { 6694 DP(NETIF_MSG_LINK, "Enabling LPI assertion\n"); 6695 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + 6696 (params->port << 2), 1); 6697 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); 6698 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + 6699 (params->port << 2), 0xfc20); 6700 } 6701 } 6702 if ((CHIP_IS_E1x(bp) || 6703 CHIP_IS_E2(bp))) { 6704 if (link_10g) { 6705 if (bnx2x_bmac_enable(params, vars, 0, 1) == 6706 -ESRCH) { 6707 DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); 6708 vars->link_up = 0; 6709 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 6710 vars->link_status &= ~LINK_STATUS_LINK_UP; 6711 } 6712 6713 bnx2x_set_led(params, vars, 6714 LED_MODE_OPER, SPEED_10000); 6715 } else { 6716 rc = bnx2x_emac_program(params, vars); 6717 bnx2x_emac_enable(params, vars, 0); 6718 6719 /* AN complete? */ 6720 if ((vars->link_status & 6721 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) 6722 && (!(vars->phy_flags & PHY_SGMII_FLAG)) && 6723 SINGLE_MEDIA_DIRECT(params)) 6724 bnx2x_set_gmii_tx_driver(params); 6725 } 6726 } 6727 6728 /* PBF - link up */ 6729 if (CHIP_IS_E1x(bp)) 6730 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 6731 vars->line_speed); 6732 6733 /* Disable drain */ 6734 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 6735 6736 /* Update shared memory */ 6737 bnx2x_update_mng(params, vars->link_status); 6738 bnx2x_update_mng_eee(params, vars->eee_status); 6739 /* Check remote fault */ 6740 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 6741 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 6742 bnx2x_check_half_open_conn(params, vars, 0); 6743 break; 6744 } 6745 } 6746 msleep(20); 6747 return rc; 6748 } 6749 /* The bnx2x_link_update function should be called upon link 6750 * interrupt. 6751 * Link is considered up as follows: 6752 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs 6753 * to be up 6754 * - SINGLE_MEDIA - The link between the 577xx and the external 6755 * phy (XGXS) need to up as well as the external link of the 6756 * phy (PHY_EXT1) 6757 * - DUAL_MEDIA - The link between the 577xx and the first 6758 * external phy needs to be up, and at least one of the 2 6759 * external phy link must be up. 
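 *
 * (Roughly, as an illustration: a direct board needs only the internal
 * XGXS link; a single-media board needs the internal link plus the
 * line-side link of its external PHY; a dual-media board needs the
 * internal link plus at least one of the two external PHY links.)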
 */
int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	struct link_vars phy_vars[MAX_PHYS];
	u8 port = params->port;
	u8 link_10g_plus, phy_index;
	u8 ext_phy_link_up = 0, cur_link_up;
	int rc = 0;
	u8 is_mi_int = 0;
	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
	u8 active_external_phy = INT_PHY;
	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
	vars->link_status &= ~LINK_UPDATE_MASK;
	for (phy_index = INT_PHY; phy_index < params->num_phys;
	      phy_index++) {
		phy_vars[phy_index].flow_ctrl = 0;
		phy_vars[phy_index].link_status = 0;
		phy_vars[phy_index].line_speed = 0;
		phy_vars[phy_index].duplex = DUPLEX_FULL;
		phy_vars[phy_index].phy_link_up = 0;
		phy_vars[phy_index].link_up = 0;
		phy_vars[phy_index].fault_detected = 0;
		/* different consideration, since vars holds inner state */
		phy_vars[phy_index].eee_status = vars->eee_status;
	}

	if (USES_WARPCORE(bp))
		bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);

	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
		 port, (vars->phy_flags & PHY_XGXS_FLAG),
		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));

	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
				port*0x18) > 0);
	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
		 is_mi_int,
		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));

	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));

	/* Disable emac */
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);

	/* Step 1:
	 * Check external link change only for external phys, and apply
	 * priority selection between them in case the link on both phys
	 * is up. Note that instead of the common vars, a temporary
	 * vars argument is used since each phy may have a different link/
	 * speed/duplex result
	 */
	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
	      phy_index++) {
		struct bnx2x_phy *phy = &params->phy[phy_index];
		if (!phy->read_status)
			continue;
		/* Read link status and params of this ext phy */
		cur_link_up = phy->read_status(phy, params,
					       &phy_vars[phy_index]);
		if (cur_link_up) {
			DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
			   phy_index);
		} else {
			DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
			   phy_index);
			continue;
		}

		if (!ext_phy_link_up) {
			ext_phy_link_up = 1;
			active_external_phy = phy_index;
		} else {
			switch (bnx2x_phy_selection(params)) {
			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			/* In this option, the first PHY makes sure to pass the
			 * traffic through itself only.
			 * It's not clear how to reset the link on the second
			 * phy.
			 */
				active_external_phy = EXT_PHY1;
				break;
			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			/* In this option, the first PHY makes sure to pass the
			 * traffic through the second PHY.
			 */
				active_external_phy = EXT_PHY2;
				break;
			default:
			/* Link indication on both PHYs with the following cases
			 * is invalid:
			 * - FIRST_PHY means that second phy wasn't initialized,
			 * hence its link is expected to be down
			 * - SECOND_PHY means that first phy should not be able
			 * to link up by itself (using configuration)
			 * - DEFAULT should be overridden during initialization
			 */
				DP(NETIF_MSG_LINK, "Invalid link indication"
				   " mpc=0x%x. DISABLING LINK !!!\n",
				   params->multi_phy_config);
				ext_phy_link_up = 0;
				break;
			}
		}
	}
	prev_line_speed = vars->line_speed;
	/* Step 2:
	 * Read the status of the internal phy. In case of
	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
	 * otherwise this is the link between the 577xx and the first
	 * external phy
	 */
	if (params->phy[INT_PHY].read_status)
		params->phy[INT_PHY].read_status(
			&params->phy[INT_PHY],
			params, vars);
	/* The INT_PHY flow control resides in the vars. This includes the
	 * case where the speed or flow control are not set to AUTO.
	 * Otherwise, the active external phy flow control result is set
	 * to the vars. The ext_phy_line_speed is needed to check if the
	 * speed is different between the internal phy and the external phy.
	 * This case may be the result of an intermediate link speed change.
	 */
	if (active_external_phy > INT_PHY) {
		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
		/* Link speed is taken from the XGXS. AN and FC result from
		 * the external phy.
		 */
		vars->link_status |= phy_vars[active_external_phy].link_status;

		/* if active_external_phy is first PHY and link is up - disable
		 * TX on second external PHY
		 */
		if (active_external_phy == EXT_PHY1) {
			if (params->phy[EXT_PHY2].phy_specific_func) {
				DP(NETIF_MSG_LINK,
				   "Disabling TX on EXT_PHY2\n");
				params->phy[EXT_PHY2].phy_specific_func(
					&params->phy[EXT_PHY2],
					params, DISABLE_TX);
			}
		}

		ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
		vars->duplex = phy_vars[active_external_phy].duplex;
		if (params->phy[active_external_phy].supported &
		    SUPPORTED_FIBRE)
			vars->link_status |= LINK_STATUS_SERDES_LINK;
		else
			vars->link_status &= ~LINK_STATUS_SERDES_LINK;

		vars->eee_status = phy_vars[active_external_phy].eee_status;

		DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
		   active_external_phy);
	}

	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
	      phy_index++) {
		if (params->phy[phy_index].flags &
		    FLAGS_REARM_LATCH_SIGNAL) {
			bnx2x_rearm_latch_signal(bp, port,
						 phy_index ==
						 active_external_phy);
			break;
		}
	}
	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
	   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
	   vars->link_status, ext_phy_line_speed);
	/* Upon link speed change set the NIG into drain mode. This comes to
	 * deal with a possible FIFO glitch due to clock change when the
	 * speed is decreased without a link-down indication.
	 */
	if (vars->phy_link_up) {
		if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
		    (ext_phy_line_speed != vars->line_speed)) {
			DP(NETIF_MSG_LINK, "Internal link speed %d is"
			   " different than the external"
			   " link speed %d\n", vars->line_speed,
			   ext_phy_line_speed);
			vars->phy_link_up = 0;
		} else if (prev_line_speed != vars->line_speed) {
			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
			       0);
			usleep_range(1000, 2000);
		}
	}

	/* Anything 10G and over uses the bmac */
	link_10g_plus = (vars->line_speed >= SPEED_10000);

	bnx2x_link_int_ack(params, vars, link_10g_plus);

	/* In case the external phy link is up and the internal link is down
	 * (e.g. not yet initialized right after link initialization), the
	 * internal phy needs to be initialized here.
	 * Note that after a link down-up as a result of a cable plug, the
	 * xgxs link would probably become up again without the need to
	 * initialize it.
	 */
	if (!(SINGLE_MEDIA_DIRECT(params))) {
		DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
		   " init_preceding = %d\n", ext_phy_link_up,
		   vars->phy_link_up,
		   params->phy[EXT_PHY1].flags &
		   FLAGS_INIT_XGXS_FIRST);
		if (!(params->phy[EXT_PHY1].flags &
		      FLAGS_INIT_XGXS_FIRST)
		    && ext_phy_link_up && !vars->phy_link_up) {
			vars->line_speed = ext_phy_line_speed;
			if (vars->line_speed < SPEED_1000)
				vars->phy_flags |= PHY_SGMII_FLAG;
			else
				vars->phy_flags &= ~PHY_SGMII_FLAG;

			if (params->phy[INT_PHY].config_init)
				params->phy[INT_PHY].config_init(
					&params->phy[INT_PHY], params,
					vars);
		}
	}
	/* Link is up only if both the local phy and the external phy (in case
	 * of a non-direct board) are up and no fault is detected on the
	 * active PHY.
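	 * Roughly: link_up = phy_link_up &&
	 *	    (ext_phy_link_up || SINGLE_MEDIA_DIRECT) &&
	 *	    !fault_detected.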
6989 */ 6990 vars->link_up = (vars->phy_link_up && 6991 (ext_phy_link_up || 6992 SINGLE_MEDIA_DIRECT(params)) && 6993 (phy_vars[active_external_phy].fault_detected == 0)); 6994 6995 /* Update the PFC configuration in case it was changed */ 6996 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 6997 vars->link_status |= LINK_STATUS_PFC_ENABLED; 6998 else 6999 vars->link_status &= ~LINK_STATUS_PFC_ENABLED; 7000 7001 if (vars->link_up) 7002 rc = bnx2x_update_link_up(params, vars, link_10g_plus); 7003 else 7004 rc = bnx2x_update_link_down(params, vars); 7005 7006 /* Update MCP link status was changed */ 7007 if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX) 7008 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); 7009 7010 return rc; 7011 } 7012 7013 /*****************************************************************************/ 7014 /* External Phy section */ 7015 /*****************************************************************************/ 7016 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) 7017 { 7018 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7019 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7020 usleep_range(1000, 2000); 7021 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7022 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7023 } 7024 7025 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 7026 u32 spirom_ver, u32 ver_addr) 7027 { 7028 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", 7029 (u16)(spirom_ver>>16), (u16)spirom_ver, port); 7030 7031 if (ver_addr) 7032 REG_WR(bp, ver_addr, spirom_ver); 7033 } 7034 7035 static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, 7036 struct bnx2x_phy *phy, 7037 u8 port) 7038 { 7039 u16 fw_ver1, fw_ver2; 7040 7041 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 7042 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7043 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 7044 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 7045 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), 7046 phy->ver_addr); 7047 } 7048 7049 static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, 7050 struct bnx2x_phy *phy, 7051 struct link_vars *vars) 7052 { 7053 u16 val; 7054 bnx2x_cl45_read(bp, phy, 7055 MDIO_AN_DEVAD, 7056 MDIO_AN_REG_STATUS, &val); 7057 bnx2x_cl45_read(bp, phy, 7058 MDIO_AN_DEVAD, 7059 MDIO_AN_REG_STATUS, &val); 7060 if (val & (1<<5)) 7061 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; 7062 if ((val & (1<<0)) == 0) 7063 vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; 7064 } 7065 7066 /******************************************************************/ 7067 /* common BCM8073/BCM8727 PHY SECTION */ 7068 /******************************************************************/ 7069 static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, 7070 struct link_params *params, 7071 struct link_vars *vars) 7072 { 7073 struct bnx2x *bp = params->bp; 7074 if (phy->req_line_speed == SPEED_10 || 7075 phy->req_line_speed == SPEED_100) { 7076 vars->flow_ctrl = phy->req_flow_ctrl; 7077 return; 7078 } 7079 7080 if (bnx2x_ext_phy_resolve_fc(phy, params, vars) && 7081 (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) { 7082 u16 pause_result; 7083 u16 ld_pause; /* local */ 7084 u16 lp_pause; /* link partner */ 7085 bnx2x_cl45_read(bp, phy, 7086 MDIO_AN_DEVAD, 7087 MDIO_AN_REG_CL37_FC_LD, &ld_pause); 7088 7089 bnx2x_cl45_read(bp, phy, 7090 MDIO_AN_DEVAD, 7091 MDIO_AN_REG_CL37_FC_LP, &lp_pause); 7092 pause_result = (ld_pause & 7093 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; 7094 pause_result |= (lp_pause & 7095 
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; 7096 7097 bnx2x_pause_resolve(vars, pause_result); 7098 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", 7099 pause_result); 7100 } 7101 } 7102 static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, 7103 struct bnx2x_phy *phy, 7104 u8 port) 7105 { 7106 u32 count = 0; 7107 u16 fw_ver1, fw_msgout; 7108 int rc = 0; 7109 7110 /* Boot port from external ROM */ 7111 /* EDC grst */ 7112 bnx2x_cl45_write(bp, phy, 7113 MDIO_PMA_DEVAD, 7114 MDIO_PMA_REG_GEN_CTRL, 7115 0x0001); 7116 7117 /* Ucode reboot and rst */ 7118 bnx2x_cl45_write(bp, phy, 7119 MDIO_PMA_DEVAD, 7120 MDIO_PMA_REG_GEN_CTRL, 7121 0x008c); 7122 7123 bnx2x_cl45_write(bp, phy, 7124 MDIO_PMA_DEVAD, 7125 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 7126 7127 /* Reset internal microprocessor */ 7128 bnx2x_cl45_write(bp, phy, 7129 MDIO_PMA_DEVAD, 7130 MDIO_PMA_REG_GEN_CTRL, 7131 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 7132 7133 /* Release srst bit */ 7134 bnx2x_cl45_write(bp, phy, 7135 MDIO_PMA_DEVAD, 7136 MDIO_PMA_REG_GEN_CTRL, 7137 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 7138 7139 /* Delay 100ms per the PHY specifications */ 7140 msleep(100); 7141 7142 /* 8073 sometimes taking longer to download */ 7143 do { 7144 count++; 7145 if (count > 300) { 7146 DP(NETIF_MSG_LINK, 7147 "bnx2x_8073_8727_external_rom_boot port %x:" 7148 "Download failed. fw version = 0x%x\n", 7149 port, fw_ver1); 7150 rc = -EINVAL; 7151 break; 7152 } 7153 7154 bnx2x_cl45_read(bp, phy, 7155 MDIO_PMA_DEVAD, 7156 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7157 bnx2x_cl45_read(bp, phy, 7158 MDIO_PMA_DEVAD, 7159 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); 7160 7161 usleep_range(1000, 2000); 7162 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || 7163 ((fw_msgout & 0xff) != 0x03 && (phy->type == 7164 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); 7165 7166 /* Clear ser_boot_ctl bit */ 7167 bnx2x_cl45_write(bp, phy, 7168 MDIO_PMA_DEVAD, 7169 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 7170 bnx2x_save_bcm_spirom_ver(bp, phy, port); 7171 7172 DP(NETIF_MSG_LINK, 7173 "bnx2x_8073_8727_external_rom_boot port %x:" 7174 "Download complete. 
fw version = 0x%x\n", 7175 port, fw_ver1); 7176 7177 return rc; 7178 } 7179 7180 /******************************************************************/ 7181 /* BCM8073 PHY SECTION */ 7182 /******************************************************************/ 7183 static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) 7184 { 7185 /* This is only required for 8073A1, version 102 only */ 7186 u16 val; 7187 7188 /* Read 8073 HW revision*/ 7189 bnx2x_cl45_read(bp, phy, 7190 MDIO_PMA_DEVAD, 7191 MDIO_PMA_REG_8073_CHIP_REV, &val); 7192 7193 if (val != 1) { 7194 /* No need to workaround in 8073 A1 */ 7195 return 0; 7196 } 7197 7198 bnx2x_cl45_read(bp, phy, 7199 MDIO_PMA_DEVAD, 7200 MDIO_PMA_REG_ROM_VER2, &val); 7201 7202 /* SNR should be applied only for version 0x102 */ 7203 if (val != 0x102) 7204 return 0; 7205 7206 return 1; 7207 } 7208 7209 static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) 7210 { 7211 u16 val, cnt, cnt1 ; 7212 7213 bnx2x_cl45_read(bp, phy, 7214 MDIO_PMA_DEVAD, 7215 MDIO_PMA_REG_8073_CHIP_REV, &val); 7216 7217 if (val > 0) { 7218 /* No need to workaround in 8073 A1 */ 7219 return 0; 7220 } 7221 /* XAUI workaround in 8073 A0: */ 7222 7223 /* After loading the boot ROM and restarting Autoneg, poll 7224 * Dev1, Reg $C820: 7225 */ 7226 7227 for (cnt = 0; cnt < 1000; cnt++) { 7228 bnx2x_cl45_read(bp, phy, 7229 MDIO_PMA_DEVAD, 7230 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 7231 &val); 7232 /* If bit [14] = 0 or bit [13] = 0, continue on with 7233 * system initialization (XAUI work-around not required, as 7234 * these bits indicate 2.5G or 1G link up). 7235 */ 7236 if (!(val & (1<<14)) || !(val & (1<<13))) { 7237 DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); 7238 return 0; 7239 } else if (!(val & (1<<15))) { 7240 DP(NETIF_MSG_LINK, "bit 15 went off\n"); 7241 /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's 7242 * MSB (bit15) goes to 1 (indicating that the XAUI 7243 * workaround has completed), then continue on with 7244 * system initialization. 7245 */ 7246 for (cnt1 = 0; cnt1 < 1000; cnt1++) { 7247 bnx2x_cl45_read(bp, phy, 7248 MDIO_PMA_DEVAD, 7249 MDIO_PMA_REG_8073_XAUI_WA, &val); 7250 if (val & (1<<15)) { 7251 DP(NETIF_MSG_LINK, 7252 "XAUI workaround has completed\n"); 7253 return 0; 7254 } 7255 usleep_range(3000, 6000); 7256 } 7257 break; 7258 } 7259 usleep_range(3000, 6000); 7260 } 7261 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); 7262 return -EINVAL; 7263 } 7264 7265 static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) 7266 { 7267 /* Force KR or KX */ 7268 bnx2x_cl45_write(bp, phy, 7269 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); 7270 bnx2x_cl45_write(bp, phy, 7271 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); 7272 bnx2x_cl45_write(bp, phy, 7273 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000); 7274 bnx2x_cl45_write(bp, phy, 7275 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); 7276 } 7277 7278 static void bnx2x_8073_set_pause_cl37(struct link_params *params, 7279 struct bnx2x_phy *phy, 7280 struct link_vars *vars) 7281 { 7282 u16 cl37_val; 7283 struct bnx2x *bp = params->bp; 7284 bnx2x_cl45_read(bp, phy, 7285 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); 7286 7287 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 7288 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
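 * That table is the pause-resolution table: it defines how the local and
 * link-partner symmetric/asymmetric pause bits combine into the final
 * flow-control mode.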
*/ 7289 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 7290 if ((vars->ieee_fc & 7291 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == 7292 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { 7293 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; 7294 } 7295 if ((vars->ieee_fc & 7296 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 7297 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 7298 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 7299 } 7300 if ((vars->ieee_fc & 7301 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 7302 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 7303 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 7304 } 7305 DP(NETIF_MSG_LINK, 7306 "Ext phy AN advertize cl37 0x%x\n", cl37_val); 7307 7308 bnx2x_cl45_write(bp, phy, 7309 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); 7310 msleep(500); 7311 } 7312 7313 static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, 7314 struct link_params *params, 7315 u32 action) 7316 { 7317 struct bnx2x *bp = params->bp; 7318 switch (action) { 7319 case PHY_INIT: 7320 /* Enable LASI */ 7321 bnx2x_cl45_write(bp, phy, 7322 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); 7323 bnx2x_cl45_write(bp, phy, 7324 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); 7325 break; 7326 } 7327 } 7328 7329 static int bnx2x_8073_config_init(struct bnx2x_phy *phy, 7330 struct link_params *params, 7331 struct link_vars *vars) 7332 { 7333 struct bnx2x *bp = params->bp; 7334 u16 val = 0, tmp1; 7335 u8 gpio_port; 7336 DP(NETIF_MSG_LINK, "Init 8073\n"); 7337 7338 if (CHIP_IS_E2(bp)) 7339 gpio_port = BP_PATH(bp); 7340 else 7341 gpio_port = params->port; 7342 /* Restore normal power mode*/ 7343 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7344 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7345 7346 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7347 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7348 7349 bnx2x_8073_specific_func(phy, params, PHY_INIT); 7350 bnx2x_8073_set_pause_cl37(params, phy, vars); 7351 7352 bnx2x_cl45_read(bp, phy, 7353 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 7354 7355 bnx2x_cl45_read(bp, phy, 7356 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 7357 7358 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 7359 7360 /* Swap polarity if required - Must be done only in non-1G mode */ 7361 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { 7362 /* Configure the 8073 to swap _P and _N of the KR lines */ 7363 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n"); 7364 /* 10G Rx/Tx and 1G Tx signal polarity swap */ 7365 bnx2x_cl45_read(bp, phy, 7366 MDIO_PMA_DEVAD, 7367 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); 7368 bnx2x_cl45_write(bp, phy, 7369 MDIO_PMA_DEVAD, 7370 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, 7371 (val | (3<<9))); 7372 } 7373 7374 7375 /* Enable CL37 BAM */ 7376 if (REG_RD(bp, params->shmem_base + 7377 offsetof(struct shmem_region, dev_info. 
7378 port_hw_config[params->port].default_cfg)) & 7379 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { 7380 7381 bnx2x_cl45_read(bp, phy, 7382 MDIO_AN_DEVAD, 7383 MDIO_AN_REG_8073_BAM, &val); 7384 bnx2x_cl45_write(bp, phy, 7385 MDIO_AN_DEVAD, 7386 MDIO_AN_REG_8073_BAM, val | 1); 7387 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); 7388 } 7389 if (params->loopback_mode == LOOPBACK_EXT) { 7390 bnx2x_807x_force_10G(bp, phy); 7391 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); 7392 return 0; 7393 } else { 7394 bnx2x_cl45_write(bp, phy, 7395 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); 7396 } 7397 if (phy->req_line_speed != SPEED_AUTO_NEG) { 7398 if (phy->req_line_speed == SPEED_10000) { 7399 val = (1<<7); 7400 } else if (phy->req_line_speed == SPEED_2500) { 7401 val = (1<<5); 7402 /* Note that 2.5G works only when used with 1G 7403 * advertisement 7404 */ 7405 } else 7406 val = (1<<5); 7407 } else { 7408 val = 0; 7409 if (phy->speed_cap_mask & 7410 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 7411 val |= (1<<7); 7412 7413 /* Note that 2.5G works only when used with 1G advertisement */ 7414 if (phy->speed_cap_mask & 7415 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | 7416 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 7417 val |= (1<<5); 7418 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); 7419 } 7420 7421 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); 7422 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); 7423 7424 if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && 7425 (phy->req_line_speed == SPEED_AUTO_NEG)) || 7426 (phy->req_line_speed == SPEED_2500)) { 7427 u16 phy_ver; 7428 /* Allow 2.5G for A1 and above */ 7429 bnx2x_cl45_read(bp, phy, 7430 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, 7431 &phy_ver); 7432 DP(NETIF_MSG_LINK, "Add 2.5G\n"); 7433 if (phy_ver > 0) 7434 tmp1 |= 1; 7435 else 7436 tmp1 &= 0xfffe; 7437 } else { 7438 DP(NETIF_MSG_LINK, "Disable 2.5G\n"); 7439 tmp1 &= 0xfffe; 7440 } 7441 7442 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); 7443 /* Add support for CL37 (passive mode) II */ 7444 7445 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); 7446 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 7447 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? 7448 0x20 : 0x40))); 7449 7450 /* Add support for CL37 (passive mode) III */ 7451 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 7452 7453 /* The SNR will improve about 2db by changing BW and FEE main 7454 * tap. 
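 * (FFE here is the EDC feed-forward equalizer main tap; only 8073 A1 parts
 * running microcode version 0x102 need this, see bnx2x_8073_is_snr_needed().)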
Rest commands are executed after link is up 7455 * Change FFE main cursor to 5 in EDC register 7456 */ 7457 if (bnx2x_8073_is_snr_needed(bp, phy)) 7458 bnx2x_cl45_write(bp, phy, 7459 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, 7460 0xFB0C); 7461 7462 /* Enable FEC (Forware Error Correction) Request in the AN */ 7463 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); 7464 tmp1 |= (1<<15); 7465 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); 7466 7467 bnx2x_ext_phy_set_pause(params, phy, vars); 7468 7469 /* Restart autoneg */ 7470 msleep(500); 7471 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 7472 DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n", 7473 ((val & (1<<5)) > 0), ((val & (1<<7)) > 0)); 7474 return 0; 7475 } 7476 7477 static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, 7478 struct link_params *params, 7479 struct link_vars *vars) 7480 { 7481 struct bnx2x *bp = params->bp; 7482 u8 link_up = 0; 7483 u16 val1, val2; 7484 u16 link_status = 0; 7485 u16 an1000_status = 0; 7486 7487 bnx2x_cl45_read(bp, phy, 7488 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); 7489 7490 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); 7491 7492 /* Clear the interrupt LASI status register */ 7493 bnx2x_cl45_read(bp, phy, 7494 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); 7495 bnx2x_cl45_read(bp, phy, 7496 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1); 7497 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1); 7498 /* Clear MSG-OUT */ 7499 bnx2x_cl45_read(bp, phy, 7500 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 7501 7502 /* Check the LASI */ 7503 bnx2x_cl45_read(bp, phy, 7504 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); 7505 7506 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); 7507 7508 /* Check the link status */ 7509 bnx2x_cl45_read(bp, phy, 7510 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); 7511 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2); 7512 7513 bnx2x_cl45_read(bp, phy, 7514 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); 7515 bnx2x_cl45_read(bp, phy, 7516 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); 7517 link_up = ((val1 & 4) == 4); 7518 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); 7519 7520 if (link_up && 7521 ((phy->req_line_speed != SPEED_10000))) { 7522 if (bnx2x_8073_xaui_wa(bp, phy) != 0) 7523 return 0; 7524 } 7525 bnx2x_cl45_read(bp, phy, 7526 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); 7527 bnx2x_cl45_read(bp, phy, 7528 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); 7529 7530 /* Check the link status on 1.1.2 */ 7531 bnx2x_cl45_read(bp, phy, 7532 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); 7533 bnx2x_cl45_read(bp, phy, 7534 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); 7535 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x," 7536 "an_link_status=0x%x\n", val2, val1, an1000_status); 7537 7538 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 7539 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 7540 /* The SNR will improve about 2dbby changing the BW and FEE main 7541 * tap. The 1st write to change FFE main tap is set before 7542 * restart AN. 
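 * The remaining two writes below are applied only once the link is up: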
Change PLL Bandwidth in EDC register 7543 */ 7544 bnx2x_cl45_write(bp, phy, 7545 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 7546 0x26BC); 7547 7548 /* Change CDR Bandwidth in EDC register */ 7549 bnx2x_cl45_write(bp, phy, 7550 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, 7551 0x0333); 7552 } 7553 bnx2x_cl45_read(bp, phy, 7554 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 7555 &link_status); 7556 7557 /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ 7558 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 7559 link_up = 1; 7560 vars->line_speed = SPEED_10000; 7561 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", 7562 params->port); 7563 } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) { 7564 link_up = 1; 7565 vars->line_speed = SPEED_2500; 7566 DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n", 7567 params->port); 7568 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { 7569 link_up = 1; 7570 vars->line_speed = SPEED_1000; 7571 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", 7572 params->port); 7573 } else { 7574 link_up = 0; 7575 DP(NETIF_MSG_LINK, "port %x: External link is down\n", 7576 params->port); 7577 } 7578 7579 if (link_up) { 7580 /* Swap polarity if required */ 7581 if (params->lane_config & 7582 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { 7583 /* Configure the 8073 to swap P and N of the KR lines */ 7584 bnx2x_cl45_read(bp, phy, 7585 MDIO_XS_DEVAD, 7586 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 7587 /* Set bit 3 to invert Rx in 1G mode and clear this bit 7588 * when it`s in 10G mode. 7589 */ 7590 if (vars->line_speed == SPEED_1000) { 7591 DP(NETIF_MSG_LINK, "Swapping 1G polarity for" 7592 "the 8073\n"); 7593 val1 |= (1<<3); 7594 } else 7595 val1 &= ~(1<<3); 7596 7597 bnx2x_cl45_write(bp, phy, 7598 MDIO_XS_DEVAD, 7599 MDIO_XS_REG_8073_RX_CTRL_PCIE, 7600 val1); 7601 } 7602 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 7603 bnx2x_8073_resolve_fc(phy, params, vars); 7604 vars->duplex = DUPLEX_FULL; 7605 } 7606 7607 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 7608 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 7609 MDIO_AN_REG_LP_AUTO_NEG2, &val1); 7610 7611 if (val1 & (1<<5)) 7612 vars->link_status |= 7613 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 7614 if (val1 & (1<<7)) 7615 vars->link_status |= 7616 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 7617 } 7618 7619 return link_up; 7620 } 7621 7622 static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, 7623 struct link_params *params) 7624 { 7625 struct bnx2x *bp = params->bp; 7626 u8 gpio_port; 7627 if (CHIP_IS_E2(bp)) 7628 gpio_port = BP_PATH(bp); 7629 else 7630 gpio_port = params->port; 7631 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 7632 gpio_port); 7633 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7634 MISC_REGISTERS_GPIO_OUTPUT_LOW, 7635 gpio_port); 7636 } 7637 7638 /******************************************************************/ 7639 /* BCM8705 PHY SECTION */ 7640 /******************************************************************/ 7641 static int bnx2x_8705_config_init(struct bnx2x_phy *phy, 7642 struct link_params *params, 7643 struct link_vars *vars) 7644 { 7645 struct bnx2x *bp = params->bp; 7646 DP(NETIF_MSG_LINK, "init 8705\n"); 7647 /* Restore normal power mode*/ 7648 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7649 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 7650 /* HW reset */ 7651 bnx2x_ext_phy_hw_reset(bp, params->port); 7652 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 7653 
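	/* 0xa040 asserts the PMA/PMD soft reset (bit 15); the remaining set
	 * bits keep the 10G speed selection per the clause 45 control-1
	 * layout. Wait for the reset to complete before any further writes.
	 */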
bnx2x_wait_reset_complete(bp, phy, params); 7654 7655 bnx2x_cl45_write(bp, phy, 7656 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); 7657 bnx2x_cl45_write(bp, phy, 7658 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); 7659 bnx2x_cl45_write(bp, phy, 7660 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); 7661 bnx2x_cl45_write(bp, phy, 7662 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); 7663 /* BCM8705 doesn't have microcode, hence the 0 */ 7664 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); 7665 return 0; 7666 } 7667 7668 static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, 7669 struct link_params *params, 7670 struct link_vars *vars) 7671 { 7672 u8 link_up = 0; 7673 u16 val1, rx_sd; 7674 struct bnx2x *bp = params->bp; 7675 DP(NETIF_MSG_LINK, "read status 8705\n"); 7676 bnx2x_cl45_read(bp, phy, 7677 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); 7678 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); 7679 7680 bnx2x_cl45_read(bp, phy, 7681 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); 7682 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); 7683 7684 bnx2x_cl45_read(bp, phy, 7685 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); 7686 7687 bnx2x_cl45_read(bp, phy, 7688 MDIO_PMA_DEVAD, 0xc809, &val1); 7689 bnx2x_cl45_read(bp, phy, 7690 MDIO_PMA_DEVAD, 0xc809, &val1); 7691 7692 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1); 7693 link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0)); 7694 if (link_up) { 7695 vars->line_speed = SPEED_10000; 7696 bnx2x_ext_phy_resolve_fc(phy, params, vars); 7697 } 7698 return link_up; 7699 } 7700 7701 /******************************************************************/ 7702 /* SFP+ module Section */ 7703 /******************************************************************/ 7704 static void bnx2x_set_disable_pmd_transmit(struct link_params *params, 7705 struct bnx2x_phy *phy, 7706 u8 pmd_dis) 7707 { 7708 struct bnx2x *bp = params->bp; 7709 /* Disable transmitter only for bootcodes which can enable it afterwards 7710 * (for D3 link) 7711 */ 7712 if (pmd_dis) { 7713 if (params->feature_config_flags & 7714 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) 7715 DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); 7716 else { 7717 DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); 7718 return; 7719 } 7720 } else 7721 DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); 7722 bnx2x_cl45_write(bp, phy, 7723 MDIO_PMA_DEVAD, 7724 MDIO_PMA_REG_TX_DISABLE, pmd_dis); 7725 } 7726 7727 static u8 bnx2x_get_gpio_port(struct link_params *params) 7728 { 7729 u8 gpio_port; 7730 u32 swap_val, swap_override; 7731 struct bnx2x *bp = params->bp; 7732 if (CHIP_IS_E2(bp)) 7733 gpio_port = BP_PATH(bp); 7734 else 7735 gpio_port = params->port; 7736 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 7737 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 7738 return gpio_port ^ (swap_val && swap_override); 7739 } 7740 7741 static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params, 7742 struct bnx2x_phy *phy, 7743 u8 tx_en) 7744 { 7745 u16 val; 7746 u8 port = params->port; 7747 struct bnx2x *bp = params->bp; 7748 u32 tx_en_mode; 7749 7750 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ 7751 tx_en_mode = REG_RD(bp, params->shmem_base + 7752 offsetof(struct shmem_region, 7753 dev_info.port_hw_config[port].sfp_ctrl)) & 7754 PORT_HW_CFG_TX_LASER_MASK; 7755 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " 7756 "mode = %x\n", tx_en, port, tx_en_mode); 7757 switch (tx_en_mode) { 7758 case PORT_HW_CFG_TX_LASER_MDIO: 
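		/* In MDIO mode the TX laser is gated by bit 15 of the PMA PHY
		 * identifier register: clear it to enable transmit, set it to
		 * disable.
		 */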
7759 7760 bnx2x_cl45_read(bp, phy, 7761 MDIO_PMA_DEVAD, 7762 MDIO_PMA_REG_PHY_IDENTIFIER, 7763 &val); 7764 7765 if (tx_en) 7766 val &= ~(1<<15); 7767 else 7768 val |= (1<<15); 7769 7770 bnx2x_cl45_write(bp, phy, 7771 MDIO_PMA_DEVAD, 7772 MDIO_PMA_REG_PHY_IDENTIFIER, 7773 val); 7774 break; 7775 case PORT_HW_CFG_TX_LASER_GPIO0: 7776 case PORT_HW_CFG_TX_LASER_GPIO1: 7777 case PORT_HW_CFG_TX_LASER_GPIO2: 7778 case PORT_HW_CFG_TX_LASER_GPIO3: 7779 { 7780 u16 gpio_pin; 7781 u8 gpio_port, gpio_mode; 7782 if (tx_en) 7783 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; 7784 else 7785 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; 7786 7787 gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; 7788 gpio_port = bnx2x_get_gpio_port(params); 7789 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); 7790 break; 7791 } 7792 default: 7793 DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); 7794 break; 7795 } 7796 } 7797 7798 static void bnx2x_sfp_set_transmitter(struct link_params *params, 7799 struct bnx2x_phy *phy, 7800 u8 tx_en) 7801 { 7802 struct bnx2x *bp = params->bp; 7803 DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en); 7804 if (CHIP_IS_E3(bp)) 7805 bnx2x_sfp_e3_set_transmitter(params, phy, tx_en); 7806 else 7807 bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en); 7808 } 7809 7810 static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7811 struct link_params *params, 7812 u8 dev_addr, u16 addr, u8 byte_cnt, 7813 u8 *o_buf, u8 is_init) 7814 { 7815 struct bnx2x *bp = params->bp; 7816 u16 val = 0; 7817 u16 i; 7818 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { 7819 DP(NETIF_MSG_LINK, 7820 "Reading from eeprom is limited to 0xf\n"); 7821 return -EINVAL; 7822 } 7823 /* Set the read command byte count */ 7824 bnx2x_cl45_write(bp, phy, 7825 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 7826 (byte_cnt | (dev_addr << 8))); 7827 7828 /* Set the read command address */ 7829 bnx2x_cl45_write(bp, phy, 7830 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 7831 addr); 7832 7833 /* Activate read command */ 7834 bnx2x_cl45_write(bp, phy, 7835 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7836 0x2c0f); 7837 7838 /* Wait up to 500us for command complete status */ 7839 for (i = 0; i < 100; i++) { 7840 bnx2x_cl45_read(bp, phy, 7841 MDIO_PMA_DEVAD, 7842 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 7843 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7844 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 7845 break; 7846 udelay(5); 7847 } 7848 7849 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != 7850 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { 7851 DP(NETIF_MSG_LINK, 7852 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 7853 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 7854 return -EINVAL; 7855 } 7856 7857 /* Read the buffer */ 7858 for (i = 0; i < byte_cnt; i++) { 7859 bnx2x_cl45_read(bp, phy, 7860 MDIO_PMA_DEVAD, 7861 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); 7862 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 7863 } 7864 7865 for (i = 0; i < 100; i++) { 7866 bnx2x_cl45_read(bp, phy, 7867 MDIO_PMA_DEVAD, 7868 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 7869 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7870 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7871 return 0; 7872 usleep_range(1000, 2000); 7873 } 7874 return -EINVAL; 7875 } 7876 7877 static void bnx2x_warpcore_power_module(struct link_params *params, 7878 u8 power) 7879 { 7880 u32 pin_cfg; 7881 struct bnx2x *bp = params->bp; 7882 7883 pin_cfg = (REG_RD(bp, params->shmem_base + 7884 
offsetof(struct shmem_region, 7885 dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & 7886 PORT_HW_CFG_E3_PWR_DIS_MASK) >> 7887 PORT_HW_CFG_E3_PWR_DIS_SHIFT; 7888 7889 if (pin_cfg == PIN_CFG_NA) 7890 return; 7891 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", 7892 power, pin_cfg); 7893 /* Low ==> corresponding SFP+ module is powered 7894 * high ==> the SFP+ module is powered down 7895 */ 7896 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); 7897 } 7898 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7899 struct link_params *params, 7900 u8 dev_addr, 7901 u16 addr, u8 byte_cnt, 7902 u8 *o_buf, u8 is_init) 7903 { 7904 int rc = 0; 7905 u8 i, j = 0, cnt = 0; 7906 u32 data_array[4]; 7907 u16 addr32; 7908 struct bnx2x *bp = params->bp; 7909 7910 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { 7911 DP(NETIF_MSG_LINK, 7912 "Reading from eeprom is limited to 16 bytes\n"); 7913 return -EINVAL; 7914 } 7915 7916 /* 4 byte aligned address */ 7917 addr32 = addr & (~0x3); 7918 do { 7919 if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { 7920 bnx2x_warpcore_power_module(params, 0); 7921 /* Note that 100us are not enough here */ 7922 usleep_range(1000, 2000); 7923 bnx2x_warpcore_power_module(params, 1); 7924 } 7925 rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, 7926 data_array); 7927 } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); 7928 7929 if (rc == 0) { 7930 for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { 7931 o_buf[j] = *((u8 *)data_array + i); 7932 j++; 7933 } 7934 } 7935 7936 return rc; 7937 } 7938 7939 static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7940 struct link_params *params, 7941 u8 dev_addr, u16 addr, u8 byte_cnt, 7942 u8 *o_buf, u8 is_init) 7943 { 7944 struct bnx2x *bp = params->bp; 7945 u16 val, i; 7946 7947 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { 7948 DP(NETIF_MSG_LINK, 7949 "Reading from eeprom is limited to 0xf\n"); 7950 return -EINVAL; 7951 } 7952 7953 /* Set 2-wire transfer rate of SFP+ module EEPROM 7954 * to 100Khz since some DACs(direct attached cables) do 7955 * not work at 400Khz. 7956 */ 7957 bnx2x_cl45_write(bp, phy, 7958 MDIO_PMA_DEVAD, 7959 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, 7960 ((dev_addr << 8) | 1)); 7961 7962 /* Need to read from 1.8000 to clear it */ 7963 bnx2x_cl45_read(bp, phy, 7964 MDIO_PMA_DEVAD, 7965 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7966 &val); 7967 7968 /* Set the read command byte count */ 7969 bnx2x_cl45_write(bp, phy, 7970 MDIO_PMA_DEVAD, 7971 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 7972 ((byte_cnt < 2) ? 
2 : byte_cnt)); 7973 7974 /* Set the read command address */ 7975 bnx2x_cl45_write(bp, phy, 7976 MDIO_PMA_DEVAD, 7977 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 7978 addr); 7979 /* Set the destination address */ 7980 bnx2x_cl45_write(bp, phy, 7981 MDIO_PMA_DEVAD, 7982 0x8004, 7983 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); 7984 7985 /* Activate read command */ 7986 bnx2x_cl45_write(bp, phy, 7987 MDIO_PMA_DEVAD, 7988 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7989 0x8002); 7990 /* Wait appropriate time for two-wire command to finish before 7991 * polling the status register 7992 */ 7993 usleep_range(1000, 2000); 7994 7995 /* Wait up to 500us for command complete status */ 7996 for (i = 0; i < 100; i++) { 7997 bnx2x_cl45_read(bp, phy, 7998 MDIO_PMA_DEVAD, 7999 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 8000 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 8001 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 8002 break; 8003 udelay(5); 8004 } 8005 8006 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != 8007 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { 8008 DP(NETIF_MSG_LINK, 8009 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 8010 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 8011 return -EFAULT; 8012 } 8013 8014 /* Read the buffer */ 8015 for (i = 0; i < byte_cnt; i++) { 8016 bnx2x_cl45_read(bp, phy, 8017 MDIO_PMA_DEVAD, 8018 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); 8019 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 8020 } 8021 8022 for (i = 0; i < 100; i++) { 8023 bnx2x_cl45_read(bp, phy, 8024 MDIO_PMA_DEVAD, 8025 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 8026 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 8027 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 8028 return 0; 8029 usleep_range(1000, 2000); 8030 } 8031 8032 return -EINVAL; 8033 } 8034 int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 8035 struct link_params *params, u8 dev_addr, 8036 u16 addr, u16 byte_cnt, u8 *o_buf) 8037 { 8038 int rc = 0; 8039 struct bnx2x *bp = params->bp; 8040 u8 xfer_size; 8041 u8 *user_data = o_buf; 8042 read_sfp_module_eeprom_func_p read_func; 8043 8044 if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { 8045 DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr); 8046 return -EINVAL; 8047 } 8048 8049 switch (phy->type) { 8050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8051 read_func = bnx2x_8726_read_sfp_module_eeprom; 8052 break; 8053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 8054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: 8055 read_func = bnx2x_8727_read_sfp_module_eeprom; 8056 break; 8057 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 8058 read_func = bnx2x_warpcore_read_sfp_module_eeprom; 8059 break; 8060 default: 8061 return -EOPNOTSUPP; 8062 } 8063 8064 while (!rc && (byte_cnt > 0)) { 8065 xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ? 
8066 SFP_EEPROM_PAGE_SIZE : byte_cnt; 8067 rc = read_func(phy, params, dev_addr, addr, xfer_size, 8068 user_data, 0); 8069 byte_cnt -= xfer_size; 8070 user_data += xfer_size; 8071 addr += xfer_size; 8072 } 8073 return rc; 8074 } 8075 8076 static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, 8077 struct link_params *params, 8078 u16 *edc_mode) 8079 { 8080 struct bnx2x *bp = params->bp; 8081 u32 sync_offset = 0, phy_idx, media_types; 8082 u8 gport, val[2], check_limiting_mode = 0; 8083 *edc_mode = EDC_MODE_LIMITING; 8084 phy->media_type = ETH_PHY_UNSPECIFIED; 8085 /* First check for copper cable */ 8086 if (bnx2x_read_sfp_module_eeprom(phy, 8087 params, 8088 I2C_DEV_ADDR_A0, 8089 SFP_EEPROM_CON_TYPE_ADDR, 8090 2, 8091 (u8 *)val) != 0) { 8092 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 8093 return -EINVAL; 8094 } 8095 8096 switch (val[0]) { 8097 case SFP_EEPROM_CON_TYPE_VAL_COPPER: 8098 { 8099 u8 copper_module_type; 8100 phy->media_type = ETH_PHY_DA_TWINAX; 8101 /* Check if its active cable (includes SFP+ module) 8102 * of passive cable 8103 */ 8104 if (bnx2x_read_sfp_module_eeprom(phy, 8105 params, 8106 I2C_DEV_ADDR_A0, 8107 SFP_EEPROM_FC_TX_TECH_ADDR, 8108 1, 8109 &copper_module_type) != 0) { 8110 DP(NETIF_MSG_LINK, 8111 "Failed to read copper-cable-type" 8112 " from SFP+ EEPROM\n"); 8113 return -EINVAL; 8114 } 8115 8116 if (copper_module_type & 8117 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8118 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8119 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 8120 *edc_mode = EDC_MODE_ACTIVE_DAC; 8121 else 8122 check_limiting_mode = 1; 8123 } else if (copper_module_type & 8124 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8125 DP(NETIF_MSG_LINK, 8126 "Passive Copper cable detected\n"); 8127 *edc_mode = 8128 EDC_MODE_PASSIVE_DAC; 8129 } else { 8130 DP(NETIF_MSG_LINK, 8131 "Unknown copper-cable-type 0x%x !!!\n", 8132 copper_module_type); 8133 return -EINVAL; 8134 } 8135 break; 8136 } 8137 case SFP_EEPROM_CON_TYPE_VAL_LC: 8138 case SFP_EEPROM_CON_TYPE_VAL_RJ45: 8139 check_limiting_mode = 1; 8140 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | 8141 SFP_EEPROM_COMP_CODE_LR_MASK | 8142 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { 8143 DP(NETIF_MSG_LINK, "1G SFP module detected\n"); 8144 gport = params->port; 8145 phy->media_type = ETH_PHY_SFP_1G_FIBER; 8146 if (phy->req_line_speed != SPEED_1000) { 8147 phy->req_line_speed = SPEED_1000; 8148 if (!CHIP_IS_E1x(bp)) { 8149 gport = BP_PATH(bp) + 8150 (params->port << 1); 8151 } 8152 netdev_err(bp->dev, 8153 "Warning: Link speed was forced to 1000Mbps. 
Current SFP module in port %d is not compliant with 10G Ethernet\n", 8154 gport); 8155 } 8156 } else { 8157 int idx, cfg_idx = 0; 8158 DP(NETIF_MSG_LINK, "10G Optic module detected\n"); 8159 for (idx = INT_PHY; idx < MAX_PHYS; idx++) { 8160 if (params->phy[idx].type == phy->type) { 8161 cfg_idx = LINK_CONFIG_IDX(idx); 8162 break; 8163 } 8164 } 8165 phy->media_type = ETH_PHY_SFPP_10G_FIBER; 8166 phy->req_line_speed = params->req_line_speed[cfg_idx]; 8167 } 8168 break; 8169 default: 8170 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", 8171 val[0]); 8172 return -EINVAL; 8173 } 8174 sync_offset = params->shmem_base + 8175 offsetof(struct shmem_region, 8176 dev_info.port_hw_config[params->port].media_type); 8177 media_types = REG_RD(bp, sync_offset); 8178 /* Update media type for non-PMF sync */ 8179 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 8180 if (&(params->phy[phy_idx]) == phy) { 8181 media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << 8182 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); 8183 media_types |= ((phy->media_type & 8184 PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << 8185 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); 8186 break; 8187 } 8188 } 8189 REG_WR(bp, sync_offset, media_types); 8190 if (check_limiting_mode) { 8191 u8 options[SFP_EEPROM_OPTIONS_SIZE]; 8192 if (bnx2x_read_sfp_module_eeprom(phy, 8193 params, 8194 I2C_DEV_ADDR_A0, 8195 SFP_EEPROM_OPTIONS_ADDR, 8196 SFP_EEPROM_OPTIONS_SIZE, 8197 options) != 0) { 8198 DP(NETIF_MSG_LINK, 8199 "Failed to read Option field from module EEPROM\n"); 8200 return -EINVAL; 8201 } 8202 if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) 8203 *edc_mode = EDC_MODE_LINEAR; 8204 else 8205 *edc_mode = EDC_MODE_LIMITING; 8206 } 8207 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 8208 return 0; 8209 } 8210 /* This function read the relevant field from the module (SFP+), and verify it 8211 * is compliant with this board 8212 */ 8213 static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 8214 struct link_params *params) 8215 { 8216 struct bnx2x *bp = params->bp; 8217 u32 val, cmd; 8218 u32 fw_resp, fw_cmd_param; 8219 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; 8220 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; 8221 phy->flags &= ~FLAGS_SFP_NOT_APPROVED; 8222 val = REG_RD(bp, params->shmem_base + 8223 offsetof(struct shmem_region, dev_info. 
8224 port_feature_config[params->port].config)); 8225 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 8226 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) { 8227 DP(NETIF_MSG_LINK, "NOT enforcing module verification\n"); 8228 return 0; 8229 } 8230 8231 if (params->feature_config_flags & 8232 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) { 8233 /* Use specific phy request */ 8234 cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL; 8235 } else if (params->feature_config_flags & 8236 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) { 8237 /* Use first phy request only in case of non-dual media*/ 8238 if (DUAL_MEDIA(params)) { 8239 DP(NETIF_MSG_LINK, 8240 "FW does not support OPT MDL verification\n"); 8241 return -EINVAL; 8242 } 8243 cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL; 8244 } else { 8245 /* No support in OPT MDL detection */ 8246 DP(NETIF_MSG_LINK, 8247 "FW does not support OPT MDL verification\n"); 8248 return -EINVAL; 8249 } 8250 8251 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); 8252 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); 8253 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { 8254 DP(NETIF_MSG_LINK, "Approved module\n"); 8255 return 0; 8256 } 8257 8258 /* Format the warning message */ 8259 if (bnx2x_read_sfp_module_eeprom(phy, 8260 params, 8261 I2C_DEV_ADDR_A0, 8262 SFP_EEPROM_VENDOR_NAME_ADDR, 8263 SFP_EEPROM_VENDOR_NAME_SIZE, 8264 (u8 *)vendor_name)) 8265 vendor_name[0] = '\0'; 8266 else 8267 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 8268 if (bnx2x_read_sfp_module_eeprom(phy, 8269 params, 8270 I2C_DEV_ADDR_A0, 8271 SFP_EEPROM_PART_NO_ADDR, 8272 SFP_EEPROM_PART_NO_SIZE, 8273 (u8 *)vendor_pn)) 8274 vendor_pn[0] = '\0'; 8275 else 8276 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 8277 8278 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," 8279 " Port %d from %s part number %s\n", 8280 params->port, vendor_name, vendor_pn); 8281 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 8282 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG) 8283 phy->flags |= FLAGS_SFP_NOT_APPROVED; 8284 return -EINVAL; 8285 } 8286 8287 static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, 8288 struct link_params *params) 8289 8290 { 8291 u8 val; 8292 int rc; 8293 struct bnx2x *bp = params->bp; 8294 u16 timeout; 8295 /* Initialization time after hot-plug may take up to 300ms for 8296 * some phys type ( e.g. JDSU ) 8297 */ 8298 8299 for (timeout = 0; timeout < 60; timeout++) { 8300 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 8301 rc = bnx2x_warpcore_read_sfp_module_eeprom( 8302 phy, params, I2C_DEV_ADDR_A0, 1, 1, &val, 8303 1); 8304 else 8305 rc = bnx2x_read_sfp_module_eeprom(phy, params, 8306 I2C_DEV_ADDR_A0, 8307 1, 1, &val); 8308 if (rc == 0) { 8309 DP(NETIF_MSG_LINK, 8310 "SFP+ module initialization took %d ms\n", 8311 timeout * 5); 8312 return 0; 8313 } 8314 usleep_range(5000, 10000); 8315 } 8316 rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, 8317 1, 1, &val); 8318 return rc; 8319 } 8320 8321 static void bnx2x_8727_power_module(struct bnx2x *bp, 8322 struct bnx2x_phy *phy, 8323 u8 is_power_up) { 8324 /* Make sure GPIOs are not using for LED mode */ 8325 u16 val; 8326 /* In the GPIO register, bit 4 is use to determine if the GPIOs are 8327 * operating as INPUT or as OUTPUT. 
Bit 1 is for input, and 0 for 8328 * output 8329 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 8330 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 8331 * where the 1st bit is the over-current(only input), and 2nd bit is 8332 * for power( only output ) 8333 * 8334 * In case of NOC feature is disabled and power is up, set GPIO control 8335 * as input to enable listening of over-current indication 8336 */ 8337 if (phy->flags & FLAGS_NOC) 8338 return; 8339 if (is_power_up) 8340 val = (1<<4); 8341 else 8342 /* Set GPIO control to OUTPUT, and set the power bit 8343 * to according to the is_power_up 8344 */ 8345 val = (1<<1); 8346 8347 bnx2x_cl45_write(bp, phy, 8348 MDIO_PMA_DEVAD, 8349 MDIO_PMA_REG_8727_GPIO_CTRL, 8350 val); 8351 } 8352 8353 static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, 8354 struct bnx2x_phy *phy, 8355 u16 edc_mode) 8356 { 8357 u16 cur_limiting_mode; 8358 8359 bnx2x_cl45_read(bp, phy, 8360 MDIO_PMA_DEVAD, 8361 MDIO_PMA_REG_ROM_VER2, 8362 &cur_limiting_mode); 8363 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", 8364 cur_limiting_mode); 8365 8366 if (edc_mode == EDC_MODE_LIMITING) { 8367 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n"); 8368 bnx2x_cl45_write(bp, phy, 8369 MDIO_PMA_DEVAD, 8370 MDIO_PMA_REG_ROM_VER2, 8371 EDC_MODE_LIMITING); 8372 } else { /* LRM mode ( default )*/ 8373 8374 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 8375 8376 /* Changing to LRM mode takes quite few seconds. So do it only 8377 * if current mode is limiting (default is LRM) 8378 */ 8379 if (cur_limiting_mode != EDC_MODE_LIMITING) 8380 return 0; 8381 8382 bnx2x_cl45_write(bp, phy, 8383 MDIO_PMA_DEVAD, 8384 MDIO_PMA_REG_LRM_MODE, 8385 0); 8386 bnx2x_cl45_write(bp, phy, 8387 MDIO_PMA_DEVAD, 8388 MDIO_PMA_REG_ROM_VER2, 8389 0x128); 8390 bnx2x_cl45_write(bp, phy, 8391 MDIO_PMA_DEVAD, 8392 MDIO_PMA_REG_MISC_CTRL0, 8393 0x4008); 8394 bnx2x_cl45_write(bp, phy, 8395 MDIO_PMA_DEVAD, 8396 MDIO_PMA_REG_LRM_MODE, 8397 0xaaaa); 8398 } 8399 return 0; 8400 } 8401 8402 static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, 8403 struct bnx2x_phy *phy, 8404 u16 edc_mode) 8405 { 8406 u16 phy_identifier; 8407 u16 rom_ver2_val; 8408 bnx2x_cl45_read(bp, phy, 8409 MDIO_PMA_DEVAD, 8410 MDIO_PMA_REG_PHY_IDENTIFIER, 8411 &phy_identifier); 8412 8413 bnx2x_cl45_write(bp, phy, 8414 MDIO_PMA_DEVAD, 8415 MDIO_PMA_REG_PHY_IDENTIFIER, 8416 (phy_identifier & ~(1<<9))); 8417 8418 bnx2x_cl45_read(bp, phy, 8419 MDIO_PMA_DEVAD, 8420 MDIO_PMA_REG_ROM_VER2, 8421 &rom_ver2_val); 8422 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ 8423 bnx2x_cl45_write(bp, phy, 8424 MDIO_PMA_DEVAD, 8425 MDIO_PMA_REG_ROM_VER2, 8426 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); 8427 8428 bnx2x_cl45_write(bp, phy, 8429 MDIO_PMA_DEVAD, 8430 MDIO_PMA_REG_PHY_IDENTIFIER, 8431 (phy_identifier | (1<<9))); 8432 8433 return 0; 8434 } 8435 8436 static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, 8437 struct link_params *params, 8438 u32 action) 8439 { 8440 struct bnx2x *bp = params->bp; 8441 u16 val; 8442 switch (action) { 8443 case DISABLE_TX: 8444 bnx2x_sfp_set_transmitter(params, phy, 0); 8445 break; 8446 case ENABLE_TX: 8447 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) 8448 bnx2x_sfp_set_transmitter(params, phy, 1); 8449 break; 8450 case PHY_INIT: 8451 bnx2x_cl45_write(bp, phy, 8452 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 8453 (1<<2) | (1<<5)); 8454 bnx2x_cl45_write(bp, phy, 8455 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 8456 0); 8457 bnx2x_cl45_write(bp, phy, 8458 
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); 8459 /* Make MOD_ABS give interrupt on change */ 8460 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 8461 MDIO_PMA_REG_8727_PCS_OPT_CTRL, 8462 &val); 8463 val |= (1<<12); 8464 if (phy->flags & FLAGS_NOC) 8465 val |= (3<<5); 8466 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 8467 * status which reflect SFP+ module over-current 8468 */ 8469 if (!(phy->flags & FLAGS_NOC)) 8470 val &= 0xff8f; /* Reset bits 4-6 */ 8471 bnx2x_cl45_write(bp, phy, 8472 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, 8473 val); 8474 break; 8475 default: 8476 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 8477 action); 8478 return; 8479 } 8480 } 8481 8482 static void bnx2x_set_e1e2_module_fault_led(struct link_params *params, 8483 u8 gpio_mode) 8484 { 8485 struct bnx2x *bp = params->bp; 8486 8487 u32 fault_led_gpio = REG_RD(bp, params->shmem_base + 8488 offsetof(struct shmem_region, 8489 dev_info.port_hw_config[params->port].sfp_ctrl)) & 8490 PORT_HW_CFG_FAULT_MODULE_LED_MASK; 8491 switch (fault_led_gpio) { 8492 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: 8493 return; 8494 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: 8495 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: 8496 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: 8497 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: 8498 { 8499 u8 gpio_port = bnx2x_get_gpio_port(params); 8500 u16 gpio_pin = fault_led_gpio - 8501 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; 8502 DP(NETIF_MSG_LINK, "Set fault module-detected led " 8503 "pin %x port %x mode %x\n", 8504 gpio_pin, gpio_port, gpio_mode); 8505 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); 8506 } 8507 break; 8508 default: 8509 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", 8510 fault_led_gpio); 8511 } 8512 } 8513 8514 static void bnx2x_set_e3_module_fault_led(struct link_params *params, 8515 u8 gpio_mode) 8516 { 8517 u32 pin_cfg; 8518 u8 port = params->port; 8519 struct bnx2x *bp = params->bp; 8520 pin_cfg = (REG_RD(bp, params->shmem_base + 8521 offsetof(struct shmem_region, 8522 dev_info.port_hw_config[port].e3_sfp_ctrl)) & 8523 PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> 8524 PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; 8525 DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", 8526 gpio_mode, pin_cfg); 8527 bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); 8528 } 8529 8530 static void bnx2x_set_sfp_module_fault_led(struct link_params *params, 8531 u8 gpio_mode) 8532 { 8533 struct bnx2x *bp = params->bp; 8534 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); 8535 if (CHIP_IS_E3(bp)) { 8536 /* Low ==> if SFP+ module is supported otherwise 8537 * High ==> if SFP+ module is not on the approved vendor list 8538 */ 8539 bnx2x_set_e3_module_fault_led(params, gpio_mode); 8540 } else 8541 bnx2x_set_e1e2_module_fault_led(params, gpio_mode); 8542 } 8543 8544 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, 8545 struct link_params *params) 8546 { 8547 struct bnx2x *bp = params->bp; 8548 bnx2x_warpcore_power_module(params, 0); 8549 /* Put Warpcore in low power mode */ 8550 REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); 8551 8552 /* Put LCPLL in low power mode */ 8553 REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); 8554 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); 8555 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); 8556 } 8557 8558 static void bnx2x_power_sfp_module(struct link_params *params, 8559 struct bnx2x_phy *phy, 8560 u8 power) 8561 { 8562 struct bnx2x *bp = params->bp; 8563 DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); 8564 8565 switch (phy->type) 
{ 8566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 8567 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: 8568 bnx2x_8727_power_module(params->bp, phy, power); 8569 break; 8570 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 8571 bnx2x_warpcore_power_module(params, power); 8572 break; 8573 default: 8574 break; 8575 } 8576 } 8577 static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, 8578 struct bnx2x_phy *phy, 8579 u16 edc_mode) 8580 { 8581 u16 val = 0; 8582 u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8583 struct bnx2x *bp = params->bp; 8584 8585 u8 lane = bnx2x_get_warpcore_lane(phy, params); 8586 /* This is a global register which controls all lanes */ 8587 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 8588 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); 8589 val &= ~(0xf << (lane << 2)); 8590 8591 switch (edc_mode) { 8592 case EDC_MODE_LINEAR: 8593 case EDC_MODE_LIMITING: 8594 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8595 break; 8596 case EDC_MODE_PASSIVE_DAC: 8597 case EDC_MODE_ACTIVE_DAC: 8598 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8599 break; 8600 default: 8601 break; 8602 } 8603 8604 val |= (mode << (lane << 2)); 8605 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 8606 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); 8607 /* A must read */ 8608 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 8609 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); 8610 8611 /* Restart microcode to re-read the new mode */ 8612 bnx2x_warpcore_reset_lane(bp, phy, 1); 8613 bnx2x_warpcore_reset_lane(bp, phy, 0); 8614 8615 } 8616 8617 static void bnx2x_set_limiting_mode(struct link_params *params, 8618 struct bnx2x_phy *phy, 8619 u16 edc_mode) 8620 { 8621 switch (phy->type) { 8622 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8623 bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); 8624 break; 8625 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 8626 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: 8627 bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); 8628 break; 8629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 8630 bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); 8631 break; 8632 } 8633 } 8634 8635 int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, 8636 struct link_params *params) 8637 { 8638 struct bnx2x *bp = params->bp; 8639 u16 edc_mode; 8640 int rc = 0; 8641 8642 u32 val = REG_RD(bp, params->shmem_base + 8643 offsetof(struct shmem_region, dev_info. 
8644 port_feature_config[params->port].config)); 8645 /* Enabled transmitter by default */ 8646 bnx2x_sfp_set_transmitter(params, phy, 1); 8647 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", 8648 params->port); 8649 /* Power up module */ 8650 bnx2x_power_sfp_module(params, phy, 1); 8651 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { 8652 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 8653 return -EINVAL; 8654 } else if (bnx2x_verify_sfp_module(phy, params) != 0) { 8655 /* Check SFP+ module compatibility */ 8656 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 8657 rc = -EINVAL; 8658 /* Turn on fault module-detected led */ 8659 bnx2x_set_sfp_module_fault_led(params, 8660 MISC_REGISTERS_GPIO_HIGH); 8661 8662 /* Check if need to power down the SFP+ module */ 8663 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 8664 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { 8665 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); 8666 bnx2x_power_sfp_module(params, phy, 0); 8667 return rc; 8668 } 8669 } else { 8670 /* Turn off fault module-detected led */ 8671 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); 8672 } 8673 8674 /* Check and set limiting mode / LRM mode on 8726. On 8727 it 8675 * is done automatically 8676 */ 8677 bnx2x_set_limiting_mode(params, phy, edc_mode); 8678 8679 /* Disable transmit for this module if the module is not approved, and 8680 * laser needs to be disabled. 8681 */ 8682 if ((rc) && 8683 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 8684 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)) 8685 bnx2x_sfp_set_transmitter(params, phy, 0); 8686 8687 return rc; 8688 } 8689 8690 void bnx2x_handle_module_detect_int(struct link_params *params) 8691 { 8692 struct bnx2x *bp = params->bp; 8693 struct bnx2x_phy *phy; 8694 u32 gpio_val; 8695 u8 gpio_num, gpio_port; 8696 if (CHIP_IS_E3(bp)) { 8697 phy = ¶ms->phy[INT_PHY]; 8698 /* Always enable TX laser,will be disabled in case of fault */ 8699 bnx2x_sfp_set_transmitter(params, phy, 1); 8700 } else { 8701 phy = ¶ms->phy[EXT_PHY1]; 8702 } 8703 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, 8704 params->port, &gpio_num, &gpio_port) == 8705 -EINVAL) { 8706 DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n"); 8707 return; 8708 } 8709 8710 /* Set valid module led off */ 8711 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); 8712 8713 /* Get current gpio val reflecting module plugged in / out*/ 8714 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); 8715 8716 /* Call the handling function in case module is detected */ 8717 if (gpio_val == 0) { 8718 bnx2x_set_mdio_emac_per_phy(bp, params); 8719 bnx2x_set_aer_mmd(params, phy); 8720 8721 bnx2x_power_sfp_module(params, phy, 1); 8722 bnx2x_set_gpio_int(bp, gpio_num, 8723 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, 8724 gpio_port); 8725 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) { 8726 bnx2x_sfp_module_detection(phy, params); 8727 if (CHIP_IS_E3(bp)) { 8728 u16 rx_tx_in_reset; 8729 /* In case WC is out of reset, reconfigure the 8730 * link speed while taking into account 1G 8731 * module limitation. 
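 * The reconfiguration below runs only if the Warpcore lane is out of
 * reset and the PHY has already been initialized (PHY_INITIALIZED flag).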
8732 */ 8733 bnx2x_cl45_read(bp, phy, 8734 MDIO_WC_DEVAD, 8735 MDIO_WC_REG_DIGITAL5_MISC6, 8736 &rx_tx_in_reset); 8737 if ((!rx_tx_in_reset) && 8738 (params->link_flags & 8739 PHY_INITIALIZED)) { 8740 bnx2x_warpcore_reset_lane(bp, phy, 1); 8741 bnx2x_warpcore_config_sfi(phy, params); 8742 bnx2x_warpcore_reset_lane(bp, phy, 0); 8743 } 8744 } 8745 } else { 8746 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 8747 } 8748 } else { 8749 bnx2x_set_gpio_int(bp, gpio_num, 8750 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8751 gpio_port); 8752 /* Module was plugged out. 8753 * Disable transmit for this module 8754 */ 8755 phy->media_type = ETH_PHY_NOT_PRESENT; 8756 } 8757 } 8758 8759 /******************************************************************/ 8760 /* Used by 8706 and 8727 */ 8761 /******************************************************************/ 8762 static void bnx2x_sfp_mask_fault(struct bnx2x *bp, 8763 struct bnx2x_phy *phy, 8764 u16 alarm_status_offset, 8765 u16 alarm_ctrl_offset) 8766 { 8767 u16 alarm_status, val; 8768 bnx2x_cl45_read(bp, phy, 8769 MDIO_PMA_DEVAD, alarm_status_offset, 8770 &alarm_status); 8771 bnx2x_cl45_read(bp, phy, 8772 MDIO_PMA_DEVAD, alarm_status_offset, 8773 &alarm_status); 8774 /* Mask or enable the fault event. */ 8775 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); 8776 if (alarm_status & (1<<0)) 8777 val &= ~(1<<0); 8778 else 8779 val |= (1<<0); 8780 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); 8781 } 8782 /******************************************************************/ 8783 /* common BCM8706/BCM8726 PHY SECTION */ 8784 /******************************************************************/ 8785 static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, 8786 struct link_params *params, 8787 struct link_vars *vars) 8788 { 8789 u8 link_up = 0; 8790 u16 val1, val2, rx_sd, pcs_status; 8791 struct bnx2x *bp = params->bp; 8792 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); 8793 /* Clear RX Alarm*/ 8794 bnx2x_cl45_read(bp, phy, 8795 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); 8796 8797 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, 8798 MDIO_PMA_LASI_TXCTRL); 8799 8800 /* Clear LASI indication*/ 8801 bnx2x_cl45_read(bp, phy, 8802 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); 8803 bnx2x_cl45_read(bp, phy, 8804 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); 8805 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); 8806 8807 bnx2x_cl45_read(bp, phy, 8808 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); 8809 bnx2x_cl45_read(bp, phy, 8810 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); 8811 bnx2x_cl45_read(bp, phy, 8812 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); 8813 bnx2x_cl45_read(bp, phy, 8814 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); 8815 8816 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 8817 " link_status 0x%x\n", rx_sd, pcs_status, val2); 8818 /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status 8819 * are set, or if the autoneg bit 1 is set 8820 */ 8821 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 8822 if (link_up) { 8823 if (val2 & (1<<1)) 8824 vars->line_speed = SPEED_1000; 8825 else 8826 vars->line_speed = SPEED_10000; 8827 bnx2x_ext_phy_resolve_fc(phy, params, vars); 8828 vars->duplex = DUPLEX_FULL; 8829 } 8830 8831 /* Capture 10G link fault. Read twice to clear stale value. 
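 * (The TX-status bits are latched, so the first read flushes the old
 * value and the second read reflects the current fault state.)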
*/ 8832 if (vars->line_speed == SPEED_10000) { 8833 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 8834 MDIO_PMA_LASI_TXSTAT, &val1); 8835 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 8836 MDIO_PMA_LASI_TXSTAT, &val1); 8837 if (val1 & (1<<0)) 8838 vars->fault_detected = 1; 8839 } 8840 8841 return link_up; 8842 } 8843 8844 /******************************************************************/ 8845 /* BCM8706 PHY SECTION */ 8846 /******************************************************************/ 8847 static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, 8848 struct link_params *params, 8849 struct link_vars *vars) 8850 { 8851 u32 tx_en_mode; 8852 u16 cnt, val, tmp1; 8853 struct bnx2x *bp = params->bp; 8854 8855 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 8856 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 8857 /* HW reset */ 8858 bnx2x_ext_phy_hw_reset(bp, params->port); 8859 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 8860 bnx2x_wait_reset_complete(bp, phy, params); 8861 8862 /* Wait until fw is loaded */ 8863 for (cnt = 0; cnt < 100; cnt++) { 8864 bnx2x_cl45_read(bp, phy, 8865 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); 8866 if (val) 8867 break; 8868 usleep_range(10000, 20000); 8869 } 8870 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); 8871 if ((params->feature_config_flags & 8872 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { 8873 u8 i; 8874 u16 reg; 8875 for (i = 0; i < 4; i++) { 8876 reg = MDIO_XS_8706_REG_BANK_RX0 + 8877 i*(MDIO_XS_8706_REG_BANK_RX1 - 8878 MDIO_XS_8706_REG_BANK_RX0); 8879 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); 8880 /* Clear first 3 bits of the control */ 8881 val &= ~0x7; 8882 /* Set control bits according to configuration */ 8883 val |= (phy->rx_preemphasis[i] & 0x7); 8884 DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706" 8885 " reg 0x%x <-- val 0x%x\n", reg, val); 8886 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); 8887 } 8888 } 8889 /* Force speed */ 8890 if (phy->req_line_speed == SPEED_10000) { 8891 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); 8892 8893 bnx2x_cl45_write(bp, phy, 8894 MDIO_PMA_DEVAD, 8895 MDIO_PMA_REG_DIGITAL_CTRL, 0x400); 8896 bnx2x_cl45_write(bp, phy, 8897 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 8898 0); 8899 /* Arm LASI for link and Tx fault. 
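 * (Value 3 sets the link-status and TX-fault alarm enable bits in the
 * LASI control register.)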
*/ 8900 bnx2x_cl45_write(bp, phy, 8901 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); 8902 } else { 8903 /* Force 1Gbps using autoneg with 1G advertisement */ 8904 8905 /* Allow CL37 through CL73 */ 8906 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n"); 8907 bnx2x_cl45_write(bp, phy, 8908 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); 8909 8910 /* Enable Full-Duplex advertisement on CL37 */ 8911 bnx2x_cl45_write(bp, phy, 8912 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); 8913 /* Enable CL37 AN */ 8914 bnx2x_cl45_write(bp, phy, 8915 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 8916 /* 1G support */ 8917 bnx2x_cl45_write(bp, phy, 8918 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5)); 8919 8920 /* Enable clause 73 AN */ 8921 bnx2x_cl45_write(bp, phy, 8922 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 8923 bnx2x_cl45_write(bp, phy, 8924 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 8925 0x0400); 8926 bnx2x_cl45_write(bp, phy, 8927 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 8928 0x0004); 8929 } 8930 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 8931 8932 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low 8933 * power mode, if TX Laser is disabled 8934 */ 8935 8936 tx_en_mode = REG_RD(bp, params->shmem_base + 8937 offsetof(struct shmem_region, 8938 dev_info.port_hw_config[params->port].sfp_ctrl)) 8939 & PORT_HW_CFG_TX_LASER_MASK; 8940 8941 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { 8942 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); 8943 bnx2x_cl45_read(bp, phy, 8944 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); 8945 tmp1 |= 0x1; 8946 bnx2x_cl45_write(bp, phy, 8947 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); 8948 } 8949 8950 return 0; 8951 } 8952 8953 static int bnx2x_8706_read_status(struct bnx2x_phy *phy, 8954 struct link_params *params, 8955 struct link_vars *vars) 8956 { 8957 return bnx2x_8706_8726_read_status(phy, params, vars); 8958 } 8959 8960 /******************************************************************/ 8961 /* BCM8726 PHY SECTION */ 8962 /******************************************************************/ 8963 static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy, 8964 struct link_params *params) 8965 { 8966 struct bnx2x *bp = params->bp; 8967 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); 8968 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); 8969 } 8970 8971 static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, 8972 struct link_params *params) 8973 { 8974 struct bnx2x *bp = params->bp; 8975 /* Need to wait 100ms after reset */ 8976 msleep(100); 8977 8978 /* Micro controller re-boot */ 8979 bnx2x_cl45_write(bp, phy, 8980 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); 8981 8982 /* Set soft reset */ 8983 bnx2x_cl45_write(bp, phy, 8984 MDIO_PMA_DEVAD, 8985 MDIO_PMA_REG_GEN_CTRL, 8986 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 8987 8988 bnx2x_cl45_write(bp, phy, 8989 MDIO_PMA_DEVAD, 8990 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 8991 8992 bnx2x_cl45_write(bp, phy, 8993 MDIO_PMA_DEVAD, 8994 MDIO_PMA_REG_GEN_CTRL, 8995 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 8996 8997 /* Wait for 150ms for microcode load */ 8998 msleep(150); 8999 9000 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 9001 bnx2x_cl45_write(bp, phy, 9002 MDIO_PMA_DEVAD, 9003 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 9004 9005 msleep(200); 9006 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 9007 } 9008 9009 static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, 9010 struct link_params *params, 9011 struct link_vars *vars) 9012 { 9013 struct bnx2x *bp = 
params->bp;
        u16 val1;
        u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
        if (link_up) {
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
                                &val1);
                if (val1 & (1<<15)) {
                        DP(NETIF_MSG_LINK, "Tx is disabled\n");
                        link_up = 0;
                        vars->line_speed = 0;
                }
        }
        return link_up;
}

static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
                                  struct link_params *params,
                                  struct link_vars *vars)
{
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Initializing BCM8726\n");

        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        bnx2x_wait_reset_complete(bp, phy, params);

        bnx2x_8726_external_rom_boot(phy, params);

        /* Need to call module detection on initialization since the module
         * detection triggered by an actual module insertion might occur
         * before the driver is loaded, and when the driver is loaded it
         * resets all registers, including the transmitter
         */
        bnx2x_sfp_module_detection(phy, params);

        if (phy->req_line_speed == SPEED_1000) {
                DP(NETIF_MSG_LINK, "Setting 1G force\n");
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
                                 0x400);
        } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
                   (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
                   ((phy->speed_cap_mask &
                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
                DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
                /* Set Flow control */
                bnx2x_ext_phy_set_pause(params, phy, vars);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
                /* Enable RX-ALARM control to receive interrupt for 1G speed
                 * change
                 */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
                                 0x400);

        } else { /* Default 10G.
Set only LASI control */ 9089 bnx2x_cl45_write(bp, phy, 9090 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); 9091 } 9092 9093 /* Set TX PreEmphasis if needed */ 9094 if ((params->feature_config_flags & 9095 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { 9096 DP(NETIF_MSG_LINK, 9097 "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", 9098 phy->tx_preemphasis[0], 9099 phy->tx_preemphasis[1]); 9100 bnx2x_cl45_write(bp, phy, 9101 MDIO_PMA_DEVAD, 9102 MDIO_PMA_REG_8726_TX_CTRL1, 9103 phy->tx_preemphasis[0]); 9104 9105 bnx2x_cl45_write(bp, phy, 9106 MDIO_PMA_DEVAD, 9107 MDIO_PMA_REG_8726_TX_CTRL2, 9108 phy->tx_preemphasis[1]); 9109 } 9110 9111 return 0; 9112 9113 } 9114 9115 static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, 9116 struct link_params *params) 9117 { 9118 struct bnx2x *bp = params->bp; 9119 DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port); 9120 /* Set serial boot control for external load */ 9121 bnx2x_cl45_write(bp, phy, 9122 MDIO_PMA_DEVAD, 9123 MDIO_PMA_REG_GEN_CTRL, 0x0001); 9124 } 9125 9126 /******************************************************************/ 9127 /* BCM8727 PHY SECTION */ 9128 /******************************************************************/ 9129 9130 static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy, 9131 struct link_params *params, u8 mode) 9132 { 9133 struct bnx2x *bp = params->bp; 9134 u16 led_mode_bitmask = 0; 9135 u16 gpio_pins_bitmask = 0; 9136 u16 val; 9137 /* Only NOC flavor requires to set the LED specifically */ 9138 if (!(phy->flags & FLAGS_NOC)) 9139 return; 9140 switch (mode) { 9141 case LED_MODE_FRONT_PANEL_OFF: 9142 case LED_MODE_OFF: 9143 led_mode_bitmask = 0; 9144 gpio_pins_bitmask = 0x03; 9145 break; 9146 case LED_MODE_ON: 9147 led_mode_bitmask = 0; 9148 gpio_pins_bitmask = 0x02; 9149 break; 9150 case LED_MODE_OPER: 9151 led_mode_bitmask = 0x60; 9152 gpio_pins_bitmask = 0x11; 9153 break; 9154 } 9155 bnx2x_cl45_read(bp, phy, 9156 MDIO_PMA_DEVAD, 9157 MDIO_PMA_REG_8727_PCS_OPT_CTRL, 9158 &val); 9159 val &= 0xff8f; 9160 val |= led_mode_bitmask; 9161 bnx2x_cl45_write(bp, phy, 9162 MDIO_PMA_DEVAD, 9163 MDIO_PMA_REG_8727_PCS_OPT_CTRL, 9164 val); 9165 bnx2x_cl45_read(bp, phy, 9166 MDIO_PMA_DEVAD, 9167 MDIO_PMA_REG_8727_GPIO_CTRL, 9168 &val); 9169 val &= 0xffe0; 9170 val |= gpio_pins_bitmask; 9171 bnx2x_cl45_write(bp, phy, 9172 MDIO_PMA_DEVAD, 9173 MDIO_PMA_REG_8727_GPIO_CTRL, 9174 val); 9175 } 9176 static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, 9177 struct link_params *params) { 9178 u32 swap_val, swap_override; 9179 u8 port; 9180 /* The PHY reset is controlled by GPIO 1. 
Fake the port number
         * to cancel the swap done in set_gpio()
         */
        struct bnx2x *bp = params->bp;
        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
        swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
        port = (swap_val && swap_override) ^ 1;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}

static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
                                    struct link_params *params)
{
        struct bnx2x *bp = params->bp;
        u16 tmp1, val;
        /* Set option 1G speed */
        if ((phy->req_line_speed == SPEED_1000) ||
            (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
                DP(NETIF_MSG_LINK, "Setting 1G force\n");
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
                DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
                /* Power down the XAUI until link is up in case of dual-media
                 * and 1G
                 */
                if (DUAL_MEDIA(params)) {
                        bnx2x_cl45_read(bp, phy,
                                        MDIO_PMA_DEVAD,
                                        MDIO_PMA_REG_8727_PCS_GP, &val);
                        val |= (3<<10);
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8727_PCS_GP, val);
                }
        } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
                   ((phy->speed_cap_mask &
                     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
                   ((phy->speed_cap_mask &
                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {

                DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
        } else {
                /* Since the 8727 has only a single reset pin, the 10G
                 * registers need to be set even though they hold their
                 * default values
                 */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
                                 0x0020);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
                                 0x0008);
        }
}

static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                  struct link_params *params,
                                  struct link_vars *vars)
{
        u32 tx_en_mode;
        u16 tmp1, mod_abs, tmp2;
        struct bnx2x *bp = params->bp;
        /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */

        bnx2x_wait_reset_complete(bp, phy, params);

        DP(NETIF_MSG_LINK, "Initializing BCM8727\n");

        bnx2x_8727_specific_func(phy, params, PHY_INIT);
        /* Initially configure MOD_ABS to interrupt when the module is
         * present (bit 8)
         */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
        /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
9268 * When the EDC is off it locks onto a reference clock and avoids 9269 * becoming 'lost' 9270 */ 9271 mod_abs &= ~(1<<8); 9272 if (!(phy->flags & FLAGS_NOC)) 9273 mod_abs &= ~(1<<9); 9274 bnx2x_cl45_write(bp, phy, 9275 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9276 9277 /* Enable/Disable PHY transmitter output */ 9278 bnx2x_set_disable_pmd_transmit(params, phy, 0); 9279 9280 bnx2x_8727_power_module(bp, phy, 1); 9281 9282 bnx2x_cl45_read(bp, phy, 9283 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 9284 9285 bnx2x_cl45_read(bp, phy, 9286 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 9287 9288 bnx2x_8727_config_speed(phy, params); 9289 9290 9291 /* Set TX PreEmphasis if needed */ 9292 if ((params->feature_config_flags & 9293 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { 9294 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", 9295 phy->tx_preemphasis[0], 9296 phy->tx_preemphasis[1]); 9297 bnx2x_cl45_write(bp, phy, 9298 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, 9299 phy->tx_preemphasis[0]); 9300 9301 bnx2x_cl45_write(bp, phy, 9302 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, 9303 phy->tx_preemphasis[1]); 9304 } 9305 9306 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low 9307 * power mode, if TX Laser is disabled 9308 */ 9309 tx_en_mode = REG_RD(bp, params->shmem_base + 9310 offsetof(struct shmem_region, 9311 dev_info.port_hw_config[params->port].sfp_ctrl)) 9312 & PORT_HW_CFG_TX_LASER_MASK; 9313 9314 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { 9315 9316 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); 9317 bnx2x_cl45_read(bp, phy, 9318 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); 9319 tmp2 |= 0x1000; 9320 tmp2 &= 0xFFEF; 9321 bnx2x_cl45_write(bp, phy, 9322 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); 9323 bnx2x_cl45_read(bp, phy, 9324 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 9325 &tmp2); 9326 bnx2x_cl45_write(bp, phy, 9327 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 9328 (tmp2 & 0x7fff)); 9329 } 9330 9331 return 0; 9332 } 9333 9334 static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, 9335 struct link_params *params) 9336 { 9337 struct bnx2x *bp = params->bp; 9338 u16 mod_abs, rx_alarm_status; 9339 u32 val = REG_RD(bp, params->shmem_base + 9340 offsetof(struct shmem_region, dev_info. 9341 port_feature_config[params->port]. 9342 config)); 9343 bnx2x_cl45_read(bp, phy, 9344 MDIO_PMA_DEVAD, 9345 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 9346 if (mod_abs & (1<<8)) { 9347 9348 /* Module is absent */ 9349 DP(NETIF_MSG_LINK, 9350 "MOD_ABS indication show module is absent\n"); 9351 phy->media_type = ETH_PHY_NOT_PRESENT; 9352 /* 1. Set mod_abs to detect next module 9353 * presence event 9354 * 2. Set EDC off by setting OPTXLOS signal input to low 9355 * (bit 9). 9356 * When the EDC is off it locks onto a reference clock and 9357 * avoids becoming 'lost'. 9358 */ 9359 mod_abs &= ~(1<<8); 9360 if (!(phy->flags & FLAGS_NOC)) 9361 mod_abs &= ~(1<<9); 9362 bnx2x_cl45_write(bp, phy, 9363 MDIO_PMA_DEVAD, 9364 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9365 9366 /* Clear RX alarm since it stays up as long as 9367 * the mod_abs wasn't changed 9368 */ 9369 bnx2x_cl45_read(bp, phy, 9370 MDIO_PMA_DEVAD, 9371 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); 9372 9373 } else { 9374 /* Module is present */ 9375 DP(NETIF_MSG_LINK, 9376 "MOD_ABS indication show module is present\n"); 9377 /* First disable transmitter, and if the module is ok, the 9378 * module_detection will enable it 9379 * 1. 
Set mod_abs to detect next module absent event ( bit 8) 9380 * 2. Restore the default polarity of the OPRXLOS signal and 9381 * this signal will then correctly indicate the presence or 9382 * absence of the Rx signal. (bit 9) 9383 */ 9384 mod_abs |= (1<<8); 9385 if (!(phy->flags & FLAGS_NOC)) 9386 mod_abs |= (1<<9); 9387 bnx2x_cl45_write(bp, phy, 9388 MDIO_PMA_DEVAD, 9389 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9390 9391 /* Clear RX alarm since it stays up as long as the mod_abs 9392 * wasn't changed. This is need to be done before calling the 9393 * module detection, otherwise it will clear* the link update 9394 * alarm 9395 */ 9396 bnx2x_cl45_read(bp, phy, 9397 MDIO_PMA_DEVAD, 9398 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); 9399 9400 9401 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 9402 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 9403 bnx2x_sfp_set_transmitter(params, phy, 0); 9404 9405 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) 9406 bnx2x_sfp_module_detection(phy, params); 9407 else 9408 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 9409 9410 /* Reconfigure link speed based on module type limitations */ 9411 bnx2x_8727_config_speed(phy, params); 9412 } 9413 9414 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 9415 rx_alarm_status); 9416 /* No need to check link status in case of module plugged in/out */ 9417 } 9418 9419 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, 9420 struct link_params *params, 9421 struct link_vars *vars) 9422 9423 { 9424 struct bnx2x *bp = params->bp; 9425 u8 link_up = 0, oc_port = params->port; 9426 u16 link_status = 0; 9427 u16 rx_alarm_status, lasi_ctrl, val1; 9428 9429 /* If PHY is not initialized, do not check link status */ 9430 bnx2x_cl45_read(bp, phy, 9431 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 9432 &lasi_ctrl); 9433 if (!lasi_ctrl) 9434 return 0; 9435 9436 /* Check the LASI on Rx */ 9437 bnx2x_cl45_read(bp, phy, 9438 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, 9439 &rx_alarm_status); 9440 vars->line_speed = 0; 9441 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); 9442 9443 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, 9444 MDIO_PMA_LASI_TXCTRL); 9445 9446 bnx2x_cl45_read(bp, phy, 9447 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); 9448 9449 DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); 9450 9451 /* Clear MSG-OUT */ 9452 bnx2x_cl45_read(bp, phy, 9453 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 9454 9455 /* If a module is present and there is need to check 9456 * for over current 9457 */ 9458 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { 9459 /* Check over-current using 8727 GPIO0 input*/ 9460 bnx2x_cl45_read(bp, phy, 9461 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, 9462 &val1); 9463 9464 if ((val1 & (1<<8)) == 0) { 9465 if (!CHIP_IS_E1x(bp)) 9466 oc_port = BP_PATH(bp) + (params->port << 1); 9467 DP(NETIF_MSG_LINK, 9468 "8727 Power fault has been detected on port %d\n", 9469 oc_port); 9470 netdev_err(bp->dev, "Error: Power fault on Port %d has " 9471 "been detected and the power to " 9472 "that SFP+ module has been removed " 9473 "to prevent failure of the card. 
" 9474 "Please remove the SFP+ module and " 9475 "restart the system to clear this " 9476 "error.\n", 9477 oc_port); 9478 /* Disable all RX_ALARMs except for mod_abs */ 9479 bnx2x_cl45_write(bp, phy, 9480 MDIO_PMA_DEVAD, 9481 MDIO_PMA_LASI_RXCTRL, (1<<5)); 9482 9483 bnx2x_cl45_read(bp, phy, 9484 MDIO_PMA_DEVAD, 9485 MDIO_PMA_REG_PHY_IDENTIFIER, &val1); 9486 /* Wait for module_absent_event */ 9487 val1 |= (1<<8); 9488 bnx2x_cl45_write(bp, phy, 9489 MDIO_PMA_DEVAD, 9490 MDIO_PMA_REG_PHY_IDENTIFIER, val1); 9491 /* Clear RX alarm */ 9492 bnx2x_cl45_read(bp, phy, 9493 MDIO_PMA_DEVAD, 9494 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); 9495 bnx2x_8727_power_module(params->bp, phy, 0); 9496 return 0; 9497 } 9498 } /* Over current check */ 9499 9500 /* When module absent bit is set, check module */ 9501 if (rx_alarm_status & (1<<5)) { 9502 bnx2x_8727_handle_mod_abs(phy, params); 9503 /* Enable all mod_abs and link detection bits */ 9504 bnx2x_cl45_write(bp, phy, 9505 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 9506 ((1<<5) | (1<<2))); 9507 } 9508 9509 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { 9510 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n"); 9511 bnx2x_sfp_set_transmitter(params, phy, 1); 9512 } else { 9513 DP(NETIF_MSG_LINK, "Tx is disabled\n"); 9514 return 0; 9515 } 9516 9517 bnx2x_cl45_read(bp, phy, 9518 MDIO_PMA_DEVAD, 9519 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 9520 9521 /* Bits 0..2 --> speed detected, 9522 * Bits 13..15--> link is down 9523 */ 9524 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 9525 link_up = 1; 9526 vars->line_speed = SPEED_10000; 9527 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", 9528 params->port); 9529 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { 9530 link_up = 1; 9531 vars->line_speed = SPEED_1000; 9532 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", 9533 params->port); 9534 } else { 9535 link_up = 0; 9536 DP(NETIF_MSG_LINK, "port %x: External link is down\n", 9537 params->port); 9538 } 9539 9540 /* Capture 10G link fault. */ 9541 if (vars->line_speed == SPEED_10000) { 9542 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 9543 MDIO_PMA_LASI_TXSTAT, &val1); 9544 9545 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 9546 MDIO_PMA_LASI_TXSTAT, &val1); 9547 9548 if (val1 & (1<<0)) { 9549 vars->fault_detected = 1; 9550 } 9551 } 9552 9553 if (link_up) { 9554 bnx2x_ext_phy_resolve_fc(phy, params, vars); 9555 vars->duplex = DUPLEX_FULL; 9556 DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex); 9557 } 9558 9559 if ((DUAL_MEDIA(params)) && 9560 (phy->req_line_speed == SPEED_1000)) { 9561 bnx2x_cl45_read(bp, phy, 9562 MDIO_PMA_DEVAD, 9563 MDIO_PMA_REG_8727_PCS_GP, &val1); 9564 /* In case of dual-media board and 1G, power up the XAUI side, 9565 * otherwise power it down. 
For 10G it is done automatically 9566 */ 9567 if (link_up) 9568 val1 &= ~(3<<10); 9569 else 9570 val1 |= (3<<10); 9571 bnx2x_cl45_write(bp, phy, 9572 MDIO_PMA_DEVAD, 9573 MDIO_PMA_REG_8727_PCS_GP, val1); 9574 } 9575 return link_up; 9576 } 9577 9578 static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, 9579 struct link_params *params) 9580 { 9581 struct bnx2x *bp = params->bp; 9582 9583 /* Enable/Disable PHY transmitter output */ 9584 bnx2x_set_disable_pmd_transmit(params, phy, 1); 9585 9586 /* Disable Transmitter */ 9587 bnx2x_sfp_set_transmitter(params, phy, 0); 9588 /* Clear LASI */ 9589 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); 9590 9591 } 9592 9593 /******************************************************************/ 9594 /* BCM8481/BCM84823/BCM84833 PHY SECTION */ 9595 /******************************************************************/ 9596 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, 9597 struct bnx2x *bp, 9598 u8 port) 9599 { 9600 u16 val, fw_ver2, cnt, i; 9601 static struct bnx2x_reg_set reg_set[] = { 9602 {MDIO_PMA_DEVAD, 0xA819, 0x0014}, 9603 {MDIO_PMA_DEVAD, 0xA81A, 0xc200}, 9604 {MDIO_PMA_DEVAD, 0xA81B, 0x0000}, 9605 {MDIO_PMA_DEVAD, 0xA81C, 0x0300}, 9606 {MDIO_PMA_DEVAD, 0xA817, 0x0009} 9607 }; 9608 u16 fw_ver1; 9609 9610 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9611 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { 9612 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9613 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9614 phy->ver_addr); 9615 } else { 9616 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9617 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9618 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 9619 bnx2x_cl45_write(bp, phy, reg_set[i].devad, 9620 reg_set[i].reg, reg_set[i].val); 9621 9622 for (cnt = 0; cnt < 100; cnt++) { 9623 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 9624 if (val & 1) 9625 break; 9626 udelay(5); 9627 } 9628 if (cnt == 100) { 9629 DP(NETIF_MSG_LINK, "Unable to read 848xx " 9630 "phy fw version(1)\n"); 9631 bnx2x_save_spirom_version(bp, port, 0, 9632 phy->ver_addr); 9633 return; 9634 } 9635 9636 9637 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ 9638 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); 9639 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 9640 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); 9641 for (cnt = 0; cnt < 100; cnt++) { 9642 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 9643 if (val & 1) 9644 break; 9645 udelay(5); 9646 } 9647 if (cnt == 100) { 9648 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw " 9649 "version(2)\n"); 9650 bnx2x_save_spirom_version(bp, port, 0, 9651 phy->ver_addr); 9652 return; 9653 } 9654 9655 /* lower 16 bits of the register SPI_FW_STATUS */ 9656 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); 9657 /* upper 16 bits of register SPI_FW_STATUS */ 9658 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); 9659 9660 bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, 9661 phy->ver_addr); 9662 } 9663 9664 } 9665 static void bnx2x_848xx_set_led(struct bnx2x *bp, 9666 struct bnx2x_phy *phy) 9667 { 9668 u16 val, offset, i; 9669 static struct bnx2x_reg_set reg_set[] = { 9670 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9671 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9672 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9673 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, 9674 
{MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9675 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9676 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9677 }; 9678 /* PHYC_CTL_LED_CTL */ 9679 bnx2x_cl45_read(bp, phy, 9680 MDIO_PMA_DEVAD, 9681 MDIO_PMA_REG_8481_LINK_SIGNAL, &val); 9682 val &= 0xFE00; 9683 val |= 0x0092; 9684 9685 bnx2x_cl45_write(bp, phy, 9686 MDIO_PMA_DEVAD, 9687 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9688 9689 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 9690 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 9691 reg_set[i].val); 9692 9693 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9694 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) 9695 offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; 9696 else 9697 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9698 9699 /* stretch_en for LED3*/ 9700 bnx2x_cl45_read_or_write(bp, phy, 9701 MDIO_PMA_DEVAD, offset, 9702 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9703 } 9704 9705 static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9706 struct link_params *params, 9707 u32 action) 9708 { 9709 struct bnx2x *bp = params->bp; 9710 switch (action) { 9711 case PHY_INIT: 9712 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 9713 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { 9714 /* Save spirom version */ 9715 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9716 } 9717 /* This phy uses the NIG latch mechanism since link indication 9718 * arrives through its LED4 and not via its LASI signal, so we 9719 * get steady signal instead of clear on read 9720 */ 9721 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 9722 1 << NIG_LATCH_BC_ENABLE_MI_INT); 9723 9724 bnx2x_848xx_set_led(bp, phy); 9725 break; 9726 } 9727 } 9728 9729 static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, 9730 struct link_params *params, 9731 struct link_vars *vars) 9732 { 9733 struct bnx2x *bp = params->bp; 9734 u16 autoneg_val, an_1000_val, an_10_100_val; 9735 9736 bnx2x_848xx_specific_func(phy, params, PHY_INIT); 9737 bnx2x_cl45_write(bp, phy, 9738 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); 9739 9740 /* set 1000 speed advertisement */ 9741 bnx2x_cl45_read(bp, phy, 9742 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9743 &an_1000_val); 9744 9745 bnx2x_ext_phy_set_pause(params, phy, vars); 9746 bnx2x_cl45_read(bp, phy, 9747 MDIO_AN_DEVAD, 9748 MDIO_AN_REG_8481_LEGACY_AN_ADV, 9749 &an_10_100_val); 9750 bnx2x_cl45_read(bp, phy, 9751 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, 9752 &autoneg_val); 9753 /* Disable forced speed */ 9754 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); 9755 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8)); 9756 9757 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9758 (phy->speed_cap_mask & 9759 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 9760 (phy->req_line_speed == SPEED_1000)) { 9761 an_1000_val |= (1<<8); 9762 autoneg_val |= (1<<9 | 1<<12); 9763 if (phy->req_duplex == DUPLEX_FULL) 9764 an_1000_val |= (1<<9); 9765 DP(NETIF_MSG_LINK, "Advertising 1G\n"); 9766 } else 9767 an_1000_val &= ~((1<<8) | (1<<9)); 9768 9769 bnx2x_cl45_write(bp, phy, 9770 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9771 an_1000_val); 9772 9773 /* Set 10/100 speed advertisement */ 9774 if (phy->req_line_speed == SPEED_AUTO_NEG) { 9775 if (phy->speed_cap_mask & 9776 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { 9777 /* Enable autoneg and restart autoneg for legacy speeds 9778 */ 9779 autoneg_val |= (1<<9 | 1<<12); 9780 an_10_100_val |= (1<<8); 9781 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); 9782 } 
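                /* The advertisement bits used in this block follow the usual
                 * MII conventions: bits 5, 6, 7 and 8 of an_10_100_val select
                 * 10M-HD, 10M-FD, 100M-HD and 100M-FD respectively, while
                 * bits 12 and 9 of autoneg_val enable and restart autoneg.
                 */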
                if (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
                        /* Enable autoneg and restart autoneg for legacy speeds
                         */
                        autoneg_val |= (1<<9 | 1<<12);
                        an_10_100_val |= (1<<7);
                        DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
                }

                if ((phy->speed_cap_mask &
                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
                    (phy->supported & SUPPORTED_10baseT_Full)) {
                        an_10_100_val |= (1<<6);
                        autoneg_val |= (1<<9 | 1<<12);
                        DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
                }

                if ((phy->speed_cap_mask &
                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
                    (phy->supported & SUPPORTED_10baseT_Half)) {
                        an_10_100_val |= (1<<5);
                        autoneg_val |= (1<<9 | 1<<12);
                        DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
                }
        }

        /* Only 10/100 are allowed to work in FORCE mode */
        if ((phy->req_line_speed == SPEED_100) &&
            (phy->supported &
             (SUPPORTED_100baseT_Half |
              SUPPORTED_100baseT_Full))) {
                autoneg_val |= (1<<13);
                /* Enable AUTO-MDIX when autoneg is disabled */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
                                 (1<<15 | 1<<9 | 7<<0));
                /* The PHY needs this set even for forced link. */
                an_10_100_val |= (1<<8) | (1<<7);
                DP(NETIF_MSG_LINK, "Setting 100M force\n");
        }
        if ((phy->req_line_speed == SPEED_10) &&
            (phy->supported &
             (SUPPORTED_10baseT_Half |
              SUPPORTED_10baseT_Full))) {
                /* Enable AUTO-MDIX when autoneg is disabled */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
                                 (1<<15 | 1<<9 | 7<<0));
                DP(NETIF_MSG_LINK, "Setting 10M force\n");
        }

        bnx2x_cl45_write(bp, phy,
                         MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
                         an_10_100_val);

        if (phy->req_duplex == DUPLEX_FULL)
                autoneg_val |= (1<<8);

        /* Always write this if this is not 84833/4.
         * For 84833/4, write it only when it's a forced speed.
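         * Concretely, the write below is skipped on 84833/4 whenever the
         * autoneg-enable bit (bit 12 of autoneg_val) is set.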
9844 */ 9845 if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 9846 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || 9847 ((autoneg_val & (1<<12)) == 0)) 9848 bnx2x_cl45_write(bp, phy, 9849 MDIO_AN_DEVAD, 9850 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); 9851 9852 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9853 (phy->speed_cap_mask & 9854 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || 9855 (phy->req_line_speed == SPEED_10000)) { 9856 DP(NETIF_MSG_LINK, "Advertising 10G\n"); 9857 /* Restart autoneg for 10G*/ 9858 9859 bnx2x_cl45_read_or_write( 9860 bp, phy, 9861 MDIO_AN_DEVAD, 9862 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 9863 0x1000); 9864 bnx2x_cl45_write(bp, phy, 9865 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 9866 0x3200); 9867 } else 9868 bnx2x_cl45_write(bp, phy, 9869 MDIO_AN_DEVAD, 9870 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 9871 1); 9872 9873 return 0; 9874 } 9875 9876 static int bnx2x_8481_config_init(struct bnx2x_phy *phy, 9877 struct link_params *params, 9878 struct link_vars *vars) 9879 { 9880 struct bnx2x *bp = params->bp; 9881 /* Restore normal power mode*/ 9882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 9883 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 9884 9885 /* HW reset */ 9886 bnx2x_ext_phy_hw_reset(bp, params->port); 9887 bnx2x_wait_reset_complete(bp, phy, params); 9888 9889 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 9890 return bnx2x_848xx_cmn_config_init(phy, params, vars); 9891 } 9892 9893 #define PHY84833_CMDHDLR_WAIT 300 9894 #define PHY84833_CMDHDLR_MAX_ARGS 5 9895 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 9896 struct link_params *params, u16 fw_cmd, 9897 u16 cmd_args[], int argc) 9898 { 9899 int idx; 9900 u16 val; 9901 struct bnx2x *bp = params->bp; 9902 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 9903 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9904 MDIO_84833_CMD_HDLR_STATUS, 9905 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 9906 for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { 9907 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 9908 MDIO_84833_CMD_HDLR_STATUS, &val); 9909 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) 9910 break; 9911 usleep_range(1000, 2000); 9912 } 9913 if (idx >= PHY84833_CMDHDLR_WAIT) { 9914 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 9915 return -EINVAL; 9916 } 9917 9918 /* Prepare argument(s) and issue command */ 9919 for (idx = 0; idx < argc; idx++) { 9920 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9921 MDIO_84833_CMD_HDLR_DATA1 + idx, 9922 cmd_args[idx]); 9923 } 9924 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9925 MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); 9926 for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { 9927 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 9928 MDIO_84833_CMD_HDLR_STATUS, &val); 9929 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || 9930 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) 9931 break; 9932 usleep_range(1000, 2000); 9933 } 9934 if ((idx >= PHY84833_CMDHDLR_WAIT) || 9935 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 9936 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 9937 return -EINVAL; 9938 } 9939 /* Gather returning data */ 9940 for (idx = 0; idx < argc; idx++) { 9941 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 9942 MDIO_84833_CMD_HDLR_DATA1 + idx, 9943 &cmd_args[idx]); 9944 } 9945 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9946 MDIO_84833_CMD_HDLR_STATUS, 9947 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 9948 return 0; 9949 } 9950 9951 static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, 9952 struct link_params *params, 9953 struct link_vars *vars) 9954 { 9955 u32 pair_swap; 9956 u16 
data[PHY84833_CMDHDLR_MAX_ARGS]; 9957 int status; 9958 struct bnx2x *bp = params->bp; 9959 9960 /* Check for configuration. */ 9961 pair_swap = REG_RD(bp, params->shmem_base + 9962 offsetof(struct shmem_region, 9963 dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & 9964 PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; 9965 9966 if (pair_swap == 0) 9967 return 0; 9968 9969 /* Only the second argument is used for this command */ 9970 data[1] = (u16)pair_swap; 9971 9972 status = bnx2x_84833_cmd_hdlr(phy, params, 9973 PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); 9974 if (status == 0) 9975 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 9976 9977 return status; 9978 } 9979 9980 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, 9981 u32 shmem_base_path[], 9982 u32 chip_id) 9983 { 9984 u32 reset_pin[2]; 9985 u32 idx; 9986 u8 reset_gpios; 9987 if (CHIP_IS_E3(bp)) { 9988 /* Assume that these will be GPIOs, not EPIOs. */ 9989 for (idx = 0; idx < 2; idx++) { 9990 /* Map config param to register bit. */ 9991 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + 9992 offsetof(struct shmem_region, 9993 dev_info.port_hw_config[0].e3_cmn_pin_cfg)); 9994 reset_pin[idx] = (reset_pin[idx] & 9995 PORT_HW_CFG_E3_PHY_RESET_MASK) >> 9996 PORT_HW_CFG_E3_PHY_RESET_SHIFT; 9997 reset_pin[idx] -= PIN_CFG_GPIO0_P0; 9998 reset_pin[idx] = (1 << reset_pin[idx]); 9999 } 10000 reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); 10001 } else { 10002 /* E2, look from diff place of shmem. */ 10003 for (idx = 0; idx < 2; idx++) { 10004 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + 10005 offsetof(struct shmem_region, 10006 dev_info.port_hw_config[0].default_cfg)); 10007 reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; 10008 reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; 10009 reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; 10010 reset_pin[idx] = (1 << reset_pin[idx]); 10011 } 10012 reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); 10013 } 10014 10015 return reset_gpios; 10016 } 10017 10018 static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, 10019 struct link_params *params) 10020 { 10021 struct bnx2x *bp = params->bp; 10022 u8 reset_gpios; 10023 u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + 10024 offsetof(struct shmem2_region, 10025 other_shmem_base_addr)); 10026 10027 u32 shmem_base_path[2]; 10028 10029 /* Work around for 84833 LED failure inside RESET status */ 10030 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 10031 MDIO_AN_REG_8481_LEGACY_MII_CTRL, 10032 MDIO_AN_REG_8481_MII_CTRL_FORCE_1G); 10033 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 10034 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, 10035 MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF); 10036 10037 shmem_base_path[0] = params->shmem_base; 10038 shmem_base_path[1] = other_shmem_base_addr; 10039 10040 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, 10041 params->chip_id); 10042 10043 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); 10044 udelay(10); 10045 DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", 10046 reset_gpios); 10047 10048 return 0; 10049 } 10050 10051 static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, 10052 struct link_params *params, 10053 struct link_vars *vars) 10054 { 10055 int rc; 10056 struct bnx2x *bp = params->bp; 10057 u16 cmd_args = 0; 10058 10059 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10060 10061 /* Prevent Phy from working in EEE and advertising it */ 10062 rc = bnx2x_84833_cmd_hdlr(phy, params, 10063 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 
1); 10064 if (rc) { 10065 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10066 return rc; 10067 } 10068 10069 return bnx2x_eee_disable(phy, params, vars); 10070 } 10071 10072 static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, 10073 struct link_params *params, 10074 struct link_vars *vars) 10075 { 10076 int rc; 10077 struct bnx2x *bp = params->bp; 10078 u16 cmd_args = 1; 10079 10080 rc = bnx2x_84833_cmd_hdlr(phy, params, 10081 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); 10082 if (rc) { 10083 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10084 return rc; 10085 } 10086 10087 return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); 10088 } 10089 10090 #define PHY84833_CONSTANT_LATENCY 1193 10091 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, 10092 struct link_params *params, 10093 struct link_vars *vars) 10094 { 10095 struct bnx2x *bp = params->bp; 10096 u8 port, initialize = 1; 10097 u16 val; 10098 u32 actual_phy_selection; 10099 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; 10100 int rc = 0; 10101 10102 usleep_range(1000, 2000); 10103 10104 if (!(CHIP_IS_E1x(bp))) 10105 port = BP_PATH(bp); 10106 else 10107 port = params->port; 10108 10109 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { 10110 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 10111 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 10112 port); 10113 } else { 10114 /* MDIO reset */ 10115 bnx2x_cl45_write(bp, phy, 10116 MDIO_PMA_DEVAD, 10117 MDIO_PMA_REG_CTRL, 0x8000); 10118 } 10119 10120 bnx2x_wait_reset_complete(bp, phy, params); 10121 10122 /* Wait for GPHY to come out of reset */ 10123 msleep(50); 10124 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 10125 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { 10126 /* BCM84823 requires that XGXS links up first @ 10G for normal 10127 * behavior. 10128 */ 10129 u16 temp; 10130 temp = vars->line_speed; 10131 vars->line_speed = SPEED_10000; 10132 bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); 10133 bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); 10134 vars->line_speed = temp; 10135 } 10136 10137 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10138 MDIO_CTL_REG_84823_MEDIA, &val); 10139 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | 10140 MDIO_CTL_REG_84823_MEDIA_LINE_MASK | 10141 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | 10142 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | 10143 MDIO_CTL_REG_84823_MEDIA_FIBER_1G); 10144 10145 if (CHIP_IS_E3(bp)) { 10146 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | 10147 MDIO_CTL_REG_84823_MEDIA_LINE_MASK); 10148 } else { 10149 val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | 10150 MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); 10151 } 10152 10153 actual_phy_selection = bnx2x_phy_selection(params); 10154 10155 switch (actual_phy_selection) { 10156 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 10157 /* Do nothing. Essentially this is like the priority copper */ 10158 break; 10159 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 10160 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER; 10161 break; 10162 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 10163 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER; 10164 break; 10165 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 10166 /* Do nothing here. 
The first PHY won't be initialized at all */ 10167 break; 10168 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 10169 val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN; 10170 initialize = 0; 10171 break; 10172 } 10173 if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000) 10174 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; 10175 10176 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10177 MDIO_CTL_REG_84823_MEDIA, val); 10178 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", 10179 params->multi_phy_config, val); 10180 10181 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 10182 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { 10183 bnx2x_84833_pair_swap_cfg(phy, params, vars); 10184 10185 /* Keep AutogrEEEn disabled. */ 10186 cmd_args[0] = 0x0; 10187 cmd_args[1] = 0x0; 10188 cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; 10189 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10190 rc = bnx2x_84833_cmd_hdlr(phy, params, 10191 PHY84833_CMD_SET_EEE_MODE, cmd_args, 10192 PHY84833_CMDHDLR_MAX_ARGS); 10193 if (rc) 10194 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10195 } 10196 if (initialize) 10197 rc = bnx2x_848xx_cmn_config_init(phy, params, vars); 10198 else 10199 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 10200 /* 84833 PHY has a better feature and doesn't need to support this. */ 10201 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { 10202 u32 cms_enable = REG_RD(bp, params->shmem_base + 10203 offsetof(struct shmem_region, 10204 dev_info.port_hw_config[params->port].default_cfg)) & 10205 PORT_HW_CFG_ENABLE_CMS_MASK; 10206 10207 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10208 MDIO_CTL_REG_84823_USER_CTRL_REG, &val); 10209 if (cms_enable) 10210 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; 10211 else 10212 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; 10213 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10214 MDIO_CTL_REG_84823_USER_CTRL_REG, val); 10215 } 10216 10217 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10218 MDIO_84833_TOP_CFG_FW_REV, &val); 10219 10220 /* Configure EEE support */ 10221 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && 10222 (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && 10223 bnx2x_eee_has_cap(params)) { 10224 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); 10225 if (rc) { 10226 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); 10227 bnx2x_8483x_disable_eee(phy, params, vars); 10228 return rc; 10229 } 10230 10231 if ((phy->req_duplex == DUPLEX_FULL) && 10232 (params->eee_mode & EEE_MODE_ADV_LPI) && 10233 (bnx2x_eee_calc_timer(params) || 10234 !(params->eee_mode & EEE_MODE_ENABLE_LPI))) 10235 rc = bnx2x_8483x_enable_eee(phy, params, vars); 10236 else 10237 rc = bnx2x_8483x_disable_eee(phy, params, vars); 10238 if (rc) { 10239 DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); 10240 return rc; 10241 } 10242 } else { 10243 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10244 } 10245 10246 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 10247 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { 10248 /* Bring PHY out of super isolate mode as the final step. 
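                 * The bnx2x_cl45_read_and_write() call below masks off
                 * MDIO_84833_SUPER_ISOLATE in TOP_CFG_XGPHY_STRAP1;
                 * bnx2x_848x3_link_reset() sets the same bit again when the
                 * link is torn down.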
*/ 10249 bnx2x_cl45_read_and_write(bp, phy, 10250 MDIO_CTL_DEVAD, 10251 MDIO_84833_TOP_CFG_XGPHY_STRAP1, 10252 (u16)~MDIO_84833_SUPER_ISOLATE); 10253 } 10254 return rc; 10255 } 10256 10257 static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, 10258 struct link_params *params, 10259 struct link_vars *vars) 10260 { 10261 struct bnx2x *bp = params->bp; 10262 u16 val, val1, val2; 10263 u8 link_up = 0; 10264 10265 10266 /* Check 10G-BaseT link status */ 10267 /* Check PMD signal ok */ 10268 bnx2x_cl45_read(bp, phy, 10269 MDIO_AN_DEVAD, 0xFFFA, &val1); 10270 bnx2x_cl45_read(bp, phy, 10271 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, 10272 &val2); 10273 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); 10274 10275 /* Check link 10G */ 10276 if (val2 & (1<<11)) { 10277 vars->line_speed = SPEED_10000; 10278 vars->duplex = DUPLEX_FULL; 10279 link_up = 1; 10280 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 10281 } else { /* Check Legacy speed link */ 10282 u16 legacy_status, legacy_speed; 10283 10284 /* Enable expansion register 0x42 (Operation mode status) */ 10285 bnx2x_cl45_write(bp, phy, 10286 MDIO_AN_DEVAD, 10287 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); 10288 10289 /* Get legacy speed operation status */ 10290 bnx2x_cl45_read(bp, phy, 10291 MDIO_AN_DEVAD, 10292 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, 10293 &legacy_status); 10294 10295 DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", 10296 legacy_status); 10297 link_up = ((legacy_status & (1<<11)) == (1<<11)); 10298 legacy_speed = (legacy_status & (3<<9)); 10299 if (legacy_speed == (0<<9)) 10300 vars->line_speed = SPEED_10; 10301 else if (legacy_speed == (1<<9)) 10302 vars->line_speed = SPEED_100; 10303 else if (legacy_speed == (2<<9)) 10304 vars->line_speed = SPEED_1000; 10305 else { /* Should not happen: Treat as link down */ 10306 vars->line_speed = 0; 10307 link_up = 0; 10308 } 10309 10310 if (link_up) { 10311 if (legacy_status & (1<<8)) 10312 vars->duplex = DUPLEX_FULL; 10313 else 10314 vars->duplex = DUPLEX_HALF; 10315 10316 DP(NETIF_MSG_LINK, 10317 "Link is up in %dMbps, is_duplex_full= %d\n", 10318 vars->line_speed, 10319 (vars->duplex == DUPLEX_FULL)); 10320 /* Check legacy speed AN resolution */ 10321 bnx2x_cl45_read(bp, phy, 10322 MDIO_AN_DEVAD, 10323 MDIO_AN_REG_8481_LEGACY_MII_STATUS, 10324 &val); 10325 if (val & (1<<5)) 10326 vars->link_status |= 10327 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; 10328 bnx2x_cl45_read(bp, phy, 10329 MDIO_AN_DEVAD, 10330 MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, 10331 &val); 10332 if ((val & (1<<0)) == 0) 10333 vars->link_status |= 10334 LINK_STATUS_PARALLEL_DETECTION_USED; 10335 } 10336 } 10337 if (link_up) { 10338 DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n", 10339 vars->line_speed); 10340 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10341 10342 /* Read LP advertised speeds */ 10343 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 10344 MDIO_AN_REG_CL37_FC_LP, &val); 10345 if (val & (1<<5)) 10346 vars->link_status |= 10347 LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; 10348 if (val & (1<<6)) 10349 vars->link_status |= 10350 LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; 10351 if (val & (1<<7)) 10352 vars->link_status |= 10353 LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; 10354 if (val & (1<<8)) 10355 vars->link_status |= 10356 LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; 10357 if (val & (1<<9)) 10358 vars->link_status |= 10359 LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; 10360 10361 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 10362 MDIO_AN_REG_1000T_STATUS, &val); 10363 10364 if (val & (1<<10)) 10365 vars->link_status 
|= 10366 LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; 10367 if (val & (1<<11)) 10368 vars->link_status |= 10369 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 10370 10371 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 10372 MDIO_AN_REG_MASTER_STATUS, &val); 10373 10374 if (val & (1<<11)) 10375 vars->link_status |= 10376 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10377 10378 /* Determine if EEE was negotiated */ 10379 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 10380 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) 10381 bnx2x_eee_an_resolve(phy, params, vars); 10382 } 10383 10384 return link_up; 10385 } 10386 10387 static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10388 { 10389 int status = 0; 10390 u32 spirom_ver; 10391 spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); 10392 status = bnx2x_format_ver(spirom_ver, str, len); 10393 return status; 10394 } 10395 10396 static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, 10397 struct link_params *params) 10398 { 10399 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 10400 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); 10401 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 10402 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); 10403 } 10404 10405 static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, 10406 struct link_params *params) 10407 { 10408 bnx2x_cl45_write(params->bp, phy, 10409 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); 10410 bnx2x_cl45_write(params->bp, phy, 10411 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1); 10412 } 10413 10414 static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, 10415 struct link_params *params) 10416 { 10417 struct bnx2x *bp = params->bp; 10418 u8 port; 10419 u16 val16; 10420 10421 if (!(CHIP_IS_E1x(bp))) 10422 port = BP_PATH(bp); 10423 else 10424 port = params->port; 10425 10426 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { 10427 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 10428 MISC_REGISTERS_GPIO_OUTPUT_LOW, 10429 port); 10430 } else { 10431 bnx2x_cl45_read(bp, phy, 10432 MDIO_CTL_DEVAD, 10433 MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16); 10434 val16 |= MDIO_84833_SUPER_ISOLATE; 10435 bnx2x_cl45_write(bp, phy, 10436 MDIO_CTL_DEVAD, 10437 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16); 10438 } 10439 } 10440 10441 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 10442 struct link_params *params, u8 mode) 10443 { 10444 struct bnx2x *bp = params->bp; 10445 u16 val; 10446 u8 port; 10447 10448 if (!(CHIP_IS_E1x(bp))) 10449 port = BP_PATH(bp); 10450 else 10451 port = params->port; 10452 10453 switch (mode) { 10454 case LED_MODE_OFF: 10455 10456 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); 10457 10458 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == 10459 SHARED_HW_CFG_LED_EXTPHY1) { 10460 10461 /* Set LED masks */ 10462 bnx2x_cl45_write(bp, phy, 10463 MDIO_PMA_DEVAD, 10464 MDIO_PMA_REG_8481_LED1_MASK, 10465 0x0); 10466 10467 bnx2x_cl45_write(bp, phy, 10468 MDIO_PMA_DEVAD, 10469 MDIO_PMA_REG_8481_LED2_MASK, 10470 0x0); 10471 10472 bnx2x_cl45_write(bp, phy, 10473 MDIO_PMA_DEVAD, 10474 MDIO_PMA_REG_8481_LED3_MASK, 10475 0x0); 10476 10477 bnx2x_cl45_write(bp, phy, 10478 MDIO_PMA_DEVAD, 10479 MDIO_PMA_REG_8481_LED5_MASK, 10480 0x0); 10481 10482 } else { 10483 bnx2x_cl45_write(bp, phy, 10484 MDIO_PMA_DEVAD, 10485 MDIO_PMA_REG_8481_LED1_MASK, 10486 0x0); 10487 } 10488 break; 10489 case LED_MODE_FRONT_PANEL_OFF: 10490 10491 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", 10492 port); 10493 10494 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == 10495 
SHARED_HW_CFG_LED_EXTPHY1) { 10496 10497 /* Set LED masks */ 10498 bnx2x_cl45_write(bp, phy, 10499 MDIO_PMA_DEVAD, 10500 MDIO_PMA_REG_8481_LED1_MASK, 10501 0x0); 10502 10503 bnx2x_cl45_write(bp, phy, 10504 MDIO_PMA_DEVAD, 10505 MDIO_PMA_REG_8481_LED2_MASK, 10506 0x0); 10507 10508 bnx2x_cl45_write(bp, phy, 10509 MDIO_PMA_DEVAD, 10510 MDIO_PMA_REG_8481_LED3_MASK, 10511 0x0); 10512 10513 bnx2x_cl45_write(bp, phy, 10514 MDIO_PMA_DEVAD, 10515 MDIO_PMA_REG_8481_LED5_MASK, 10516 0x20); 10517 10518 } else { 10519 bnx2x_cl45_write(bp, phy, 10520 MDIO_PMA_DEVAD, 10521 MDIO_PMA_REG_8481_LED1_MASK, 10522 0x0); 10523 if (phy->type == 10524 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 10525 /* Disable MI_INT interrupt before setting LED4 10526 * source to constant off. 10527 */ 10528 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 10529 params->port*4) & 10530 NIG_MASK_MI_INT) { 10531 params->link_flags |= 10532 LINK_FLAGS_INT_DISABLED; 10533 10534 bnx2x_bits_dis( 10535 bp, 10536 NIG_REG_MASK_INTERRUPT_PORT0 + 10537 params->port*4, 10538 NIG_MASK_MI_INT); 10539 } 10540 bnx2x_cl45_write(bp, phy, 10541 MDIO_PMA_DEVAD, 10542 MDIO_PMA_REG_8481_SIGNAL_MASK, 10543 0x0); 10544 } 10545 } 10546 break; 10547 case LED_MODE_ON: 10548 10549 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); 10550 10551 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == 10552 SHARED_HW_CFG_LED_EXTPHY1) { 10553 /* Set control reg */ 10554 bnx2x_cl45_read(bp, phy, 10555 MDIO_PMA_DEVAD, 10556 MDIO_PMA_REG_8481_LINK_SIGNAL, 10557 &val); 10558 val &= 0x8000; 10559 val |= 0x2492; 10560 10561 bnx2x_cl45_write(bp, phy, 10562 MDIO_PMA_DEVAD, 10563 MDIO_PMA_REG_8481_LINK_SIGNAL, 10564 val); 10565 10566 /* Set LED masks */ 10567 bnx2x_cl45_write(bp, phy, 10568 MDIO_PMA_DEVAD, 10569 MDIO_PMA_REG_8481_LED1_MASK, 10570 0x0); 10571 10572 bnx2x_cl45_write(bp, phy, 10573 MDIO_PMA_DEVAD, 10574 MDIO_PMA_REG_8481_LED2_MASK, 10575 0x20); 10576 10577 bnx2x_cl45_write(bp, phy, 10578 MDIO_PMA_DEVAD, 10579 MDIO_PMA_REG_8481_LED3_MASK, 10580 0x20); 10581 10582 bnx2x_cl45_write(bp, phy, 10583 MDIO_PMA_DEVAD, 10584 MDIO_PMA_REG_8481_LED5_MASK, 10585 0x0); 10586 } else { 10587 bnx2x_cl45_write(bp, phy, 10588 MDIO_PMA_DEVAD, 10589 MDIO_PMA_REG_8481_LED1_MASK, 10590 0x20); 10591 if (phy->type == 10592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 10593 /* Disable MI_INT interrupt before setting LED4 10594 * source to constant on. 
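                                 * LINK_FLAGS_INT_DISABLED is recorded here so
                                 * that the LED_MODE_OPER path can re-enable
                                 * the interrupt once LED4 is restored to its
                                 * external-link source.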
10595 */ 10596 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 10597 params->port*4) & 10598 NIG_MASK_MI_INT) { 10599 params->link_flags |= 10600 LINK_FLAGS_INT_DISABLED; 10601 10602 bnx2x_bits_dis( 10603 bp, 10604 NIG_REG_MASK_INTERRUPT_PORT0 + 10605 params->port*4, 10606 NIG_MASK_MI_INT); 10607 } 10608 bnx2x_cl45_write(bp, phy, 10609 MDIO_PMA_DEVAD, 10610 MDIO_PMA_REG_8481_SIGNAL_MASK, 10611 0x20); 10612 } 10613 } 10614 break; 10615 10616 case LED_MODE_OPER: 10617 10618 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); 10619 10620 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == 10621 SHARED_HW_CFG_LED_EXTPHY1) { 10622 10623 /* Set control reg */ 10624 bnx2x_cl45_read(bp, phy, 10625 MDIO_PMA_DEVAD, 10626 MDIO_PMA_REG_8481_LINK_SIGNAL, 10627 &val); 10628 10629 if (!((val & 10630 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) 10631 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { 10632 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); 10633 bnx2x_cl45_write(bp, phy, 10634 MDIO_PMA_DEVAD, 10635 MDIO_PMA_REG_8481_LINK_SIGNAL, 10636 0xa492); 10637 } 10638 10639 /* Set LED masks */ 10640 bnx2x_cl45_write(bp, phy, 10641 MDIO_PMA_DEVAD, 10642 MDIO_PMA_REG_8481_LED1_MASK, 10643 0x10); 10644 10645 bnx2x_cl45_write(bp, phy, 10646 MDIO_PMA_DEVAD, 10647 MDIO_PMA_REG_8481_LED2_MASK, 10648 0x80); 10649 10650 bnx2x_cl45_write(bp, phy, 10651 MDIO_PMA_DEVAD, 10652 MDIO_PMA_REG_8481_LED3_MASK, 10653 0x98); 10654 10655 bnx2x_cl45_write(bp, phy, 10656 MDIO_PMA_DEVAD, 10657 MDIO_PMA_REG_8481_LED5_MASK, 10658 0x40); 10659 10660 } else { 10661 /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED 10662 * sources are all wired through LED1, rather than only 10663 * 10G in other modes. 10664 */ 10665 val = ((params->hw_led_mode << 10666 SHARED_HW_CFG_LED_MODE_SHIFT) == 10667 SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; 10668 10669 bnx2x_cl45_write(bp, phy, 10670 MDIO_PMA_DEVAD, 10671 MDIO_PMA_REG_8481_LED1_MASK, 10672 val); 10673 10674 /* Tell LED3 to blink on source */ 10675 bnx2x_cl45_read(bp, phy, 10676 MDIO_PMA_DEVAD, 10677 MDIO_PMA_REG_8481_LINK_SIGNAL, 10678 &val); 10679 val &= ~(7<<6); 10680 val |= (1<<6); /* A83B[8:6]= 1 */ 10681 bnx2x_cl45_write(bp, phy, 10682 MDIO_PMA_DEVAD, 10683 MDIO_PMA_REG_8481_LINK_SIGNAL, 10684 val); 10685 if (phy->type == 10686 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 10687 /* Restore LED4 source to external link, 10688 * and re-enable interrupts. 10689 */ 10690 bnx2x_cl45_write(bp, phy, 10691 MDIO_PMA_DEVAD, 10692 MDIO_PMA_REG_8481_SIGNAL_MASK, 10693 0x40); 10694 if (params->link_flags & 10695 LINK_FLAGS_INT_DISABLED) { 10696 bnx2x_link_int_enable(params); 10697 params->link_flags &= 10698 ~LINK_FLAGS_INT_DISABLED; 10699 } 10700 } 10701 } 10702 break; 10703 } 10704 10705 /* This is a workaround for E3+84833 until autoneg 10706 * restart is fixed in f/w 10707 */ 10708 if (CHIP_IS_E3(bp)) { 10709 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 10710 MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); 10711 } 10712 } 10713 10714 /******************************************************************/ 10715 /* 54618SE PHY SECTION */ 10716 /******************************************************************/ 10717 static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, 10718 struct link_params *params, 10719 u32 action) 10720 { 10721 struct bnx2x *bp = params->bp; 10722 u16 temp; 10723 switch (action) { 10724 case PHY_INIT: 10725 /* Configure LED4: set to INTR (0x6). */ 10726 /* Accessing shadow register 0xe. 
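                 * Shadow access works by writing the selector to
                 * MDIO_REG_GPHY_SHADOW, reading the current value back, and
                 * then writing the modified value with
                 * MDIO_REG_GPHY_SHADOW_WR_ENA set.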
*/ 10727 bnx2x_cl22_write(bp, phy, 10728 MDIO_REG_GPHY_SHADOW, 10729 MDIO_REG_GPHY_SHADOW_LED_SEL2); 10730 bnx2x_cl22_read(bp, phy, 10731 MDIO_REG_GPHY_SHADOW, 10732 &temp); 10733 temp &= ~(0xf << 4); 10734 temp |= (0x6 << 4); 10735 bnx2x_cl22_write(bp, phy, 10736 MDIO_REG_GPHY_SHADOW, 10737 MDIO_REG_GPHY_SHADOW_WR_ENA | temp); 10738 /* Configure INTR based on link status change. */ 10739 bnx2x_cl22_write(bp, phy, 10740 MDIO_REG_INTR_MASK, 10741 ~MDIO_REG_INTR_MASK_LINK_STATUS); 10742 break; 10743 } 10744 } 10745 10746 static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 10747 struct link_params *params, 10748 struct link_vars *vars) 10749 { 10750 struct bnx2x *bp = params->bp; 10751 u8 port; 10752 u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; 10753 u32 cfg_pin; 10754 10755 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10756 usleep_range(1000, 2000); 10757 10758 /* This works with E3 only, no need to check the chip 10759 * before determining the port. 10760 */ 10761 port = params->port; 10762 10763 cfg_pin = (REG_RD(bp, params->shmem_base + 10764 offsetof(struct shmem_region, 10765 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & 10766 PORT_HW_CFG_E3_PHY_RESET_MASK) >> 10767 PORT_HW_CFG_E3_PHY_RESET_SHIFT; 10768 10769 /* Drive pin high to bring the GPHY out of reset. */ 10770 bnx2x_set_cfg_pin(bp, cfg_pin, 1); 10771 10772 /* wait for GPHY to reset */ 10773 msleep(50); 10774 10775 /* reset phy */ 10776 bnx2x_cl22_write(bp, phy, 10777 MDIO_PMA_REG_CTRL, 0x8000); 10778 bnx2x_wait_reset_complete(bp, phy, params); 10779 10780 /* Wait for GPHY to reset */ 10781 msleep(50); 10782 10783 10784 bnx2x_54618se_specific_func(phy, params, PHY_INIT); 10785 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ 10786 bnx2x_cl22_write(bp, phy, 10787 MDIO_REG_GPHY_SHADOW, 10788 MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); 10789 bnx2x_cl22_read(bp, phy, 10790 MDIO_REG_GPHY_SHADOW, 10791 &temp); 10792 temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; 10793 bnx2x_cl22_write(bp, phy, 10794 MDIO_REG_GPHY_SHADOW, 10795 MDIO_REG_GPHY_SHADOW_WR_ENA | temp); 10796 10797 /* Set up fc */ 10798 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
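         * bnx2x_calc_ieee_aneg_adv() below resolves the requested flow
         * control into the pause/asym-pause advertisement bits, which are
         * later merged into the CL22 advertisement register together with
         * an_10_100_val.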
*/ 10799 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 10800 fc_val = 0; 10801 if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 10802 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) 10803 fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 10804 10805 if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 10806 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) 10807 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; 10808 10809 /* Read all advertisement */ 10810 bnx2x_cl22_read(bp, phy, 10811 0x09, 10812 &an_1000_val); 10813 10814 bnx2x_cl22_read(bp, phy, 10815 0x04, 10816 &an_10_100_val); 10817 10818 bnx2x_cl22_read(bp, phy, 10819 MDIO_PMA_REG_CTRL, 10820 &autoneg_val); 10821 10822 /* Disable forced speed */ 10823 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); 10824 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | 10825 (1<<11)); 10826 10827 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10828 (phy->speed_cap_mask & 10829 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 10830 (phy->req_line_speed == SPEED_1000)) { 10831 an_1000_val |= (1<<8); 10832 autoneg_val |= (1<<9 | 1<<12); 10833 if (phy->req_duplex == DUPLEX_FULL) 10834 an_1000_val |= (1<<9); 10835 DP(NETIF_MSG_LINK, "Advertising 1G\n"); 10836 } else 10837 an_1000_val &= ~((1<<8) | (1<<9)); 10838 10839 bnx2x_cl22_write(bp, phy, 10840 0x09, 10841 an_1000_val); 10842 bnx2x_cl22_read(bp, phy, 10843 0x09, 10844 &an_1000_val); 10845 10846 /* Set 100 speed advertisement */ 10847 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10848 (phy->speed_cap_mask & 10849 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10850 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { 10851 an_10_100_val |= (1<<7); 10852 /* Enable autoneg and restart autoneg for legacy speeds */ 10853 autoneg_val |= (1<<9 | 1<<12); 10854 10855 if (phy->req_duplex == DUPLEX_FULL) 10856 an_10_100_val |= (1<<8); 10857 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 10858 } 10859 10860 /* Set 10 speed advertisement */ 10861 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10862 (phy->speed_cap_mask & 10863 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 10864 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { 10865 an_10_100_val |= (1<<5); 10866 autoneg_val |= (1<<9 | 1<<12); 10867 if (phy->req_duplex == DUPLEX_FULL) 10868 an_10_100_val |= (1<<6); 10869 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 10870 } 10871 10872 /* Only 10/100 are allowed to work in FORCE mode */ 10873 if (phy->req_line_speed == SPEED_100) { 10874 autoneg_val |= (1<<13); 10875 /* Enabled AUTO-MDIX when autoneg is disabled */ 10876 bnx2x_cl22_write(bp, phy, 10877 0x18, 10878 (1<<15 | 1<<9 | 7<<0)); 10879 DP(NETIF_MSG_LINK, "Setting 100M force\n"); 10880 } 10881 if (phy->req_line_speed == SPEED_10) { 10882 /* Enabled AUTO-MDIX when autoneg is disabled */ 10883 bnx2x_cl22_write(bp, phy, 10884 0x18, 10885 (1<<15 | 1<<9 | 7<<0)); 10886 DP(NETIF_MSG_LINK, "Setting 10M force\n"); 10887 } 10888 10889 if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) { 10890 int rc; 10891 10892 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, 10893 MDIO_REG_GPHY_EXP_ACCESS_TOP | 10894 MDIO_REG_GPHY_EXP_TOP_2K_BUF); 10895 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); 10896 temp &= 0xfffe; 10897 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); 10898 10899 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); 10900 if (rc) { 10901 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); 10902 bnx2x_eee_disable(phy, params, vars); 10903 } else if ((params->eee_mode & 
EEE_MODE_ADV_LPI) && 10904 (phy->req_duplex == DUPLEX_FULL) && 10905 (bnx2x_eee_calc_timer(params) || 10906 !(params->eee_mode & EEE_MODE_ENABLE_LPI))) { 10907 /* Need to advertise EEE only when requested, 10908 * and either no LPI assertion was requested, 10909 * or it was requested and a valid timer was set. 10910 * Also notice full duplex is required for EEE. 10911 */ 10912 bnx2x_eee_advertise(phy, params, vars, 10913 SHMEM_EEE_1G_ADV); 10914 } else { 10915 DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n"); 10916 bnx2x_eee_disable(phy, params, vars); 10917 } 10918 } else { 10919 vars->eee_status &= ~SHMEM_EEE_1G_ADV << 10920 SHMEM_EEE_SUPPORTED_SHIFT; 10921 10922 if (phy->flags & FLAGS_EEE) { 10923 /* Handle legacy auto-grEEEn */ 10924 if (params->feature_config_flags & 10925 FEATURE_CONFIG_AUTOGREEEN_ENABLED) { 10926 temp = 6; 10927 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); 10928 } else { 10929 temp = 0; 10930 DP(NETIF_MSG_LINK, "Don't Adv. EEE\n"); 10931 } 10932 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 10933 MDIO_AN_REG_EEE_ADV, temp); 10934 } 10935 } 10936 10937 bnx2x_cl22_write(bp, phy, 10938 0x04, 10939 an_10_100_val | fc_val); 10940 10941 if (phy->req_duplex == DUPLEX_FULL) 10942 autoneg_val |= (1<<8); 10943 10944 bnx2x_cl22_write(bp, phy, 10945 MDIO_PMA_REG_CTRL, autoneg_val); 10946 10947 return 0; 10948 } 10949 10950 10951 static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy, 10952 struct link_params *params, u8 mode) 10953 { 10954 struct bnx2x *bp = params->bp; 10955 u16 temp; 10956 10957 bnx2x_cl22_write(bp, phy, 10958 MDIO_REG_GPHY_SHADOW, 10959 MDIO_REG_GPHY_SHADOW_LED_SEL1); 10960 bnx2x_cl22_read(bp, phy, 10961 MDIO_REG_GPHY_SHADOW, 10962 &temp); 10963 temp &= 0xff00; 10964 10965 DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode); 10966 switch (mode) { 10967 case LED_MODE_FRONT_PANEL_OFF: 10968 case LED_MODE_OFF: 10969 temp |= 0x00ee; 10970 break; 10971 case LED_MODE_OPER: 10972 temp |= 0x0001; 10973 break; 10974 case LED_MODE_ON: 10975 temp |= 0x00ff; 10976 break; 10977 default: 10978 break; 10979 } 10980 bnx2x_cl22_write(bp, phy, 10981 MDIO_REG_GPHY_SHADOW, 10982 MDIO_REG_GPHY_SHADOW_WR_ENA | temp); 10983 return; 10984 } 10985 10986 10987 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, 10988 struct link_params *params) 10989 { 10990 struct bnx2x *bp = params->bp; 10991 u32 cfg_pin; 10992 u8 port; 10993 10994 /* In case of no EPIO routed to reset the GPHY, put it 10995 * in low power mode. 10996 */ 10997 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); 10998 /* This works with E3 only, no need to check the chip 10999 * before determining the port. 11000 */ 11001 port = params->port; 11002 cfg_pin = (REG_RD(bp, params->shmem_base + 11003 offsetof(struct shmem_region, 11004 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & 11005 PORT_HW_CFG_E3_PHY_RESET_MASK) >> 11006 PORT_HW_CFG_E3_PHY_RESET_SHIFT; 11007 11008 /* Drive pin low to put GPHY in reset. */ 11009 bnx2x_set_cfg_pin(bp, cfg_pin, 0); 11010 } 11011 11012 static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, 11013 struct link_params *params, 11014 struct link_vars *vars) 11015 { 11016 struct bnx2x *bp = params->bp; 11017 u16 val; 11018 u8 link_up = 0; 11019 u16 legacy_status, legacy_speed; 11020 11021 /* Get speed operation status */ 11022 bnx2x_cl22_read(bp, phy, 11023 MDIO_REG_GPHY_AUX_STATUS, 11024 &legacy_status); 11025 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); 11026 11027 /* Read status to clear the PHY interrupt. 
*/ 11028 bnx2x_cl22_read(bp, phy, 11029 MDIO_REG_INTR_STATUS, 11030 &val); 11031 11032 link_up = ((legacy_status & (1<<2)) == (1<<2)); 11033 11034 if (link_up) { 11035 legacy_speed = (legacy_status & (7<<8)); 11036 if (legacy_speed == (7<<8)) { 11037 vars->line_speed = SPEED_1000; 11038 vars->duplex = DUPLEX_FULL; 11039 } else if (legacy_speed == (6<<8)) { 11040 vars->line_speed = SPEED_1000; 11041 vars->duplex = DUPLEX_HALF; 11042 } else if (legacy_speed == (5<<8)) { 11043 vars->line_speed = SPEED_100; 11044 vars->duplex = DUPLEX_FULL; 11045 } 11046 /* Omitting 100Base-T4 for now */ 11047 else if (legacy_speed == (3<<8)) { 11048 vars->line_speed = SPEED_100; 11049 vars->duplex = DUPLEX_HALF; 11050 } else if (legacy_speed == (2<<8)) { 11051 vars->line_speed = SPEED_10; 11052 vars->duplex = DUPLEX_FULL; 11053 } else if (legacy_speed == (1<<8)) { 11054 vars->line_speed = SPEED_10; 11055 vars->duplex = DUPLEX_HALF; 11056 } else /* Should not happen */ 11057 vars->line_speed = 0; 11058 11059 DP(NETIF_MSG_LINK, 11060 "Link is up in %dMbps, is_duplex_full= %d\n", 11061 vars->line_speed, 11062 (vars->duplex == DUPLEX_FULL)); 11063 11064 /* Check legacy speed AN resolution */ 11065 bnx2x_cl22_read(bp, phy, 11066 0x01, 11067 &val); 11068 if (val & (1<<5)) 11069 vars->link_status |= 11070 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; 11071 bnx2x_cl22_read(bp, phy, 11072 0x06, 11073 &val); 11074 if ((val & (1<<0)) == 0) 11075 vars->link_status |= 11076 LINK_STATUS_PARALLEL_DETECTION_USED; 11077 11078 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", 11079 vars->line_speed); 11080 11081 bnx2x_ext_phy_resolve_fc(phy, params, vars); 11082 11083 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 11084 /* Report LP advertised speeds */ 11085 bnx2x_cl22_read(bp, phy, 0x5, &val); 11086 11087 if (val & (1<<5)) 11088 vars->link_status |= 11089 LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; 11090 if (val & (1<<6)) 11091 vars->link_status |= 11092 LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; 11093 if (val & (1<<7)) 11094 vars->link_status |= 11095 LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; 11096 if (val & (1<<8)) 11097 vars->link_status |= 11098 LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; 11099 if (val & (1<<9)) 11100 vars->link_status |= 11101 LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; 11102 11103 bnx2x_cl22_read(bp, phy, 0xa, &val); 11104 if (val & (1<<10)) 11105 vars->link_status |= 11106 LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; 11107 if (val & (1<<11)) 11108 vars->link_status |= 11109 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 11110 11111 if ((phy->flags & FLAGS_EEE) && 11112 bnx2x_eee_has_cap(params)) 11113 bnx2x_eee_an_resolve(phy, params, vars); 11114 } 11115 } 11116 return link_up; 11117 } 11118 11119 static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, 11120 struct link_params *params) 11121 { 11122 struct bnx2x *bp = params->bp; 11123 u16 val; 11124 u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; 11125 11126 DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n"); 11127 11128 /* Enable master/slave manual mmode and set to master */ 11129 /* mii write 9 [bits set 11 12] */ 11130 bnx2x_cl22_write(bp, phy, 0x09, 3<<11); 11131 11132 /* forced 1G and disable autoneg */ 11133 /* set val [mii read 0] */ 11134 /* set val [expr $val & [bits clear 6 12 13]] */ 11135 /* set val [expr $val | [bits set 6 8]] */ 11136 /* mii write 0 $val */ 11137 bnx2x_cl22_read(bp, phy, 0x00, &val); 11138 val &= ~((1<<6) | (1<<12) | (1<<13)); 11139 val |= (1<<6) | (1<<8); 11140 bnx2x_cl22_write(bp, phy, 0x00, val); 11141 11142 /* Set external loopback and Tx using 6dB coding */ 11143 /* mii write 0x18 7 */ 11144 /* set val [mii read 0x18] */ 11145 /* mii write 0x18 [expr $val | [bits set 10 15]] */ 11146 bnx2x_cl22_write(bp, phy, 0x18, 7); 11147 bnx2x_cl22_read(bp, phy, 0x18, &val); 11148 bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); 11149 11150 /* This register opens the gate for the UMAC despite its name */ 11151 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 11152 11153 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame 11154 * length used by the MAC receive logic to check frames. 11155 */ 11156 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 11157 } 11158 11159 /******************************************************************/ 11160 /* SFX7101 PHY SECTION */ 11161 /******************************************************************/ 11162 static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, 11163 struct link_params *params) 11164 { 11165 struct bnx2x *bp = params->bp; 11166 /* SFX7101_XGXS_TEST1 */ 11167 bnx2x_cl45_write(bp, phy, 11168 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); 11169 } 11170 11171 static int bnx2x_7101_config_init(struct bnx2x_phy *phy, 11172 struct link_params *params, 11173 struct link_vars *vars) 11174 { 11175 u16 fw_ver1, fw_ver2, val; 11176 struct bnx2x *bp = params->bp; 11177 DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n"); 11178 11179 /* Restore normal power mode*/ 11180 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 11181 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 11182 /* HW reset */ 11183 bnx2x_ext_phy_hw_reset(bp, params->port); 11184 bnx2x_wait_reset_complete(bp, phy, params); 11185 11186 bnx2x_cl45_write(bp, phy, 11187 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); 11188 DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); 11189 bnx2x_cl45_write(bp, phy, 11190 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); 11191 11192 bnx2x_ext_phy_set_pause(params, phy, vars); 11193 /* Restart autoneg */ 11194 bnx2x_cl45_read(bp, phy, 11195 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); 11196 val |= 0x200; 11197 bnx2x_cl45_write(bp, phy, 11198 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); 11199 11200 /* Save spirom version */ 11201 bnx2x_cl45_read(bp, phy, 11202 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); 11203 11204 bnx2x_cl45_read(bp, phy, 11205 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); 11206 bnx2x_save_spirom_version(bp, params->port, 11207 (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); 11208 return 0; 11209 } 11210 11211 static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, 11212 struct link_params *params, 11213 struct link_vars *vars) 11214 { 11215 struct bnx2x *bp = params->bp; 11216 u8 link_up; 11217 u16 val1, val2; 11218 bnx2x_cl45_read(bp, phy, 11219 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); 11220 bnx2x_cl45_read(bp, phy, 11221 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, 
&val1); 11222 DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", 11223 val2, val1); 11224 bnx2x_cl45_read(bp, phy, 11225 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); 11226 bnx2x_cl45_read(bp, phy, 11227 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); 11228 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 11229 val2, val1); 11230 link_up = ((val1 & 4) == 4); 11231 /* If link is up print the AN outcome of the SFX7101 PHY */ 11232 if (link_up) { 11233 bnx2x_cl45_read(bp, phy, 11234 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 11235 &val2); 11236 vars->line_speed = SPEED_10000; 11237 vars->duplex = DUPLEX_FULL; 11238 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", 11239 val2, (val2 & (1<<14))); 11240 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 11241 bnx2x_ext_phy_resolve_fc(phy, params, vars); 11242 11243 /* Read LP advertised speeds */ 11244 if (val2 & (1<<11)) 11245 vars->link_status |= 11246 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 11247 } 11248 return link_up; 11249 } 11250 11251 static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) 11252 { 11253 if (*len < 5) 11254 return -EINVAL; 11255 str[0] = (spirom_ver & 0xFF); 11256 str[1] = (spirom_ver & 0xFF00) >> 8; 11257 str[2] = (spirom_ver & 0xFF0000) >> 16; 11258 str[3] = (spirom_ver & 0xFF000000) >> 24; 11259 str[4] = '\0'; 11260 *len -= 5; 11261 return 0; 11262 } 11263 11264 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) 11265 { 11266 u16 val, cnt; 11267 11268 bnx2x_cl45_read(bp, phy, 11269 MDIO_PMA_DEVAD, 11270 MDIO_PMA_REG_7101_RESET, &val); 11271 11272 for (cnt = 0; cnt < 10; cnt++) { 11273 msleep(50); 11274 /* Writes a self-clearing reset */ 11275 bnx2x_cl45_write(bp, phy, 11276 MDIO_PMA_DEVAD, 11277 MDIO_PMA_REG_7101_RESET, 11278 (val | (1<<15))); 11279 /* Wait for clear */ 11280 bnx2x_cl45_read(bp, phy, 11281 MDIO_PMA_DEVAD, 11282 MDIO_PMA_REG_7101_RESET, &val); 11283 11284 if ((val & (1<<15)) == 0) 11285 break; 11286 } 11287 } 11288 11289 static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, 11290 struct link_params *params) { 11291 /* Low power mode is controlled by GPIO 2 */ 11292 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, 11293 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 11294 /* The PHY reset is controlled by GPIO 1 */ 11295 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 11296 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 11297 } 11298 11299 static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, 11300 struct link_params *params, u8 mode) 11301 { 11302 u16 val = 0; 11303 struct bnx2x *bp = params->bp; 11304 switch (mode) { 11305 case LED_MODE_FRONT_PANEL_OFF: 11306 case LED_MODE_OFF: 11307 val = 2; 11308 break; 11309 case LED_MODE_ON: 11310 val = 1; 11311 break; 11312 case LED_MODE_OPER: 11313 val = 0; 11314 break; 11315 } 11316 bnx2x_cl45_write(bp, phy, 11317 MDIO_PMA_DEVAD, 11318 MDIO_PMA_REG_7107_LINK_LED_CNTL, 11319 val); 11320 } 11321 11322 /******************************************************************/ 11323 /* STATIC PHY DECLARATION */ 11324 /******************************************************************/ 11325 11326 static const struct bnx2x_phy phy_null = { 11327 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, 11328 .addr = 0, 11329 .def_md_devad = 0, 11330 .flags = FLAGS_INIT_XGXS_FIRST, 11331 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11332 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11333 .mdio_ctrl = 0, 11334 .supported = 0, 11335 .media_type = ETH_PHY_NOT_PRESENT, 11336 .ver_addr = 0, 11337 .req_flow_ctrl = 0, 
11338 .req_line_speed = 0, 11339 .speed_cap_mask = 0, 11340 .req_duplex = 0, 11341 .rsrv = 0, 11342 .config_init = (config_init_t)NULL, 11343 .read_status = (read_status_t)NULL, 11344 .link_reset = (link_reset_t)NULL, 11345 .config_loopback = (config_loopback_t)NULL, 11346 .format_fw_ver = (format_fw_ver_t)NULL, 11347 .hw_reset = (hw_reset_t)NULL, 11348 .set_link_led = (set_link_led_t)NULL, 11349 .phy_specific_func = (phy_specific_func_t)NULL 11350 }; 11351 11352 static const struct bnx2x_phy phy_serdes = { 11353 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, 11354 .addr = 0xff, 11355 .def_md_devad = 0, 11356 .flags = 0, 11357 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11358 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11359 .mdio_ctrl = 0, 11360 .supported = (SUPPORTED_10baseT_Half | 11361 SUPPORTED_10baseT_Full | 11362 SUPPORTED_100baseT_Half | 11363 SUPPORTED_100baseT_Full | 11364 SUPPORTED_1000baseT_Full | 11365 SUPPORTED_2500baseX_Full | 11366 SUPPORTED_TP | 11367 SUPPORTED_Autoneg | 11368 SUPPORTED_Pause | 11369 SUPPORTED_Asym_Pause), 11370 .media_type = ETH_PHY_BASE_T, 11371 .ver_addr = 0, 11372 .req_flow_ctrl = 0, 11373 .req_line_speed = 0, 11374 .speed_cap_mask = 0, 11375 .req_duplex = 0, 11376 .rsrv = 0, 11377 .config_init = (config_init_t)bnx2x_xgxs_config_init, 11378 .read_status = (read_status_t)bnx2x_link_settings_status, 11379 .link_reset = (link_reset_t)bnx2x_int_link_reset, 11380 .config_loopback = (config_loopback_t)NULL, 11381 .format_fw_ver = (format_fw_ver_t)NULL, 11382 .hw_reset = (hw_reset_t)NULL, 11383 .set_link_led = (set_link_led_t)NULL, 11384 .phy_specific_func = (phy_specific_func_t)NULL 11385 }; 11386 11387 static const struct bnx2x_phy phy_xgxs = { 11388 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 11389 .addr = 0xff, 11390 .def_md_devad = 0, 11391 .flags = 0, 11392 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11393 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11394 .mdio_ctrl = 0, 11395 .supported = (SUPPORTED_10baseT_Half | 11396 SUPPORTED_10baseT_Full | 11397 SUPPORTED_100baseT_Half | 11398 SUPPORTED_100baseT_Full | 11399 SUPPORTED_1000baseT_Full | 11400 SUPPORTED_2500baseX_Full | 11401 SUPPORTED_10000baseT_Full | 11402 SUPPORTED_FIBRE | 11403 SUPPORTED_Autoneg | 11404 SUPPORTED_Pause | 11405 SUPPORTED_Asym_Pause), 11406 .media_type = ETH_PHY_CX4, 11407 .ver_addr = 0, 11408 .req_flow_ctrl = 0, 11409 .req_line_speed = 0, 11410 .speed_cap_mask = 0, 11411 .req_duplex = 0, 11412 .rsrv = 0, 11413 .config_init = (config_init_t)bnx2x_xgxs_config_init, 11414 .read_status = (read_status_t)bnx2x_link_settings_status, 11415 .link_reset = (link_reset_t)bnx2x_int_link_reset, 11416 .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, 11417 .format_fw_ver = (format_fw_ver_t)NULL, 11418 .hw_reset = (hw_reset_t)NULL, 11419 .set_link_led = (set_link_led_t)NULL, 11420 .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func 11421 }; 11422 static const struct bnx2x_phy phy_warpcore = { 11423 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 11424 .addr = 0xff, 11425 .def_md_devad = 0, 11426 .flags = FLAGS_TX_ERROR_CHECK, 11427 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11428 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11429 .mdio_ctrl = 0, 11430 .supported = (SUPPORTED_10baseT_Half | 11431 SUPPORTED_10baseT_Full | 11432 SUPPORTED_100baseT_Half | 11433 SUPPORTED_100baseT_Full | 11434 SUPPORTED_1000baseT_Full | 11435 SUPPORTED_10000baseT_Full | 11436 SUPPORTED_20000baseKR2_Full | 11437 SUPPORTED_20000baseMLD2_Full 
| 11438 SUPPORTED_FIBRE | 11439 SUPPORTED_Autoneg | 11440 SUPPORTED_Pause | 11441 SUPPORTED_Asym_Pause), 11442 .media_type = ETH_PHY_UNSPECIFIED, 11443 .ver_addr = 0, 11444 .req_flow_ctrl = 0, 11445 .req_line_speed = 0, 11446 .speed_cap_mask = 0, 11447 /* req_duplex = */0, 11448 /* rsrv = */0, 11449 .config_init = (config_init_t)bnx2x_warpcore_config_init, 11450 .read_status = (read_status_t)bnx2x_warpcore_read_status, 11451 .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, 11452 .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, 11453 .format_fw_ver = (format_fw_ver_t)NULL, 11454 .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, 11455 .set_link_led = (set_link_led_t)NULL, 11456 .phy_specific_func = (phy_specific_func_t)NULL 11457 }; 11458 11459 11460 static const struct bnx2x_phy phy_7101 = { 11461 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 11462 .addr = 0xff, 11463 .def_md_devad = 0, 11464 .flags = FLAGS_FAN_FAILURE_DET_REQ, 11465 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11466 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11467 .mdio_ctrl = 0, 11468 .supported = (SUPPORTED_10000baseT_Full | 11469 SUPPORTED_TP | 11470 SUPPORTED_Autoneg | 11471 SUPPORTED_Pause | 11472 SUPPORTED_Asym_Pause), 11473 .media_type = ETH_PHY_BASE_T, 11474 .ver_addr = 0, 11475 .req_flow_ctrl = 0, 11476 .req_line_speed = 0, 11477 .speed_cap_mask = 0, 11478 .req_duplex = 0, 11479 .rsrv = 0, 11480 .config_init = (config_init_t)bnx2x_7101_config_init, 11481 .read_status = (read_status_t)bnx2x_7101_read_status, 11482 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, 11483 .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback, 11484 .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, 11485 .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, 11486 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, 11487 .phy_specific_func = (phy_specific_func_t)NULL 11488 }; 11489 static const struct bnx2x_phy phy_8073 = { 11490 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 11491 .addr = 0xff, 11492 .def_md_devad = 0, 11493 .flags = 0, 11494 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11495 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11496 .mdio_ctrl = 0, 11497 .supported = (SUPPORTED_10000baseT_Full | 11498 SUPPORTED_2500baseX_Full | 11499 SUPPORTED_1000baseT_Full | 11500 SUPPORTED_FIBRE | 11501 SUPPORTED_Autoneg | 11502 SUPPORTED_Pause | 11503 SUPPORTED_Asym_Pause), 11504 .media_type = ETH_PHY_KR, 11505 .ver_addr = 0, 11506 .req_flow_ctrl = 0, 11507 .req_line_speed = 0, 11508 .speed_cap_mask = 0, 11509 .req_duplex = 0, 11510 .rsrv = 0, 11511 .config_init = (config_init_t)bnx2x_8073_config_init, 11512 .read_status = (read_status_t)bnx2x_8073_read_status, 11513 .link_reset = (link_reset_t)bnx2x_8073_link_reset, 11514 .config_loopback = (config_loopback_t)NULL, 11515 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11516 .hw_reset = (hw_reset_t)NULL, 11517 .set_link_led = (set_link_led_t)NULL, 11518 .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func 11519 }; 11520 static const struct bnx2x_phy phy_8705 = { 11521 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, 11522 .addr = 0xff, 11523 .def_md_devad = 0, 11524 .flags = FLAGS_INIT_XGXS_FIRST, 11525 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11526 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11527 .mdio_ctrl = 0, 11528 .supported = (SUPPORTED_10000baseT_Full | 11529 SUPPORTED_FIBRE | 11530 SUPPORTED_Pause | 11531 SUPPORTED_Asym_Pause), 11532 .media_type = ETH_PHY_XFP_FIBER, 11533 
.ver_addr = 0, 11534 .req_flow_ctrl = 0, 11535 .req_line_speed = 0, 11536 .speed_cap_mask = 0, 11537 .req_duplex = 0, 11538 .rsrv = 0, 11539 .config_init = (config_init_t)bnx2x_8705_config_init, 11540 .read_status = (read_status_t)bnx2x_8705_read_status, 11541 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, 11542 .config_loopback = (config_loopback_t)NULL, 11543 .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, 11544 .hw_reset = (hw_reset_t)NULL, 11545 .set_link_led = (set_link_led_t)NULL, 11546 .phy_specific_func = (phy_specific_func_t)NULL 11547 }; 11548 static const struct bnx2x_phy phy_8706 = { 11549 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 11550 .addr = 0xff, 11551 .def_md_devad = 0, 11552 .flags = FLAGS_INIT_XGXS_FIRST, 11553 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11554 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11555 .mdio_ctrl = 0, 11556 .supported = (SUPPORTED_10000baseT_Full | 11557 SUPPORTED_1000baseT_Full | 11558 SUPPORTED_FIBRE | 11559 SUPPORTED_Pause | 11560 SUPPORTED_Asym_Pause), 11561 .media_type = ETH_PHY_SFPP_10G_FIBER, 11562 .ver_addr = 0, 11563 .req_flow_ctrl = 0, 11564 .req_line_speed = 0, 11565 .speed_cap_mask = 0, 11566 .req_duplex = 0, 11567 .rsrv = 0, 11568 .config_init = (config_init_t)bnx2x_8706_config_init, 11569 .read_status = (read_status_t)bnx2x_8706_read_status, 11570 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, 11571 .config_loopback = (config_loopback_t)NULL, 11572 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11573 .hw_reset = (hw_reset_t)NULL, 11574 .set_link_led = (set_link_led_t)NULL, 11575 .phy_specific_func = (phy_specific_func_t)NULL 11576 }; 11577 11578 static const struct bnx2x_phy phy_8726 = { 11579 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, 11580 .addr = 0xff, 11581 .def_md_devad = 0, 11582 .flags = (FLAGS_INIT_XGXS_FIRST | 11583 FLAGS_TX_ERROR_CHECK), 11584 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11585 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11586 .mdio_ctrl = 0, 11587 .supported = (SUPPORTED_10000baseT_Full | 11588 SUPPORTED_1000baseT_Full | 11589 SUPPORTED_Autoneg | 11590 SUPPORTED_FIBRE | 11591 SUPPORTED_Pause | 11592 SUPPORTED_Asym_Pause), 11593 .media_type = ETH_PHY_NOT_PRESENT, 11594 .ver_addr = 0, 11595 .req_flow_ctrl = 0, 11596 .req_line_speed = 0, 11597 .speed_cap_mask = 0, 11598 .req_duplex = 0, 11599 .rsrv = 0, 11600 .config_init = (config_init_t)bnx2x_8726_config_init, 11601 .read_status = (read_status_t)bnx2x_8726_read_status, 11602 .link_reset = (link_reset_t)bnx2x_8726_link_reset, 11603 .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, 11604 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11605 .hw_reset = (hw_reset_t)NULL, 11606 .set_link_led = (set_link_led_t)NULL, 11607 .phy_specific_func = (phy_specific_func_t)NULL 11608 }; 11609 11610 static const struct bnx2x_phy phy_8727 = { 11611 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 11612 .addr = 0xff, 11613 .def_md_devad = 0, 11614 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11615 FLAGS_TX_ERROR_CHECK), 11616 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11617 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11618 .mdio_ctrl = 0, 11619 .supported = (SUPPORTED_10000baseT_Full | 11620 SUPPORTED_1000baseT_Full | 11621 SUPPORTED_FIBRE | 11622 SUPPORTED_Pause | 11623 SUPPORTED_Asym_Pause), 11624 .media_type = ETH_PHY_NOT_PRESENT, 11625 .ver_addr = 0, 11626 .req_flow_ctrl = 0, 11627 .req_line_speed = 0, 11628 .speed_cap_mask = 0, 11629 .req_duplex = 0, 11630 .rsrv = 0, 
11631 .config_init = (config_init_t)bnx2x_8727_config_init, 11632 .read_status = (read_status_t)bnx2x_8727_read_status, 11633 .link_reset = (link_reset_t)bnx2x_8727_link_reset, 11634 .config_loopback = (config_loopback_t)NULL, 11635 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11636 .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, 11637 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, 11638 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func 11639 }; 11640 static const struct bnx2x_phy phy_8481 = { 11641 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 11642 .addr = 0xff, 11643 .def_md_devad = 0, 11644 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11645 FLAGS_REARM_LATCH_SIGNAL, 11646 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11647 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11648 .mdio_ctrl = 0, 11649 .supported = (SUPPORTED_10baseT_Half | 11650 SUPPORTED_10baseT_Full | 11651 SUPPORTED_100baseT_Half | 11652 SUPPORTED_100baseT_Full | 11653 SUPPORTED_1000baseT_Full | 11654 SUPPORTED_10000baseT_Full | 11655 SUPPORTED_TP | 11656 SUPPORTED_Autoneg | 11657 SUPPORTED_Pause | 11658 SUPPORTED_Asym_Pause), 11659 .media_type = ETH_PHY_BASE_T, 11660 .ver_addr = 0, 11661 .req_flow_ctrl = 0, 11662 .req_line_speed = 0, 11663 .speed_cap_mask = 0, 11664 .req_duplex = 0, 11665 .rsrv = 0, 11666 .config_init = (config_init_t)bnx2x_8481_config_init, 11667 .read_status = (read_status_t)bnx2x_848xx_read_status, 11668 .link_reset = (link_reset_t)bnx2x_8481_link_reset, 11669 .config_loopback = (config_loopback_t)NULL, 11670 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11671 .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, 11672 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11673 .phy_specific_func = (phy_specific_func_t)NULL 11674 }; 11675 11676 static const struct bnx2x_phy phy_84823 = { 11677 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, 11678 .addr = 0xff, 11679 .def_md_devad = 0, 11680 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11681 FLAGS_REARM_LATCH_SIGNAL | 11682 FLAGS_TX_ERROR_CHECK), 11683 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11684 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11685 .mdio_ctrl = 0, 11686 .supported = (SUPPORTED_10baseT_Half | 11687 SUPPORTED_10baseT_Full | 11688 SUPPORTED_100baseT_Half | 11689 SUPPORTED_100baseT_Full | 11690 SUPPORTED_1000baseT_Full | 11691 SUPPORTED_10000baseT_Full | 11692 SUPPORTED_TP | 11693 SUPPORTED_Autoneg | 11694 SUPPORTED_Pause | 11695 SUPPORTED_Asym_Pause), 11696 .media_type = ETH_PHY_BASE_T, 11697 .ver_addr = 0, 11698 .req_flow_ctrl = 0, 11699 .req_line_speed = 0, 11700 .speed_cap_mask = 0, 11701 .req_duplex = 0, 11702 .rsrv = 0, 11703 .config_init = (config_init_t)bnx2x_848x3_config_init, 11704 .read_status = (read_status_t)bnx2x_848xx_read_status, 11705 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 11706 .config_loopback = (config_loopback_t)NULL, 11707 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11708 .hw_reset = (hw_reset_t)NULL, 11709 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11710 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 11711 }; 11712 11713 static const struct bnx2x_phy phy_84833 = { 11714 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, 11715 .addr = 0xff, 11716 .def_md_devad = 0, 11717 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11718 FLAGS_REARM_LATCH_SIGNAL | 11719 FLAGS_TX_ERROR_CHECK), 11720 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11721 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11722 .mdio_ctrl = 0, 
11723 .supported = (SUPPORTED_100baseT_Half | 11724 SUPPORTED_100baseT_Full | 11725 SUPPORTED_1000baseT_Full | 11726 SUPPORTED_10000baseT_Full | 11727 SUPPORTED_TP | 11728 SUPPORTED_Autoneg | 11729 SUPPORTED_Pause | 11730 SUPPORTED_Asym_Pause), 11731 .media_type = ETH_PHY_BASE_T, 11732 .ver_addr = 0, 11733 .req_flow_ctrl = 0, 11734 .req_line_speed = 0, 11735 .speed_cap_mask = 0, 11736 .req_duplex = 0, 11737 .rsrv = 0, 11738 .config_init = (config_init_t)bnx2x_848x3_config_init, 11739 .read_status = (read_status_t)bnx2x_848xx_read_status, 11740 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 11741 .config_loopback = (config_loopback_t)NULL, 11742 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11743 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 11744 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11745 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 11746 }; 11747 11748 static const struct bnx2x_phy phy_84834 = { 11749 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834, 11750 .addr = 0xff, 11751 .def_md_devad = 0, 11752 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11753 FLAGS_REARM_LATCH_SIGNAL, 11754 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11755 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11756 .mdio_ctrl = 0, 11757 .supported = (SUPPORTED_100baseT_Half | 11758 SUPPORTED_100baseT_Full | 11759 SUPPORTED_1000baseT_Full | 11760 SUPPORTED_10000baseT_Full | 11761 SUPPORTED_TP | 11762 SUPPORTED_Autoneg | 11763 SUPPORTED_Pause | 11764 SUPPORTED_Asym_Pause), 11765 .media_type = ETH_PHY_BASE_T, 11766 .ver_addr = 0, 11767 .req_flow_ctrl = 0, 11768 .req_line_speed = 0, 11769 .speed_cap_mask = 0, 11770 .req_duplex = 0, 11771 .rsrv = 0, 11772 .config_init = (config_init_t)bnx2x_848x3_config_init, 11773 .read_status = (read_status_t)bnx2x_848xx_read_status, 11774 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 11775 .config_loopback = (config_loopback_t)NULL, 11776 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11777 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 11778 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11779 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 11780 }; 11781 11782 static const struct bnx2x_phy phy_54618se = { 11783 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, 11784 .addr = 0xff, 11785 .def_md_devad = 0, 11786 .flags = FLAGS_INIT_XGXS_FIRST, 11787 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11788 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11789 .mdio_ctrl = 0, 11790 .supported = (SUPPORTED_10baseT_Half | 11791 SUPPORTED_10baseT_Full | 11792 SUPPORTED_100baseT_Half | 11793 SUPPORTED_100baseT_Full | 11794 SUPPORTED_1000baseT_Full | 11795 SUPPORTED_TP | 11796 SUPPORTED_Autoneg | 11797 SUPPORTED_Pause | 11798 SUPPORTED_Asym_Pause), 11799 .media_type = ETH_PHY_BASE_T, 11800 .ver_addr = 0, 11801 .req_flow_ctrl = 0, 11802 .req_line_speed = 0, 11803 .speed_cap_mask = 0, 11804 /* req_duplex = */0, 11805 /* rsrv = */0, 11806 .config_init = (config_init_t)bnx2x_54618se_config_init, 11807 .read_status = (read_status_t)bnx2x_54618se_read_status, 11808 .link_reset = (link_reset_t)bnx2x_54618se_link_reset, 11809 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, 11810 .format_fw_ver = (format_fw_ver_t)NULL, 11811 .hw_reset = (hw_reset_t)NULL, 11812 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, 11813 .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func 11814 }; 11815 
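/* Note: the phy_* structures above are read-only templates. They are not
 * used in place; bnx2x_populate_int_phy() and bnx2x_populate_ext_phy()
 * below copy the template matching the board configuration into
 * params->phy[] and then patch the per-port fields (addr, mdio_ctrl,
 * supported, flags) from shmem. A rough sketch of that flow, taken from
 * the code below:
 *
 *	*phy = phy_warpcore;		(or phy_serdes / phy_xgxs / phy_84833 ...)
 *	phy->addr = (u8)phy_addr;	(from NIG/MISC registers, or from
 *					 XGXS_EXT_PHY_ADDR(ext_phy_config))
 *	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 */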
/*****************************************************************/
/*                                                               */
/* Populate the phy accordingly. Main function: bnx2x_populate_phy */
/*                                                               */
/*****************************************************************/

static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
				       struct bnx2x_phy *phy, u8 port,
				       u8 phy_index)
{
	/* Get the 4 lanes xgxs config rx and tx */
	u32 rx = 0, tx = 0, i;
	for (i = 0; i < 2; i++) {
		/* INT_PHY and EXT_PHY1 share the same value location in
		 * the shmem. When num_phys is greater than 1, then this value
		 * applies only to EXT_PHY1
		 */
		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
			rx = REG_RD(bp, shmem_base +
				    offsetof(struct shmem_region,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));

			tx = REG_RD(bp, shmem_base +
				    offsetof(struct shmem_region,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
		} else {
			rx = REG_RD(bp, shmem_base +
				    offsetof(struct shmem_region,
			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));

			tx = REG_RD(bp, shmem_base +
				    offsetof(struct shmem_region,
			 dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
		}

		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
		phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);

		phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
		phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
	}
}

static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
				    u8 phy_index, u8 port)
{
	u32 ext_phy_config = 0;
	switch (phy_index) {
	case EXT_PHY1:
		ext_phy_config = REG_RD(bp, shmem_base +
					offsetof(struct shmem_region,
			dev_info.port_hw_config[port].external_phy_config));
		break;
	case EXT_PHY2:
		ext_phy_config = REG_RD(bp, shmem_base +
					offsetof(struct shmem_region,
			dev_info.port_hw_config[port].external_phy_config2));
		break;
	default:
		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
		return -EINVAL;
	}

	return ext_phy_config;
}
static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
				  struct bnx2x_phy *phy)
{
	u32 phy_addr;
	u32 chip_id;
	u32 switch_cfg = (REG_RD(bp, shmem_base +
				 offsetof(struct shmem_region,
			dev_info.port_feature_config[port].link_config)) &
			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
	chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
		((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);

	DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
	if (USES_WARPCORE(bp)) {
		u32 serdes_net_if;
		phy_addr = REG_RD(bp,
				  MISC_REG_WC0_CTRL_PHY_ADDR);
		*phy = phy_warpcore;
		if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
			phy->flags |= FLAGS_4_PORT_MODE;
		else
			phy->flags &= ~FLAGS_4_PORT_MODE;
		/* Check Dual mode */
		serdes_net_if = (REG_RD(bp, shmem_base +
					offsetof(struct shmem_region, dev_info.
11905 port_hw_config[port].default_cfg)) & 11906 PORT_HW_CFG_NET_SERDES_IF_MASK); 11907 /* Set the appropriate supported and flags indications per 11908 * interface type of the chip 11909 */ 11910 switch (serdes_net_if) { 11911 case PORT_HW_CFG_NET_SERDES_IF_SGMII: 11912 phy->supported &= (SUPPORTED_10baseT_Half | 11913 SUPPORTED_10baseT_Full | 11914 SUPPORTED_100baseT_Half | 11915 SUPPORTED_100baseT_Full | 11916 SUPPORTED_1000baseT_Full | 11917 SUPPORTED_FIBRE | 11918 SUPPORTED_Autoneg | 11919 SUPPORTED_Pause | 11920 SUPPORTED_Asym_Pause); 11921 phy->media_type = ETH_PHY_BASE_T; 11922 break; 11923 case PORT_HW_CFG_NET_SERDES_IF_XFI: 11924 phy->supported &= (SUPPORTED_1000baseT_Full | 11925 SUPPORTED_10000baseT_Full | 11926 SUPPORTED_FIBRE | 11927 SUPPORTED_Pause | 11928 SUPPORTED_Asym_Pause); 11929 phy->media_type = ETH_PHY_XFP_FIBER; 11930 break; 11931 case PORT_HW_CFG_NET_SERDES_IF_SFI: 11932 phy->supported &= (SUPPORTED_1000baseT_Full | 11933 SUPPORTED_10000baseT_Full | 11934 SUPPORTED_FIBRE | 11935 SUPPORTED_Pause | 11936 SUPPORTED_Asym_Pause); 11937 phy->media_type = ETH_PHY_SFPP_10G_FIBER; 11938 break; 11939 case PORT_HW_CFG_NET_SERDES_IF_KR: 11940 phy->media_type = ETH_PHY_KR; 11941 phy->supported &= (SUPPORTED_1000baseT_Full | 11942 SUPPORTED_10000baseT_Full | 11943 SUPPORTED_FIBRE | 11944 SUPPORTED_Autoneg | 11945 SUPPORTED_Pause | 11946 SUPPORTED_Asym_Pause); 11947 break; 11948 case PORT_HW_CFG_NET_SERDES_IF_DXGXS: 11949 phy->media_type = ETH_PHY_KR; 11950 phy->flags |= FLAGS_WC_DUAL_MODE; 11951 phy->supported &= (SUPPORTED_20000baseMLD2_Full | 11952 SUPPORTED_FIBRE | 11953 SUPPORTED_Pause | 11954 SUPPORTED_Asym_Pause); 11955 break; 11956 case PORT_HW_CFG_NET_SERDES_IF_KR2: 11957 phy->media_type = ETH_PHY_KR; 11958 phy->flags |= FLAGS_WC_DUAL_MODE; 11959 phy->supported &= (SUPPORTED_20000baseKR2_Full | 11960 SUPPORTED_10000baseT_Full | 11961 SUPPORTED_1000baseT_Full | 11962 SUPPORTED_Autoneg | 11963 SUPPORTED_FIBRE | 11964 SUPPORTED_Pause | 11965 SUPPORTED_Asym_Pause); 11966 phy->flags &= ~FLAGS_TX_ERROR_CHECK; 11967 break; 11968 default: 11969 DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", 11970 serdes_net_if); 11971 break; 11972 } 11973 11974 /* Enable MDC/MDIO work-around for E3 A0 since free running MDC 11975 * was not set as expected. 
For B0, ECO will be enabled so there 11976 * won't be an issue there 11977 */ 11978 if (CHIP_REV(bp) == CHIP_REV_Ax) 11979 phy->flags |= FLAGS_MDC_MDIO_WA; 11980 else 11981 phy->flags |= FLAGS_MDC_MDIO_WA_B0; 11982 } else { 11983 switch (switch_cfg) { 11984 case SWITCH_CFG_1G: 11985 phy_addr = REG_RD(bp, 11986 NIG_REG_SERDES0_CTRL_PHY_ADDR + 11987 port * 0x10); 11988 *phy = phy_serdes; 11989 break; 11990 case SWITCH_CFG_10G: 11991 phy_addr = REG_RD(bp, 11992 NIG_REG_XGXS0_CTRL_PHY_ADDR + 11993 port * 0x18); 11994 *phy = phy_xgxs; 11995 break; 11996 default: 11997 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); 11998 return -EINVAL; 11999 } 12000 } 12001 phy->addr = (u8)phy_addr; 12002 phy->mdio_ctrl = bnx2x_get_emac_base(bp, 12003 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, 12004 port); 12005 if (CHIP_IS_E2(bp)) 12006 phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR; 12007 else 12008 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; 12009 12010 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", 12011 port, phy->addr, phy->mdio_ctrl); 12012 12013 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); 12014 return 0; 12015 } 12016 12017 static int bnx2x_populate_ext_phy(struct bnx2x *bp, 12018 u8 phy_index, 12019 u32 shmem_base, 12020 u32 shmem2_base, 12021 u8 port, 12022 struct bnx2x_phy *phy) 12023 { 12024 u32 ext_phy_config, phy_type, config2; 12025 u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; 12026 ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, 12027 phy_index, port); 12028 phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 12029 /* Select the phy type */ 12030 switch (phy_type) { 12031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 12032 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; 12033 *phy = phy_8073; 12034 break; 12035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 12036 *phy = phy_8705; 12037 break; 12038 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 12039 *phy = phy_8706; 12040 break; 12041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 12042 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; 12043 *phy = phy_8726; 12044 break; 12045 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: 12046 /* BCM8727_NOC => BCM8727 no over current */ 12047 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; 12048 *phy = phy_8727; 12049 phy->flags |= FLAGS_NOC; 12050 break; 12051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: 12052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 12053 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; 12054 *phy = phy_8727; 12055 break; 12056 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 12057 *phy = phy_8481; 12058 break; 12059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: 12060 *phy = phy_84823; 12061 break; 12062 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 12063 *phy = phy_84833; 12064 break; 12065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: 12066 *phy = phy_84834; 12067 break; 12068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: 12069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: 12070 *phy = phy_54618se; 12071 if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) 12072 phy->flags |= FLAGS_EEE; 12073 break; 12074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 12075 *phy = phy_7101; 12076 break; 12077 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 12078 *phy = phy_null; 12079 return -EINVAL; 12080 default: 12081 *phy = phy_null; 12082 /* In case external PHY wasn't found */ 12083 if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 12084 (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 12085 return -EINVAL; 12086 return 0; 
12087 } 12088 12089 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 12090 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 12091 12092 /* The shmem address of the phy version is located on different 12093 * structures. In case this structure is too old, do not set 12094 * the address 12095 */ 12096 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, 12097 dev_info.shared_hw_config.config2)); 12098 if (phy_index == EXT_PHY1) { 12099 phy->ver_addr = shmem_base + offsetof(struct shmem_region, 12100 port_mb[port].ext_phy_fw_version); 12101 12102 /* Check specific mdc mdio settings */ 12103 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) 12104 mdc_mdio_access = config2 & 12105 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; 12106 } else { 12107 u32 size = REG_RD(bp, shmem2_base); 12108 12109 if (size > 12110 offsetof(struct shmem2_region, ext_phy_fw_version2)) { 12111 phy->ver_addr = shmem2_base + 12112 offsetof(struct shmem2_region, 12113 ext_phy_fw_version2[port]); 12114 } 12115 /* Check specific mdc mdio settings */ 12116 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) 12117 mdc_mdio_access = (config2 & 12118 SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >> 12119 (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - 12120 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT); 12121 } 12122 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); 12123 12124 if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 12125 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && 12126 (phy->ver_addr)) { 12127 /* Remove 100Mb link supported for BCM84833/4 when phy fw 12128 * version lower than or equal to 1.39 12129 */ 12130 u32 raw_ver = REG_RD(bp, phy->ver_addr); 12131 if (((raw_ver & 0x7F) <= 39) && 12132 (((raw_ver & 0xF80) >> 7) <= 1)) 12133 phy->supported &= ~(SUPPORTED_100baseT_Half | 12134 SUPPORTED_100baseT_Full); 12135 } 12136 12137 DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n", 12138 phy_type, port, phy_index); 12139 DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n", 12140 phy->addr, phy->mdio_ctrl); 12141 return 0; 12142 } 12143 12144 static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, 12145 u32 shmem2_base, u8 port, struct bnx2x_phy *phy) 12146 { 12147 int status = 0; 12148 phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; 12149 if (phy_index == INT_PHY) 12150 return bnx2x_populate_int_phy(bp, shmem_base, port, phy); 12151 status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, 12152 port, phy); 12153 return status; 12154 } 12155 12156 static void bnx2x_phy_def_cfg(struct link_params *params, 12157 struct bnx2x_phy *phy, 12158 u8 phy_index) 12159 { 12160 struct bnx2x *bp = params->bp; 12161 u32 link_config; 12162 /* Populate the default phy configuration for MF mode */ 12163 if (phy_index == EXT_PHY2) { 12164 link_config = REG_RD(bp, params->shmem_base + 12165 offsetof(struct shmem_region, dev_info. 12166 port_feature_config[params->port].link_config2)); 12167 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 12168 offsetof(struct shmem_region, 12169 dev_info. 12170 port_hw_config[params->port].speed_capability_mask2)); 12171 } else { 12172 link_config = REG_RD(bp, params->shmem_base + 12173 offsetof(struct shmem_region, dev_info. 12174 port_feature_config[params->port].link_config)); 12175 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 12176 offsetof(struct shmem_region, 12177 dev_info. 
			port_hw_config[params->port].speed_capability_mask));
	}
	DP(NETIF_MSG_LINK,
	   "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
	   phy_index, link_config, phy->speed_cap_mask);

	phy->req_duplex = DUPLEX_FULL;
	switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		phy->req_duplex = DUPLEX_HALF;
	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		phy->req_line_speed = SPEED_10;
		break;
	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		phy->req_duplex = DUPLEX_HALF;
	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		phy->req_line_speed = SPEED_100;
		break;
	case PORT_FEATURE_LINK_SPEED_1G:
		phy->req_line_speed = SPEED_1000;
		break;
	case PORT_FEATURE_LINK_SPEED_2_5G:
		phy->req_line_speed = SPEED_2500;
		break;
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
		phy->req_line_speed = SPEED_10000;
		break;
	default:
		phy->req_line_speed = SPEED_AUTO_NEG;
		break;
	}

	switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
	case PORT_FEATURE_FLOW_CONTROL_AUTO:
		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
		break;
	case PORT_FEATURE_FLOW_CONTROL_TX:
		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
		break;
	case PORT_FEATURE_FLOW_CONTROL_RX:
		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
		break;
	case PORT_FEATURE_FLOW_CONTROL_BOTH:
		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
		break;
	default:
		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
		break;
	}
}

u32 bnx2x_phy_selection(struct link_params *params)
{
	u32 phy_config_swapped, prio_cfg;
	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;

	phy_config_swapped = params->multi_phy_config &
		PORT_HW_CFG_PHY_SWAPPED_ENABLED;

	prio_cfg = params->multi_phy_config &
			PORT_HW_CFG_PHY_SELECTION_MASK;

	if (phy_config_swapped) {
		switch (prio_cfg) {
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
			break;
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
			break;
		}
	} else
		return_cfg = prio_cfg;

	return return_cfg;
}

int bnx2x_phy_probe(struct link_params *params)
{
	u8 phy_index, actual_phy_idx;
	u32 phy_config_swapped, sync_offset, media_types;
	struct bnx2x *bp = params->bp;
	struct bnx2x_phy *phy;
	params->num_phys = 0;
	DP(NETIF_MSG_LINK, "Begin phy probe\n");
	phy_config_swapped = params->multi_phy_config &
		PORT_HW_CFG_PHY_SWAPPED_ENABLED;

	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
	      phy_index++) {
		actual_phy_idx = phy_index;
		if (phy_config_swapped) {
			if (phy_index == EXT_PHY1)
				actual_phy_idx = EXT_PHY2;
			else if (phy_index == EXT_PHY2)
				actual_phy_idx = EXT_PHY1;
		}
		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
			       " actual_phy_idx %x\n", phy_config_swapped,
			   phy_index, actual_phy_idx);
		phy = &params->phy[actual_phy_idx];
		if (bnx2x_populate_phy(bp, phy_index,
				       params->shmem_base,
				       params->shmem2_base, params->port,
				       phy) != 0) {
			params->num_phys = 0;
			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
			   phy_index);
			for (phy_index = INT_PHY;
			      phy_index < MAX_PHYS;
			      phy_index++)
				*phy = phy_null;
			return -EINVAL;
		}
		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
			break;

		if (params->feature_config_flags &
		    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
			phy->flags &= ~FLAGS_TX_ERROR_CHECK;

		if (!(params->feature_config_flags &
		      FEATURE_CONFIG_MT_SUPPORT))
			phy->flags |= FLAGS_MDC_MDIO_WA_G;

		sync_offset = params->shmem_base +
			offsetof(struct shmem_region,
			dev_info.port_hw_config[params->port].media_type);
		media_types = REG_RD(bp, sync_offset);

		/* Update media type for non-PMF sync only for the first time
		 * In case the media type changes afterwards, it will be updated
		 * using the update_status function
		 */
		if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
				    (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
				     actual_phy_idx))) == 0) {
			media_types |= ((phy->media_type &
					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
				 actual_phy_idx));
		}
		REG_WR(bp, sync_offset, media_types);

		bnx2x_phy_def_cfg(params, phy, phy_index);
		params->num_phys++;
	}

	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
	return 0;
}

static void bnx2x_init_bmac_loopback(struct link_params *params,
				     struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	vars->link_up = 1;
	vars->line_speed = SPEED_10000;
	vars->duplex = DUPLEX_FULL;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->mac_type = MAC_TYPE_BMAC;

	vars->phy_flags = PHY_XGXS_FLAG;

	bnx2x_xgxs_deassert(params);

	/* Set bmac loopback */
	bnx2x_bmac_enable(params, vars, 1, 1);

	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}

static void bnx2x_init_emac_loopback(struct link_params *params,
				     struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	vars->link_up = 1;
	vars->line_speed = SPEED_1000;
	vars->duplex = DUPLEX_FULL;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->mac_type = MAC_TYPE_EMAC;

	vars->phy_flags = PHY_XGXS_FLAG;

	bnx2x_xgxs_deassert(params);
	/* Set emac loopback */
	bnx2x_emac_enable(params, vars, 1);
	bnx2x_emac_program(params, vars);
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}

static void bnx2x_init_xmac_loopback(struct link_params *params,
				     struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	vars->link_up = 1;
	if (!params->req_line_speed[0])
		vars->line_speed = SPEED_10000;
	else
		vars->line_speed = params->req_line_speed[0];
	vars->duplex = DUPLEX_FULL;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->mac_type = MAC_TYPE_XMAC;
	vars->phy_flags = PHY_XGXS_FLAG;
	/* Set WC to loopback mode since link is required to provide clock
	 * to the XMAC in 20G mode
	 */
	bnx2x_set_aer_mmd(params, &params->phy[0]);
	bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
	params->phy[INT_PHY].config_loopback(
			&params->phy[INT_PHY],
			params);

	bnx2x_xmac_enable(params, vars, 1);
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}

static void bnx2x_init_umac_loopback(struct link_params *params,
				     struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	vars->link_up = 1;
	vars->line_speed = SPEED_1000;
	vars->duplex = DUPLEX_FULL;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->mac_type = MAC_TYPE_UMAC;
	vars->phy_flags = PHY_XGXS_FLAG;
	bnx2x_umac_enable(params, vars, 1);

	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}

static void bnx2x_init_xgxs_loopback(struct link_params *params,
				     struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
	vars->link_up = 1;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->duplex = DUPLEX_FULL;
	if (params->req_line_speed[0] == SPEED_1000)
		vars->line_speed = SPEED_1000;
	else if ((params->req_line_speed[0] == SPEED_20000) ||
		 (int_phy->flags & FLAGS_WC_DUAL_MODE))
		vars->line_speed = SPEED_20000;
	else
		vars->line_speed = SPEED_10000;

	if (!USES_WARPCORE(bp))
		bnx2x_xgxs_deassert(params);
	bnx2x_link_initialize(params, vars);

	if (params->req_line_speed[0] == SPEED_1000) {
		if (USES_WARPCORE(bp))
			bnx2x_umac_enable(params, vars, 0);
		else {
			bnx2x_emac_program(params, vars);
			bnx2x_emac_enable(params, vars, 0);
		}
	} else {
		if (USES_WARPCORE(bp))
			bnx2x_xmac_enable(params, vars, 0);
		else
			bnx2x_bmac_enable(params, vars, 0, 1);
	}

	if (params->loopback_mode == LOOPBACK_XGXS) {
		/* Set 10G XGXS loopback */
		int_phy->config_loopback(int_phy, params);
	} else {
		/* Set external phy loopback */
		u8 phy_index;
		for (phy_index = EXT_PHY1;
		      phy_index < params->num_phys; phy_index++)
			if (params->phy[phy_index].config_loopback)
				params->phy[phy_index].config_loopback(
					&params->phy[phy_index],
					params);
	}
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);

	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
}

void bnx2x_set_rx_filter(struct link_params *params, u8 en)
{
	struct bnx2x *bp = params->bp;
	u8 val = en * 0x1F;

	/* Open / close the gate between the NIG and the BRB */
	if (!CHIP_IS_E1x(bp))
		val |= en * 0x20;
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);

	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
		       en*0x3);
	}

	REG_WR(bp, (params->port ?
		    NIG_REG_LLH1_BRB1_NOT_MCP :
		    NIG_REG_LLH0_BRB1_NOT_MCP), en);
}
static int bnx2x_avoid_link_flap(struct link_params *params,
				 struct link_vars *vars)
{
	u32 phy_idx;
	u32 dont_clear_stat, lfa_sts;
	struct bnx2x *bp = params->bp;

	/* Sync the link parameters */
	bnx2x_link_status_update(params, vars);

	/*
	 * The module verification was already done by the previous link owner,
	 * so this call is meant only to get a warning message
	 */

	for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
		struct bnx2x_phy *phy = &params->phy[phy_idx];
		if (phy->phy_specific_func) {
			DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
			phy->phy_specific_func(phy, params, PHY_INIT);
		}
		if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
		    (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
		    (phy->media_type == ETH_PHY_DA_TWINAX))
			bnx2x_verify_sfp_module(phy, params);
	}
	lfa_sts = REG_RD(bp, params->lfa_base +
			 offsetof(struct shmem_lfa,
				  lfa_sts));

	dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;

	/* Re-enable the NIG/MAC */
	if (CHIP_IS_E3(bp)) {
		if (!dont_clear_stat) {
			REG_WR(bp, GRCBASE_MISC +
			       MISC_REGISTERS_RESET_REG_2_CLEAR,
			       (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
				params->port));
			REG_WR(bp, GRCBASE_MISC +
			       MISC_REGISTERS_RESET_REG_2_SET,
			       (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
				params->port));
		}
		if (vars->line_speed < SPEED_10000)
			bnx2x_umac_enable(params, vars, 0);
		else
			bnx2x_xmac_enable(params, vars, 0);
	} else {
		if (vars->line_speed < SPEED_10000)
			bnx2x_emac_enable(params, vars, 0);
		else
			bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
	}

	/* Increment LFA count */
	lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
		   (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
		       LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
		    << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
	/* Clear link flap reason */
	lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;

	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, lfa_sts), lfa_sts);

	/* Disable NIG DRAIN */
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);

	/* Enable interrupts */
	bnx2x_link_int_enable(params);
	return 0;
}

static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
					 struct link_vars *vars,
					 int lfa_status)
{
	u32 lfa_sts, cfg_idx, tmp_val;
	struct bnx2x *bp = params->bp;

	bnx2x_link_reset(params, vars, 1);

	if (!params->lfa_base)
		return;
	/* Store the new link parameters */
	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, req_duplex),
	       params->req_duplex[0] | (params->req_duplex[1] << 16));

	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, req_flow_ctrl),
	       params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));

	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, req_line_speed),
	       params->req_line_speed[0] | (params->req_line_speed[1] << 16));

	for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
		REG_WR(bp, params->lfa_base +
		       offsetof(struct shmem_lfa,
				speed_cap_mask[cfg_idx]),
		       params->speed_cap_mask[cfg_idx]);
	}

	tmp_val =
	tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
	tmp_val |= params->req_fc_auto_adv;

	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, additional_config), tmp_val);

	lfa_sts = REG_RD(bp, params->lfa_base +
			 offsetof(struct shmem_lfa, lfa_sts));

	/* Clear the "Don't Clear Statistics" bit, and set reason */
	lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;

	/* Set link flap reason */
	lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
	lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
		    LFA_LINK_FLAP_REASON_OFFSET);

	/* Increment link flap counter */
	lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
		   (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
		       LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
		    << LINK_FLAP_COUNT_OFFSET));
	REG_WR(bp, params->lfa_base +
	       offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
	/* Proceed with regular link initialization */
}

int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
{
	int lfa_status;
	struct bnx2x *bp = params->bp;
	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
	   params->req_line_speed[0], params->req_flow_ctrl[0]);
	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
	   params->req_line_speed[1], params->req_flow_ctrl[1]);
	DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
	vars->link_status = 0;
	vars->phy_link_up = 0;
	vars->link_up = 0;
	vars->line_speed = 0;
	vars->duplex = DUPLEX_FULL;
	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
	vars->mac_type = MAC_TYPE_NONE;
	vars->phy_flags = 0;
	vars->check_kr2_recovery_cnt = 0;
	params->link_flags = PHY_INITIALIZED;
	/* Driver opens NIG-BRB filters */
	bnx2x_set_rx_filter(params, 1);
	/* Check if link flap can be avoided */
	lfa_status = bnx2x_check_lfa(params);

	if (lfa_status == 0) {
		DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
		return bnx2x_avoid_link_flap(params, vars);
	}

	DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
	   lfa_status);
	bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);

	/* Disable attentions */
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	bnx2x_emac_init(params, vars);

	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
		vars->link_status |= LINK_STATUS_PFC_ENABLED;

	if (params->num_phys == 0) {
		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
		return -EINVAL;
	}
	set_phy_vars(params, vars);

	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
	switch (params->loopback_mode) {
	case LOOPBACK_BMAC:
		bnx2x_init_bmac_loopback(params, vars);
		break;
	case LOOPBACK_EMAC:
		bnx2x_init_emac_loopback(params, vars);
		break;
	case LOOPBACK_XMAC:
		bnx2x_init_xmac_loopback(params, vars);
		break;
	case LOOPBACK_UMAC:
		bnx2x_init_umac_loopback(params, vars);
		break;
	case LOOPBACK_XGXS:
	case LOOPBACK_EXT_PHY:
		bnx2x_init_xgxs_loopback(params, vars);
		break;
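	/* Any other mode means a regular (non-loopback) link setup: bring the
	 * XGXS/SerDes out of reset on pre-E3 chips, run the full link
	 * initialization and then re-enable the link interrupts.
	 */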
	default:
		if (!CHIP_IS_E3(bp)) {
			if (params->switch_cfg == SWITCH_CFG_10G)
				bnx2x_xgxs_deassert(params);
			else
				bnx2x_serdes_deassert(bp, params->port);
		}
		bnx2x_link_initialize(params, vars);
		msleep(30);
		bnx2x_link_int_enable(params);
		break;
	}
	bnx2x_update_mng(params, vars->link_status);

	bnx2x_update_mng_eee(params, vars->eee_status);
	return 0;
}

int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
		     u8 reset_ext_phy)
{
	struct bnx2x *bp = params->bp;
	u8 phy_index, port = params->port, clear_latch_ind = 0;
	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
	/* Disable attentions */
	vars->link_status = 0;
	bnx2x_update_mng(params, vars->link_status);
	vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
			      SHMEM_EEE_ACTIVE_BIT);
	bnx2x_update_mng_eee(params, vars->eee_status);
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	/* Activate nig drain */
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);

	/* Disable nig egress interface */
	if (!CHIP_IS_E3(bp)) {
		REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
		REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
	}

	if (!CHIP_IS_E3(bp)) {
		bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
	} else {
		bnx2x_set_xmac_rxtx(params, 0);
		bnx2x_set_umac_rxtx(params, 0);
	}
	/* Disable emac */
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);

	usleep_range(10000, 20000);
	/* The PHY reset is controlled by GPIO 1
	 * Hold it as vars low
	 */
	/* Clear link led */
	bnx2x_set_mdio_emac_per_phy(bp, params);
	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);

	if (reset_ext_phy) {
		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
		      phy_index++) {
			if (params->phy[phy_index].link_reset) {
				bnx2x_set_aer_mmd(params,
						  &params->phy[phy_index]);
				params->phy[phy_index].link_reset(
					&params->phy[phy_index],
					params);
			}
			if (params->phy[phy_index].flags &
			    FLAGS_REARM_LATCH_SIGNAL)
				clear_latch_ind = 1;
		}
	}

	if (clear_latch_ind) {
		/* Clear latching indication */
		bnx2x_rearm_latch_signal(bp, port, 0);
		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
	}
	if (params->phy[INT_PHY].link_reset)
		params->phy[INT_PHY].link_reset(
			&params->phy[INT_PHY], params);

	/* Disable nig ingress interface */
	if (!CHIP_IS_E3(bp)) {
		/* Reset BigMac */
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
		REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
		REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
	} else {
		u32 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
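		/* On E3, detach the X/UMAC from the NIG; if the XMAC block is
		 * currently out of reset, also put it into soft reset so it
		 * stops driving traffic while the link is being torn down.
		 */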
		bnx2x_set_xumac_nig(params, 0, 0);
		if (REG_RD(bp, MISC_REG_RESET_REG_2) &
		    MISC_REGISTERS_RESET_REG_2_XMAC)
			REG_WR(bp, xmac_base + XMAC_REG_CTRL,
			       XMAC_CTRL_REG_SOFT_RESET);
	}
	vars->link_up = 0;
	vars->phy_flags = 0;
	return 0;
}
int bnx2x_lfa_reset(struct link_params *params,
		    struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	vars->link_up = 0;
	vars->phy_flags = 0;
	params->link_flags &= ~PHY_INITIALIZED;
	if (!params->lfa_base)
		return bnx2x_link_reset(params, vars, 1);
	/*
	 * Activate NIG drain so that during this time the device won't send
	 * anything while it is unable to respond.
	 */
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);

	/*
	 * Close gracefully the gate from BMAC to NIG such that no half packets
	 * are passed.
	 */
	if (!CHIP_IS_E3(bp))
		bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);

	if (CHIP_IS_E3(bp)) {
		bnx2x_set_xmac_rxtx(params, 0);
		bnx2x_set_umac_rxtx(params, 0);
	}
	/* Wait 10ms for the pipe to clean up */
	usleep_range(10000, 20000);

	/* Clean the NIG-BRB using the network filters in a way that will
	 * not cut a packet in the middle.
	 */
	bnx2x_set_rx_filter(params, 0);

	/*
	 * Re-open the gate between the BMAC and the NIG, after verifying the
	 * gate to the BRB is closed, otherwise packets may arrive at the
	 * firmware before the driver has initialized it. The target is to
	 * achieve minimum management protocol down time.
	 */
	if (!CHIP_IS_E3(bp))
		bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);

	if (CHIP_IS_E3(bp)) {
		bnx2x_set_xmac_rxtx(params, 1);
		bnx2x_set_umac_rxtx(params, 1);
	}
	/* Disable NIG drain */
	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
	return 0;
}

/****************************************************************************/
/*			Common function					    */
/****************************************************************************/
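/* Common init for the BCM8073: reset both external PHYs (PART1), boot their
 * SPI-ROM firmware (PART2), then complete the transmitter power-down toggle
 * and return GPIO2 to low (PART3).
 */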
static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
				      u32 shmem_base_path[],
				      u32 shmem2_base_path[], u8 phy_index,
				      u32 chip_id)
{
	struct bnx2x_phy phy[PORT_MAX];
	struct bnx2x_phy *phy_blk[PORT_MAX];
	u16 val;
	s8 port = 0;
	s8 port_of_path = 0;
	u32 swap_val, swap_override;
	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
	port ^= (swap_val && swap_override);
	bnx2x_ext_phy_hw_reset(bp, port);
	/* PART1 - Reset both phys */
	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
		u32 shmem_base, shmem2_base;
		/* In E2, the same phy is used for port0 of the two paths */
		if (CHIP_IS_E1x(bp)) {
			shmem_base = shmem_base_path[0];
			shmem2_base = shmem2_base_path[0];
			port_of_path = port;
		} else {
			shmem_base = shmem_base_path[port];
			shmem2_base = shmem2_base_path[port];
			port_of_path = 0;
		}

		/* Extract the ext phy address for the port */
		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
				       port_of_path, &phy[port]) != 0) {
			DP(NETIF_MSG_LINK, "populate_phy failed\n");
			return -EINVAL;
		}
		/* Disable attentions */
		bnx2x_bits_dis(bp,
			       NIG_REG_MASK_INTERRUPT_PORT0 +
			       port_of_path*4,
			       (NIG_MASK_XGXS0_LINK_STATUS |
				NIG_MASK_XGXS0_LINK10G |
				NIG_MASK_SERDES0_LINK_STATUS |
				NIG_MASK_MI_INT));

		/* Need to take the phy out of low power mode in order
		 * to access its registers
		 */
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
			       port);

		/* Reset the phy */
		bnx2x_cl45_write(bp, &phy[port],
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_CTRL,
				 1<<15);
	}

	/* Add delay of 150ms after reset */
	msleep(150);

	if (phy[PORT_0].addr & 0x1) {
		phy_blk[PORT_0] = &(phy[PORT_1]);
		phy_blk[PORT_1] = &(phy[PORT_0]);
	} else {
		phy_blk[PORT_0] = &(phy[PORT_0]);
		phy_blk[PORT_1] = &(phy[PORT_1]);
	}

	/* PART2 - Download firmware to both phys */
	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
		if (CHIP_IS_E1x(bp))
			port_of_path = port;
		else
			port_of_path = 0;

		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
		   phy_blk[port]->addr);
		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
						      port_of_path))
			return -EINVAL;

		/* Only set bit 10 = 1 (Tx power down) */
		bnx2x_cl45_read(bp, phy_blk[port],
				MDIO_PMA_DEVAD,
				MDIO_PMA_REG_TX_POWER_DOWN, &val);

		/* Phase1 of TX_POWER_DOWN reset */
		bnx2x_cl45_write(bp, phy_blk[port],
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_TX_POWER_DOWN,
				 (val | 1<<10));
	}

	/* Toggle Transmitter: Power down and then up with 600ms delay
	 * between
	 */
	msleep(600);

	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
		/* Phase2 of POWER_DOWN_RESET */
		/* Release bit 10 (Release Tx power down) */
		bnx2x_cl45_read(bp, phy_blk[port],
				MDIO_PMA_DEVAD,
				MDIO_PMA_REG_TX_POWER_DOWN, &val);

		bnx2x_cl45_write(bp, phy_blk[port],
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
		usleep_range(15000, 30000);

		/* Read modify write the SPI-ROM version select register */
		bnx2x_cl45_read(bp, phy_blk[port],
				MDIO_PMA_DEVAD,
				MDIO_PMA_REG_EDC_FFE_MAIN, &val);
		bnx2x_cl45_write(bp, phy_blk[port],
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));

		/* set GPIO2 back to LOW */
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	}
	return 0;
}
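/* Common init for the BCM8726: enable the GPIO3 module-detection event for
 * both ports, pulse the external PHY hardware reset once, then reset each
 * PHY over MDIO and turn on the module-fault LED per port.
 */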
static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
				      u32 shmem_base_path[],
				      u32 shmem2_base_path[], u8 phy_index,
				      u32 chip_id)
{
	u32 val;
	s8 port;
	struct bnx2x_phy phy;
	/* Use port1 because of the static port-swap */
	/* Enable the module detection interrupt */
	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
	val |= ((1<<MISC_REGISTERS_GPIO_3)|
		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);

	bnx2x_ext_phy_hw_reset(bp, 0);
	usleep_range(5000, 10000);
	for (port = 0; port < PORT_MAX; port++) {
		u32 shmem_base, shmem2_base;

		/* In E2, the same phy is used for port0 of the two paths */
		if (CHIP_IS_E1x(bp)) {
			shmem_base = shmem_base_path[0];
			shmem2_base = shmem2_base_path[0];
		} else {
			shmem_base = shmem_base_path[port];
			shmem2_base = shmem2_base_path[port];
		}
		/* Extract the ext phy address for the port */
		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
				       port, &phy) != 0) {
			DP(NETIF_MSG_LINK, "populate phy failed\n");
			return -EINVAL;
		}

		/* Reset phy */
		bnx2x_cl45_write(bp, &phy,
				 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);

		/* Set fault module detected LED on */
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
			       MISC_REGISTERS_GPIO_HIGH,
			       port);
	}

	return 0;
}
static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
					 u8 *io_gpio, u8 *io_port)
{

	u32 phy_gpio_reset = REG_RD(bp, shmem_base +
				    offsetof(struct shmem_region,
			dev_info.port_hw_config[PORT_0].default_cfg));
	switch (phy_gpio_reset) {
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
		*io_gpio = 0;
		*io_port = 0;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
		*io_gpio = 1;
		*io_port = 0;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
		*io_gpio = 2;
		*io_port = 0;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
		*io_gpio = 3;
		*io_port = 0;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
		*io_gpio = 0;
		*io_port = 1;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
		*io_gpio = 1;
		*io_port = 1;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
		*io_gpio = 2;
		*io_port = 1;
		break;
	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
		*io_gpio = 3;
		*io_port = 1;
		break;
	default:
		/* Don't override the io_gpio and io_port */
		break;
	}
}

static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
				      u32 shmem_base_path[],
				      u32 shmem2_base_path[], u8 phy_index,
				      u32 chip_id)
{
	s8 port, reset_gpio;
	u32 swap_val, swap_override;
	struct bnx2x_phy phy[PORT_MAX];
	struct bnx2x_phy *phy_blk[PORT_MAX];
	s8 port_of_path;
	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

	reset_gpio = MISC_REGISTERS_GPIO_1;
	port = 1;

	/* Retrieve the reset gpio/port which controls the reset.
	 * Default is GPIO1, PORT1
	 */
	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
				     (u8 *)&reset_gpio, (u8 *)&port);

	/* Calculate the port based on port swap */
	port ^= (swap_val && swap_override);

	/* Initiate PHY reset */
	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
		       port);
	usleep_range(1000, 2000);
	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
		       port);

	usleep_range(5000, 10000);

	/* PART1 - Reset both phys */
	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
		u32 shmem_base, shmem2_base;

		/* In E2, the same phy is used for port0 of the two paths */
		if (CHIP_IS_E1x(bp)) {
			shmem_base = shmem_base_path[0];
			shmem2_base = shmem2_base_path[0];
			port_of_path = port;
		} else {
			shmem_base = shmem_base_path[port];
			shmem2_base = shmem2_base_path[port];
			port_of_path = 0;
		}

		/* Extract the ext phy address for the port */
		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
				       port_of_path, &phy[port]) != 0) {
			DP(NETIF_MSG_LINK, "populate phy failed\n");
			return -EINVAL;
		}
		/* disable attentions */
		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
			       port_of_path*4,
			       (NIG_MASK_XGXS0_LINK_STATUS |
				NIG_MASK_XGXS0_LINK10G |
				NIG_MASK_SERDES0_LINK_STATUS |
				NIG_MASK_MI_INT));

		/* Reset the phy */
		bnx2x_cl45_write(bp, &phy[port],
				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
	}

	/* Add delay of 150ms after reset */
	msleep(150);
	if (phy[PORT_0].addr & 0x1) {
		phy_blk[PORT_0] = &(phy[PORT_1]);
		phy_blk[PORT_1] = &(phy[PORT_0]);
	} else {
		phy_blk[PORT_0] = &(phy[PORT_0]);
		phy_blk[PORT_1] = &(phy[PORT_1]);
	}
	/* PART2 - Download firmware to both phys */
	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
		if (CHIP_IS_E1x(bp))
			port_of_path = port;
		else
			port_of_path = 0;
		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
		   phy_blk[port]->addr);
		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
						      port_of_path))
			return -EINVAL;
		/* Disable PHY transmitter output */
		bnx2x_cl45_write(bp, phy_blk[port],
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_TX_DISABLE, 1);

	}
	return 0;
}

static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
				       u32 shmem_base_path[],
				       u32 shmem2_base_path[],
				       u8 phy_index,
				       u32 chip_id)
{
	u8 reset_gpios;
	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
	udelay(10);
	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
	   reset_gpios);
	return 0;
}

static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
				     u32 shmem2_base_path[], u8 phy_index,
				     u32 ext_phy_type, u32 chip_id)
{
	int rc = 0;

	switch (ext_phy_type) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
						shmem2_base_path,
						phy_index, chip_id);
		break;
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
		rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
						shmem2_base_path,
						phy_index, chip_id);
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		/* GPIO1 affects both ports, so there's a need to pull
		 * it for a single port alone
		 */
		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
						shmem2_base_path,
						phy_index, chip_id);
		break;
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
		/* GPIO3's are linked, and so both need to be toggled
		 * to obtain the required 2us pulse.
		 */
		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
						 shmem2_base_path,
						 phy_index, chip_id);
		break;
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
		rc = -EINVAL;
		break;
	default:
		DP(NETIF_MSG_LINK,
		   "ext_phy 0x%x common init not required\n",
		   ext_phy_type);
		break;
	}

	if (rc)
		netdev_err(bp->dev, "Warning: PHY was not initialized,"
				    " Port %d\n",
			   0);
	return rc;
}

int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
			  u32 shmem2_base_path[], u32 chip_id)
{
	int rc = 0;
	u32 phy_ver, val;
	u8 phy_index = 0;
	u32 ext_phy_type, ext_phy_config;

	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
	DP(NETIF_MSG_LINK, "Begin common phy init\n");
	if (CHIP_IS_E3(bp)) {
		/* Enable EPIO */
		val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
		REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
	}
	/* Check if common init was already done */
	phy_ver = REG_RD(bp, shmem_base_path[0] +
			 offsetof(struct shmem_region,
				  port_mb[PORT_0].ext_phy_fw_version));
	if (phy_ver) {
		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
		   phy_ver);
		return 0;
	}

	/* Read the ext_phy_type for arbitrary port(0) */
	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
	      phy_index++) {
		ext_phy_config = bnx2x_get_ext_phy_config(bp,
							  shmem_base_path[0],
							  phy_index, 0);
		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
		rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
						shmem2_base_path,
						phy_index, ext_phy_type,
						chip_id);
	}
	return rc;
}

static void bnx2x_check_over_curr(struct link_params *params,
				  struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	u32 cfg_pin;
	u8 port = params->port;
	u32 pin_val;

	cfg_pin = (REG_RD(bp, params->shmem_base +
			  offsetof(struct shmem_region,
			       dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
		   PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
		  PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;

	/* Ignore check if no external input PIN available */
	if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
		return;

	if (!pin_val) {
		if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
			netdev_err(bp->dev, "Error: Power fault on Port %d has"
					    " been detected and the power to "
					    "that SFP+ module has been removed"
					    " to prevent failure of the card."
					    " Please remove the SFP+ module and"
					    " restart the system to clear this"
					    " error.\n",
				   params->port);
			vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
			bnx2x_warpcore_power_module(params, 0);
		}
	} else
		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}

/* Returns 0 if no change occurred since last check; 1 otherwise. */
static u8 bnx2x_analyze_link_error(struct link_params *params,
				   struct link_vars *vars, u32 status,
				   u32 phy_flag, u32 link_flag, u8 notify)
{
	struct bnx2x *bp = params->bp;
	/* Compare new value with previous value */
	u8 led_mode;
	u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;

	if ((status ^ old_status) == 0)
		return 0;

	/* If values differ */
	switch (phy_flag) {
	case PHY_HALF_OPEN_CONN_FLAG:
		DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
		break;
	case PHY_SFP_TX_FAULT_FLAG:
		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
		break;
	default:
		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
	}
	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
	   old_status, status);

	/* a. Update shmem->link_status accordingly
	 * b. Update link_vars->link_up
	 */
	if (status) {
		vars->link_status &= ~LINK_STATUS_LINK_UP;
		vars->link_status |= link_flag;
		vars->link_up = 0;
		vars->phy_flags |= phy_flag;

		/* activate nig drain */
		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
		/* Set LED mode to off since the PHY doesn't know about these
		 * errors
		 */
		led_mode = LED_MODE_OFF;
	} else {
		vars->link_status |= LINK_STATUS_LINK_UP;
		vars->link_status &= ~link_flag;
		vars->link_up = 1;
		vars->phy_flags &= ~phy_flag;
		led_mode = LED_MODE_OPER;

		/* Clear nig drain */
		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
	}
	bnx2x_sync_link(params, vars);
	/* Update the LED according to the link state */
	bnx2x_set_led(params, vars, led_mode, SPEED_10000);

	/* Update link status in the shared memory */
	bnx2x_update_mng(params, vars->link_status);

	/* C. Trigger General Attention */
	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
	if (notify)
		bnx2x_notify_link_changed(bp);

	return 1;
}

/******************************************************************************
 * Description:
 *	This function checks for half opened connection change indication.
 *	When such change occurs, it calls the bnx2x_analyze_link_error
 *	to check if Remote Fault is set or cleared. Reception of a remote fault
 *	status message in the MAC indicates that the peer's MAC has detected
 *	a fault, for example, due to a break in the TX side of the fiber.
 *
 ******************************************************************************/
int bnx2x_check_half_open_conn(struct link_params *params,
			       struct link_vars *vars,
			       u8 notify)
{
	struct bnx2x *bp = params->bp;
	u32 lss_status = 0;
	u32 mac_base;
	/* In case link status is physically up @ 10G do */
	if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
	    (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
		return 0;

	if (CHIP_IS_E3(bp) &&
	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
	      (MISC_REGISTERS_RESET_REG_2_XMAC))) {
		/* Check E3 XMAC */
		/* Note that link speed cannot be queried here, since it may be
		 * zero while link is down. In case UMAC is active, LSS will
		 * simply not be set
		 */
		mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;

		/* Clear sticky bits (Requires rising edge) */
		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
			lss_status = 1;

		bnx2x_analyze_link_error(params, vars, lss_status,
					 PHY_HALF_OPEN_CONN_FLAG,
					 LINK_STATUS_NONE, notify);
	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
		/* Check E1X / E2 BMAC */
		u32 lss_status_reg;
		u32 wb_data[2];
		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
			NIG_REG_INGRESS_BMAC0_MEM;
		/* Read BIGMAC_REGISTER_RX_LSS_STATUS */
		if (CHIP_IS_E2(bp))
			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
		else
			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;

		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
		lss_status = (wb_data[0] > 0);

		bnx2x_analyze_link_error(params, vars, lss_status,
					 PHY_HALF_OPEN_CONN_FLAG,
					 LINK_STATUS_NONE, notify);
	}
	return 0;
}
static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
					 struct link_params *params,
					 struct link_vars *vars)
{
	struct bnx2x *bp = params->bp;
	u32 cfg_pin, value = 0;
	u8 led_change, port = params->port;

	/* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
	cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
			  dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
		   PORT_HW_CFG_E3_TX_FAULT_MASK) >>
		  PORT_HW_CFG_E3_TX_FAULT_SHIFT;

	if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
		DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
		return;
	}

	led_change = bnx2x_analyze_link_error(params, vars, value,
					      PHY_SFP_TX_FAULT_FLAG,
					      LINK_STATUS_SFP_TX_FAULT, 1);

	if (led_change) {
		/* Change TX_Fault led, set link status for further syncs */
		u8 led_mode;

		if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
			led_mode = MISC_REGISTERS_GPIO_HIGH;
			vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
		} else {
			led_mode = MISC_REGISTERS_GPIO_LOW;
			vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
		}

		/* If module is unapproved, led should be on regardless */
		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
			DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
			   led_mode);
			bnx2x_set_e3_module_fault_led(params, led_mode);
		}
	}
}
static void bnx2x_kr2_recovery(struct link_params *params,
			       struct link_vars *vars,
			       struct bnx2x_phy *phy)
{
	struct bnx2x *bp = params->bp;
	DP(NETIF_MSG_LINK, "KR2 recovery\n");
	bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
	bnx2x_warpcore_restart_AN_KR(phy, params);
}

static void bnx2x_check_kr2_wa(struct link_params *params,
			       struct link_vars *vars,
			       struct bnx2x_phy *phy)
{
	struct bnx2x *bp = params->bp;
	u16 base_page, next_page, not_kr2_device, lane;
	int sigdet;

	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery.
	 * Some switches tend to reinit the AN process and clear the
	 * advertised BP/NP after ~2 seconds, causing the KR2 to be disabled
	 * and recovered many times.
	 */
	if (vars->check_kr2_recovery_cnt > 0) {
		vars->check_kr2_recovery_cnt--;
		return;
	}

	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
	if (!sigdet) {
		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
			bnx2x_kr2_recovery(params, vars, phy);
			DP(NETIF_MSG_LINK, "No sigdet\n");
		}
		return;
	}

	lane = bnx2x_get_warpcore_lane(phy, params);
	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
			  MDIO_AER_BLOCK_AER_REG, lane);
	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
			MDIO_AN_REG_LP_AUTO_NEG, &base_page);
	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
			MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
	bnx2x_set_aer_mmd(params, phy);

	/* CL73 has not begun yet */
	if (base_page == 0) {
		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
			bnx2x_kr2_recovery(params, vars, phy);
			DP(NETIF_MSG_LINK, "No BP\n");
		}
		return;
	}

	/* In case NP bit is not set in the BasePage, or it is set,
	 * but only KX is advertised, declare this link partner as non-KR2
	 * device.
	 */
	not_kr2_device = (((base_page & 0x8000) == 0) ||
			  (((base_page & 0x8000) &&
			    ((next_page & 0xe0) == 0x2))));

	/* In case KR2 is already disabled, check if we need to re-enable it */
	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
		if (!not_kr2_device) {
			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
			   next_page);
			bnx2x_kr2_recovery(params, vars, phy);
		}
		return;
	}
	/* KR2 is enabled, but not KR2 device */
	if (not_kr2_device) {
		/* Disable KR2 on both lanes */
		DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
		bnx2x_disable_kr2(params, vars, phy);
		/* Restart AN on leading lane */
		bnx2x_warpcore_restart_AN_KR(phy, params);
		return;
	}
}

void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
	u16 phy_idx;
	struct bnx2x *bp = params->bp;
	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
			if (bnx2x_check_half_open_conn(params, vars, 1) != 0)
				DP(NETIF_MSG_LINK, "Fault detection failed\n");
			break;
		}
	}

	if (CHIP_IS_E3(bp)) {
		struct bnx2x_phy *phy = &params->phy[INT_PHY];
		bnx2x_set_aer_mmd(params, phy);
		if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
		    (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
			bnx2x_check_kr2_wa(params, vars, phy);
		bnx2x_check_over_curr(params, vars);
		if (vars->rx_tx_asic_rst)
			bnx2x_warpcore_config_runtime(phy, params, vars);

		if ((REG_RD(bp, params->shmem_base +
			    offsetof(struct shmem_region, dev_info.
				     port_hw_config[params->port].default_cfg))
		     & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
		    PORT_HW_CFG_NET_SERDES_IF_SFI) {
			if (bnx2x_is_sfp_module_plugged(phy, params)) {
				bnx2x_sfp_tx_fault_detection(phy, params, vars);
			} else if (vars->link_status &
				   LINK_STATUS_SFP_TX_FAULT) {
				/* Clean trail, interrupt corrects the leds */
				vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
				vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
				/* Update link status in the shared memory */
				bnx2x_update_mng(params, vars->link_status);
			}
		}
	}
}

u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
			     u32 shmem_base,
			     u32 shmem2_base,
			     u8 port)
{
	u8 phy_index, fan_failure_det_req = 0;
	struct bnx2x_phy phy;
	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
	      phy_index++) {
		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
				       port, &phy) != 0) {
			DP(NETIF_MSG_LINK, "populate phy failed\n");
			return 0;
		}
		fan_failure_det_req |= (phy.flags &
					FLAGS_FAN_FAILURE_DET_REQ);
	}
	return fan_failure_det_req;
}

void bnx2x_hw_reset_phy(struct link_params *params)
{
	u8 phy_index;
	struct bnx2x *bp = params->bp;
	bnx2x_update_mng(params, 0);
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
	      phy_index++) {
		if (params->phy[phy_index].hw_reset) {
			params->phy[phy_index].hw_reset(
				&params->phy[phy_index],
				params);
			params->phy[phy_index] = phy_null;
		}
	}
}

void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
			    u8 port)
{
	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
	u32 val;
	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
	if (CHIP_IS_E3(bp)) {
		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
					      shmem_base,
					      port,
					      &gpio_num,
					      &gpio_port) != 0)
			return;
	} else {
		struct bnx2x_phy phy;
		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
		      phy_index++) {
			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
					       shmem2_base, port, &phy) != 0) {
				DP(NETIF_MSG_LINK, "populate phy failed\n");
				return;
			}
			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
				gpio_num = MISC_REGISTERS_GPIO_3;
				gpio_port = port;
				break;
			}
		}
	}

	if (gpio_num == 0xff)
		return;

	/* Set GPIO3 to trigger SFP+ module insertion/removal */
	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);

	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
	gpio_port ^= (swap_val && swap_override);

	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
		(gpio_num + (gpio_port << 2));

	sync_offset = shmem_base +
		offsetof(struct shmem_region,
			 dev_info.port_hw_config[port].aeu_int_mask);
	REG_WR(bp, sync_offset, vars->aeu_int_mask);

	DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
	   gpio_num, gpio_port, vars->aeu_int_mask);

	if (port == 0)
		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
	else
		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;

	/* Open appropriate AEU for interrupts */
	aeu_mask = REG_RD(bp, offset);
	aeu_mask |= vars->aeu_int_mask;
	REG_WR(bp, offset, aeu_mask);

	/* Enable the GPIO to trigger interrupt */
	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
	val |= 1 << (gpio_num + (gpio_port << 2));
	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
}