/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* e1000_82575
 * e1000_82576
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/i2c.h>

#include "e1000_mac.h"
#include "e1000_82575.h"
#include "e1000_i210.h"

static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *);
static void igb_release_phy_82575(struct e1000_hw *);
static s32 igb_acquire_nvm_82575(struct e1000_hw *);
static void igb_release_nvm_82575(struct e1000_hw *);
static s32 igb_check_for_link_82575(struct e1000_hw *);
static s32 igb_get_cfg_done_82575(struct e1000_hw *);
static s32 igb_init_hw_82575(struct e1000_hw *);
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
static s32 igb_reset_hw_82575(struct e1000_hw *);
static s32 igb_reset_hw_82580(struct e1000_hw *);
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
					      u16 *);
static s32 igb_get_phy_id_82575(struct e1000_hw *);
static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
static bool igb_sgmii_active_82575(struct e1000_hw *);
static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
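/* Rx packet buffer size options for the 82580 family, indexed by the value
 * of the RXPBS register's size field.
 */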
static const u16 e1000_82580_rxpbs_table[] =
	{ 36, 72, 144, 1, 2, 4, 8, 16,
	  35, 70, 140 };
#define E1000_82580_RXPBS_TABLE_SIZE \
	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))

/**
 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 * @hw: pointer to the HW structure
 *
 * Called to determine if the I2C pins are being used for I2C or as an
 * external MDIO interface since the two options are mutually exclusive.
 **/
static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = false;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		reg = rd32(E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		reg = rd32(E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		break;
	}
	return ext_mdio;
}

/**
 * igb_init_phy_params_82575 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
		       E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1543_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
				igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
			igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_init_nvm_params_82575 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u16 size;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->type = e1000_nvm_eeprom_spi;

	/* NVM Function Pointers */
	nvm->ops.acquire = igb_acquire_nvm_82575;
	nvm->ops.release = igb_release_nvm_82575;
	nvm->ops.write = igb_write_nvm_spi;
	nvm->ops.validate = igb_validate_nvm_checksum;
	nvm->ops.update = igb_update_nvm_checksum;
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = igb_read_nvm_eerd;
	else
		nvm->ops.read = igb_read_nvm_spi;

	/* override generic family function pointers for specific descendants */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		break;
	case e1000_i354:
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * igb_init_mac_params_82575 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	return 0;
}

/**
 * igb_set_sfp_media_type_82575 - derives SFP module media type.
 * @hw: pointer to the HW structure
 *
 * The media type is chosen based on SFP module compatibility flags
 * retrieved from the SFP ID EEPROM.
 **/
static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_ERR_CONFIG;
	u32 ctrl_ext = 0;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
	u8 transceiver_type = 0;
	s32 timeout = 3;

	/* Turn I2C interface ON and power on sfp cage */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);

	wrfl();

	/* Read SFP module data */
	while (timeout) {
		ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&transceiver_type);
		if (ret_val == 0)
			break;
		msleep(100);
		timeout--;
	}
	if (ret_val != 0)
		goto out;

	ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
			(u8 *)eth_flags);
	if (ret_val != 0)
		goto out;

	/* Check if there is some SFP module plugged and powered */
	if ((transceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (transceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
		dev_spec->module_plugged = true;
		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e100_base_fx) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e1000_base_t) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_copper;
		} else {
			hw->phy.media_type = e1000_media_type_unknown;
			hw_dbg("PHY module has not been recognized\n");
			goto out;
		}
	} else {
		hw->phy.media_type = e1000_media_type_unknown;
	}
	ret_val = 0;
out:
	/* Restore I2C interface setting */
	wr32(E1000_CTRL_EXT, ctrl_ext);
	return ret_val;
}

static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;
	dev_spec->module_plugged = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (igb_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = true;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = igb_set_sfp_media_type_82575(hw);
		if ((ret_val != 0) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/* If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = true;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		wr32(E1000_CTRL_EXT, ctrl_ext);

		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igb_init_nvm_params_82575(hw);
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_init_nvm_params_i210(hw);
		break;
	default:
		break;
	}

	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}

/**
 * igb_acquire_phy_82575 - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igb_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static void igb_release_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 **/
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					u16 *data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the serial gigabit
 * media independent interface.
 **/
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					 u16 data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for PHYs that do and do not use the
 * SGMII interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	/* Extra read required for some PHYs on i354 */
	if (hw->mac.type == e1000_i354)
		igb_get_phy_id(hw);

	/* For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works. For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work. The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
		case e1000_i210:
		case e1000_i211:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/* At the time of this writing, the M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}

/**
 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
 **/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* This isn't a true "hard" reset, but is the only reset
	 * available to us at this time.
	 */

	hw_dbg("Soft resetting SGMII attached PHY...\n");

	/* SFP documentation requires the following to configure the SFP module
	 * to work on SGMII. No further documentation is given.
	 */
	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
	if (ret_val)
		goto out;

	ret_val = igb_phy_sw_reset(hw);

out:
	return ret_val;
}

/**
 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		goto out;

	if (active) {
		data |= IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		if (ret_val)
			goto out;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			goto out;
	} else {
		data &= ~IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is true, else clear lplu for D3
 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
 * is used during Dx states where the power conservation is most important.
 * During driver activity, SmartSpeed should be enabled so performance is
 * maintained.
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
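/* Typical usage (illustrative sketch, not a new API): callers bracket access
 * to the shared PHY/NVM resource with the acquire/release pair through the
 * mac.ops function pointers, e.g.
 *
 *	if (!hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_PHY0_SM)) {
 *		... access the shared PHY/NVM resource ...
 *		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_PHY0_SM);
 *	}
 *
 * See igb_acquire_phy_82575()/igb_release_phy_82575() above for the in-tree
 * example of this pattern.
 */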
/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0. If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		msleep(1);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return ret_val;
}

/**
 * igb_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function. If using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
				      u16 *duplex)
{
	s32 ret_val;

	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
							     duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
							  duplex);

	return ret_val;
}

/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not. If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}

/**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state. For non-copper mode,
	 * the status register is not accurate. The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg. The sync ok
	 * gets set once both sides sync up and agree upon link. Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection. */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbps, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}

/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}
}

/**
 * igb_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val)
		hw_dbg("PCI-E Set completion timeout has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}

/**
 * igb_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}

/**
 * igb_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link; once link is established, collision distance and flow control
 * are configured.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on supported devices */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
		break;
	default:
		break;
	}

	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for the SFP cage to power up the PHY */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1543_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}

/**
 * igb_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link. The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii), or serdes fiber is being used. Configures the link
 * for auto-negotiation or forces speed/duplex.
 **/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
	bool pcs_autoneg;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return ret_val;

	/* On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed. A read to
	 * the register does not indicate its status. Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present and turn on I2C */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	ctrl_ext |= E1000_CTRL_I2C_ENA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
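		/* fall through to the shared (non-SGMII) configuration below */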
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				printk(KERN_DEBUG "NVM Read Error\n\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = false;
		}

		/* non-SGMII modes only support a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	/* New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be default set by most drivers. This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */

		/* Disable force flow control for autoneg */
		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;

		/* Configure flow control advertisement for autoneg */
		anadv_reg = rd32(E1000_PCS_ANADV);
		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
		switch (hw->fc.requested_mode) {
		case e1000_fc_full:
		case e1000_fc_rx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			anadv_reg |= E1000_TXCW_PAUSE;
			break;
		case e1000_fc_tx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			break;
		default:
			break;
		}
		wr32(E1000_PCS_ANADV, anadv_reg);

		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD; /* Force Speed */

		/* Force flow control for forced link */
		reg |= E1000_PCS_LCTL_FORCE_FCTRL;

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return ret_val;
}

/**
 * igb_sgmii_active_82575 - Return sgmii state
 * @hw: pointer to the HW structure
 *
 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
 * which can be enabled for use in the embedded applications. Simply
 * return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * igb_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected. This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or if wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	igb_clear_hw_cntrs_base(hw);

	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

Set enable and wait 2ms so that 1855 * any packet that was coming in as RCTL.EN was set is flushed 1856 */ 1857 rfctl = rd32(E1000_RFCTL); 1858 wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); 1859 1860 rlpml = rd32(E1000_RLPML); 1861 wr32(E1000_RLPML, 0); 1862 1863 rctl = rd32(E1000_RCTL); 1864 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); 1865 temp_rctl |= E1000_RCTL_LPE; 1866 1867 wr32(E1000_RCTL, temp_rctl); 1868 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); 1869 wrfl(); 1870 msleep(2); 1871 1872 /* Enable RX queues that were previously enabled and restore our 1873 * previous state 1874 */ 1875 for (i = 0; i < 4; i++) 1876 wr32(E1000_RXDCTL(i), rxdctl[i]); 1877 wr32(E1000_RCTL, rctl); 1878 wrfl(); 1879 1880 wr32(E1000_RLPML, rlpml); 1881 wr32(E1000_RFCTL, rfctl); 1882 1883 /* Flush receive errors generated by workaround */ 1884 rd32(E1000_ROC); 1885 rd32(E1000_RNBC); 1886 rd32(E1000_MPC); 1887 } 1888 1889 /** 1890 * igb_set_pcie_completion_timeout - set pci-e completion timeout 1891 * @hw: pointer to the HW structure 1892 * 1893 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, 1894 * however the hardware default for these parts is 500us to 1ms which is less 1895 * than the 10ms recommended by the pci-e spec. To address this we need to 1896 * increase the value to either 10ms to 200ms for capability version 1 config, 1897 * or 16ms to 55ms for version 2. 1898 **/ 1899 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) 1900 { 1901 u32 gcr = rd32(E1000_GCR); 1902 s32 ret_val = 0; 1903 u16 pcie_devctl2; 1904 1905 /* only take action if timeout value is defaulted to 0 */ 1906 if (gcr & E1000_GCR_CMPL_TMOUT_MASK) 1907 goto out; 1908 1909 /* if capabilities version is type 1 we can write the 1910 * timeout of 10ms to 200ms through the GCR register 1911 */ 1912 if (!(gcr & E1000_GCR_CAP_VER2)) { 1913 gcr |= E1000_GCR_CMPL_TMOUT_10ms; 1914 goto out; 1915 } 1916 1917 /* for version 2 capabilities we need to write the config space 1918 * directly in order to set the completion timeout value for 1919 * 16ms to 55ms 1920 */ 1921 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1922 &pcie_devctl2); 1923 if (ret_val) 1924 goto out; 1925 1926 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 1927 1928 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1929 &pcie_devctl2); 1930 out: 1931 /* disable completion timeout resend */ 1932 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 1933 1934 wr32(E1000_GCR, gcr); 1935 return ret_val; 1936 } 1937 1938 /** 1939 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing 1940 * @hw: pointer to the hardware struct 1941 * @enable: state to enter, either enabled or disabled 1942 * @pf: Physical Function pool - do not set anti-spoofing for the PF 1943 * 1944 * enables/disables L2 switch anti-spoofing functionality. 
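 * When enabling, the MAC and VLAN anti-spoof bits are set for every pool and
 * then cleared again for the PF pool (and its VLAN counterpart), since the PF
 * must be able to transmit with addresses it does not own when supporting
 * emulation mode NICs.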
1945 **/ 1946 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 1947 { 1948 u32 reg_val, reg_offset; 1949 1950 switch (hw->mac.type) { 1951 case e1000_82576: 1952 reg_offset = E1000_DTXSWC; 1953 break; 1954 case e1000_i350: 1955 case e1000_i354: 1956 reg_offset = E1000_TXSWC; 1957 break; 1958 default: 1959 return; 1960 } 1961 1962 reg_val = rd32(reg_offset); 1963 if (enable) { 1964 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | 1965 E1000_DTXSWC_VLAN_SPOOF_MASK); 1966 /* The PF can spoof - it has to in order to 1967 * support emulation mode NICs 1968 */ 1969 reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); 1970 } else { 1971 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 1972 E1000_DTXSWC_VLAN_SPOOF_MASK); 1973 } 1974 wr32(reg_offset, reg_val); 1975 } 1976 1977 /** 1978 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback 1979 * @hw: pointer to the hardware struct 1980 * @enable: state to enter, either enabled or disabled 1981 * 1982 * enables/disables L2 switch loopback functionality. 1983 **/ 1984 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 1985 { 1986 u32 dtxswc; 1987 1988 switch (hw->mac.type) { 1989 case e1000_82576: 1990 dtxswc = rd32(E1000_DTXSWC); 1991 if (enable) 1992 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1993 else 1994 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1995 wr32(E1000_DTXSWC, dtxswc); 1996 break; 1997 case e1000_i354: 1998 case e1000_i350: 1999 dtxswc = rd32(E1000_TXSWC); 2000 if (enable) 2001 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2002 else 2003 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2004 wr32(E1000_TXSWC, dtxswc); 2005 break; 2006 default: 2007 /* Currently no other hardware supports loopback */ 2008 break; 2009 } 2010 2011 } 2012 2013 /** 2014 * igb_vmdq_set_replication_pf - enable or disable vmdq replication 2015 * @hw: pointer to the hardware struct 2016 * @enable: state to enter, either enabled or disabled 2017 * 2018 * enables/disables replication of packets across multiple pools. 2019 **/ 2020 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 2021 { 2022 u32 vt_ctl = rd32(E1000_VT_CTL); 2023 2024 if (enable) 2025 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 2026 else 2027 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 2028 2029 wr32(E1000_VT_CTL, vt_ctl); 2030 } 2031 2032 /** 2033 * igb_read_phy_reg_82580 - Read 82580 MDI control register 2034 * @hw: pointer to the HW structure 2035 * @offset: register offset to be read 2036 * @data: pointer to the read data 2037 * 2038 * Reads the MDI control register in the PHY at offset and stores the 2039 * information read to data. 2040 **/ 2041 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 2042 { 2043 s32 ret_val; 2044 2045 ret_val = hw->phy.ops.acquire(hw); 2046 if (ret_val) 2047 goto out; 2048 2049 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 2050 2051 hw->phy.ops.release(hw); 2052 2053 out: 2054 return ret_val; 2055 } 2056 2057 /** 2058 * igb_write_phy_reg_82580 - Write 82580 MDI control register 2059 * @hw: pointer to the HW structure 2060 * @offset: register offset to write to 2061 * @data: data to write to register at offset 2062 * 2063 * Writes data to MDI control register in the PHY at offset. 
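 * The PHY semaphore is acquired for the duration of the MDIC access and
 * released before returning.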
2064 **/ 2065 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 2066 { 2067 s32 ret_val; 2068 2069 2070 ret_val = hw->phy.ops.acquire(hw); 2071 if (ret_val) 2072 goto out; 2073 2074 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 2075 2076 hw->phy.ops.release(hw); 2077 2078 out: 2079 return ret_val; 2080 } 2081 2082 /** 2083 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits 2084 * @hw: pointer to the HW structure 2085 * 2086 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on 2087 * the values found in the EEPROM. This addresses an issue in which these 2088 * bits are not restored from EEPROM after reset. 2089 **/ 2090 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) 2091 { 2092 s32 ret_val = 0; 2093 u32 mdicnfg; 2094 u16 nvm_data = 0; 2095 2096 if (hw->mac.type != e1000_82580) 2097 goto out; 2098 if (!igb_sgmii_active_82575(hw)) 2099 goto out; 2100 2101 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2102 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2103 &nvm_data); 2104 if (ret_val) { 2105 hw_dbg("NVM Read Error\n"); 2106 goto out; 2107 } 2108 2109 mdicnfg = rd32(E1000_MDICNFG); 2110 if (nvm_data & NVM_WORD24_EXT_MDIO) 2111 mdicnfg |= E1000_MDICNFG_EXT_MDIO; 2112 if (nvm_data & NVM_WORD24_COM_MDIO) 2113 mdicnfg |= E1000_MDICNFG_COM_MDIO; 2114 wr32(E1000_MDICNFG, mdicnfg); 2115 out: 2116 return ret_val; 2117 } 2118 2119 /** 2120 * igb_reset_hw_82580 - Reset hardware 2121 * @hw: pointer to the HW structure 2122 * 2123 * This resets the function or entire device (all ports, etc.) 2124 * to a known state. 2125 **/ 2126 static s32 igb_reset_hw_82580(struct e1000_hw *hw) 2127 { 2128 s32 ret_val = 0; 2129 /* BH SW mailbox bit in SW_FW_SYNC */ 2130 u16 swmbsw_mask = E1000_SW_SYNCH_MB; 2131 u32 ctrl; 2132 bool global_device_reset = hw->dev_spec._82575.global_device_reset; 2133 2134 hw->dev_spec._82575.global_device_reset = false; 2135 2136 /* due to hw errata, global device reset doesn't always 2137 * work on 82580 2138 */ 2139 if (hw->mac.type == e1000_82580) 2140 global_device_reset = false; 2141 2142 /* Get current control state. */ 2143 ctrl = rd32(E1000_CTRL); 2144 2145 /* Prevent the PCI-E bus from sticking if there is no TLP connection 2146 * on the last TLP read/write transaction when MAC is reset. 2147 */ 2148 ret_val = igb_disable_pcie_master(hw); 2149 if (ret_val) 2150 hw_dbg("PCI-E Master disable polling has failed.\n"); 2151 2152 hw_dbg("Masking off all interrupts\n"); 2153 wr32(E1000_IMC, 0xffffffff); 2154 wr32(E1000_RCTL, 0); 2155 wr32(E1000_TCTL, E1000_TCTL_PSP); 2156 wrfl(); 2157 2158 msleep(10); 2159 2160 /* Determine whether or not a global dev reset is requested */ 2161 if (global_device_reset && 2162 hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) 2163 global_device_reset = false; 2164 2165 if (global_device_reset && 2166 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) 2167 ctrl |= E1000_CTRL_DEV_RST; 2168 else 2169 ctrl |= E1000_CTRL_RST; 2170 2171 wr32(E1000_CTRL, ctrl); 2172 wrfl(); 2173 2174 /* Add delay to ensure DEV_RST has time to complete */ 2175 if (global_device_reset) 2176 msleep(5); 2177 2178 ret_val = igb_get_auto_rd_done(hw); 2179 if (ret_val) { 2180 /* When auto config read does not complete, do not 2181 * return with an error. This can happen in situations 2182 * where there is no eeprom and prevents getting link.
2183 */ 2184 hw_dbg("Auto Read Done did not complete\n"); 2185 } 2186 2187 /* clear global device reset status bit */ 2188 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); 2189 2190 /* Clear any pending interrupt events. */ 2191 wr32(E1000_IMC, 0xffffffff); 2192 rd32(E1000_ICR); 2193 2194 ret_val = igb_reset_mdicnfg_82580(hw); 2195 if (ret_val) 2196 hw_dbg("Could not reset MDICNFG based on EEPROM\n"); 2197 2198 /* Install any alternate MAC address into RAR0 */ 2199 ret_val = igb_check_alt_mac_addr(hw); 2200 2201 /* Release semaphore */ 2202 if (global_device_reset) 2203 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); 2204 2205 return ret_val; 2206 } 2207 2208 /** 2209 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size 2210 * @data: data received by reading RXPBS register 2211 * 2212 * The 82580 uses a table based approach for packet buffer allocation sizes. 2213 * This function converts the retrieved value into the correct table value 2214 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2215 * 0x0 36 72 144 1 2 4 8 16 2216 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2217 */ 2218 u16 igb_rxpbs_adjust_82580(u32 data) 2219 { 2220 u16 ret_val = 0; 2221 2222 if (data < E1000_82580_RXPBS_TABLE_SIZE) 2223 ret_val = e1000_82580_rxpbs_table[data]; 2224 2225 return ret_val; 2226 } 2227 2228 /** 2229 * igb_validate_nvm_checksum_with_offset - Validate EEPROM 2230 * checksum 2231 * @hw: pointer to the HW structure 2232 * @offset: offset in words of the checksum protected region 2233 * 2234 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2235 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 2236 **/ 2237 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, 2238 u16 offset) 2239 { 2240 s32 ret_val = 0; 2241 u16 checksum = 0; 2242 u16 i, nvm_data; 2243 2244 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2245 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2246 if (ret_val) { 2247 hw_dbg("NVM Read Error\n"); 2248 goto out; 2249 } 2250 checksum += nvm_data; 2251 } 2252 2253 if (checksum != (u16) NVM_SUM) { 2254 hw_dbg("NVM Checksum Invalid\n"); 2255 ret_val = -E1000_ERR_NVM; 2256 goto out; 2257 } 2258 2259 out: 2260 return ret_val; 2261 } 2262 2263 /** 2264 * igb_update_nvm_checksum_with_offset - Update EEPROM 2265 * checksum 2266 * @hw: pointer to the HW structure 2267 * @offset: offset in words of the checksum protected region 2268 * 2269 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2270 * up to the checksum. Then calculates the EEPROM checksum and writes the 2271 * value to the EEPROM. 
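 * The checksum word is chosen so that the 16-bit sum of every word in the
 * protected region, including the checksum word itself, equals NVM_SUM
 * (0xBABA).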
2272 **/ 2273 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2274 { 2275 s32 ret_val; 2276 u16 checksum = 0; 2277 u16 i, nvm_data; 2278 2279 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 2280 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2281 if (ret_val) { 2282 hw_dbg("NVM Read Error while updating checksum.\n"); 2283 goto out; 2284 } 2285 checksum += nvm_data; 2286 } 2287 checksum = (u16) NVM_SUM - checksum; 2288 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 2289 &checksum); 2290 if (ret_val) 2291 hw_dbg("NVM Write Error while updating checksum.\n"); 2292 2293 out: 2294 return ret_val; 2295 } 2296 2297 /** 2298 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum 2299 * @hw: pointer to the HW structure 2300 * 2301 * Calculates the EEPROM section checksum by reading/adding each word of 2302 * the EEPROM and then verifies that the sum of the EEPROM is 2303 * equal to 0xBABA. 2304 **/ 2305 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 2306 { 2307 s32 ret_val = 0; 2308 u16 eeprom_regions_count = 1; 2309 u16 j, nvm_data; 2310 u16 nvm_offset; 2311 2312 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2313 if (ret_val) { 2314 hw_dbg("NVM Read Error\n"); 2315 goto out; 2316 } 2317 2318 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2319 /* if checksums compatibility bit is set validate checksums 2320 * for all 4 ports. 2321 */ 2322 eeprom_regions_count = 4; 2323 } 2324 2325 for (j = 0; j < eeprom_regions_count; j++) { 2326 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2327 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2328 nvm_offset); 2329 if (ret_val != 0) 2330 goto out; 2331 } 2332 2333 out: 2334 return ret_val; 2335 } 2336 2337 /** 2338 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 2339 * @hw: pointer to the HW structure 2340 * 2341 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2342 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2343 * checksum and writes the value to the EEPROM. 2344 **/ 2345 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 2346 { 2347 s32 ret_val; 2348 u16 j, nvm_data; 2349 u16 nvm_offset; 2350 2351 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2352 if (ret_val) { 2353 hw_dbg("NVM Read Error while updating checksum" 2354 " compatibility bit.\n"); 2355 goto out; 2356 } 2357 2358 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2359 /* set compatibility bit to validate checksums appropriately */ 2360 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2361 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2362 &nvm_data); 2363 if (ret_val) { 2364 hw_dbg("NVM Write Error while updating checksum" 2365 " compatibility bit.\n"); 2366 goto out; 2367 } 2368 } 2369 2370 for (j = 0; j < 4; j++) { 2371 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2372 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2373 if (ret_val) 2374 goto out; 2375 } 2376 2377 out: 2378 return ret_val; 2379 } 2380 2381 /** 2382 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 2383 * @hw: pointer to the HW structure 2384 * 2385 * Calculates the EEPROM section checksum by reading/adding each word of 2386 * the EEPROM and then verifies that the sum of the EEPROM is 2387 * equal to 0xBABA. 
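 * Each of the four LAN function regions is validated independently, using the
 * per-port offsets returned by NVM_82580_LAN_FUNC_OFFSET().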
2388 **/ 2389 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 2390 { 2391 s32 ret_val = 0; 2392 u16 j; 2393 u16 nvm_offset; 2394 2395 for (j = 0; j < 4; j++) { 2396 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2397 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2398 nvm_offset); 2399 if (ret_val != 0) 2400 goto out; 2401 } 2402 2403 out: 2404 return ret_val; 2405 } 2406 2407 /** 2408 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 2409 * @hw: pointer to the HW structure 2410 * 2411 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2412 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2413 * checksum and writes the value to the EEPROM. 2414 **/ 2415 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) 2416 { 2417 s32 ret_val = 0; 2418 u16 j; 2419 u16 nvm_offset; 2420 2421 for (j = 0; j < 4; j++) { 2422 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2423 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2424 if (ret_val != 0) 2425 goto out; 2426 } 2427 2428 out: 2429 return ret_val; 2430 } 2431 2432 /** 2433 * __igb_access_emi_reg - Read/write EMI register 2434 * @hw: pointer to the HW structure 2435 * @addr: EMI address to program 2436 * @data: pointer to value to read/write from/to the EMI address 2437 * @read: boolean flag to indicate read or write 2438 **/ 2439 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2440 u16 *data, bool read) 2441 { 2442 s32 ret_val = E1000_SUCCESS; 2443 2444 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2445 if (ret_val) 2446 return ret_val; 2447 2448 if (read) 2449 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2450 else 2451 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2452 2453 return ret_val; 2454 } 2455 2456 /** 2457 * igb_read_emi_reg - Read Extended Management Interface register 2458 * @hw: pointer to the HW structure 2459 * @addr: EMI address to program 2460 * @data: value to be read from the EMI address 2461 **/ 2462 s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) 2463 { 2464 return __igb_access_emi_reg(hw, addr, data, true); 2465 } 2466 2467 /** 2468 * igb_set_eee_i350 - Enable/disable EEE support 2469 * @hw: pointer to the HW structure 2470 * 2471 * Enable/disable EEE based on setting in dev_spec structure. 2472 * 2473 **/ 2474 s32 igb_set_eee_i350(struct e1000_hw *hw) 2475 { 2476 s32 ret_val = 0; 2477 u32 ipcnfg, eeer; 2478 2479 if ((hw->mac.type < e1000_i350) || 2480 (hw->phy.media_type != e1000_media_type_copper)) 2481 goto out; 2482 ipcnfg = rd32(E1000_IPCNFG); 2483 eeer = rd32(E1000_EEER); 2484 2485 /* enable or disable per user setting */ 2486 if (!(hw->dev_spec._82575.eee_disable)) { 2487 u32 eee_su = rd32(E1000_EEE_SU); 2488 2489 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2490 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2491 E1000_EEER_LPI_FC); 2492 2493 /* This bit should not be set in normal operation. 
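 * If it is found set, only a debug warning is emitted below; the EEE
 * enables are still programmed.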
*/ 2494 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2495 hw_dbg("LPI Clock Stop Bit should not be set!\n"); 2496 2497 } else { 2498 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2499 E1000_IPCNFG_EEE_100M_AN); 2500 eeer &= ~(E1000_EEER_TX_LPI_EN | 2501 E1000_EEER_RX_LPI_EN | 2502 E1000_EEER_LPI_FC); 2503 } 2504 wr32(E1000_IPCNFG, ipcnfg); 2505 wr32(E1000_EEER, eeer); 2506 rd32(E1000_IPCNFG); 2507 rd32(E1000_EEER); 2508 out: 2509 2510 return ret_val; 2511 } 2512 2513 /** 2514 * igb_set_eee_i354 - Enable/disable EEE support 2515 * @hw: pointer to the HW structure 2516 * 2517 * Enable/disable EEE legacy mode based on setting in dev_spec structure. 2518 * 2519 **/ 2520 s32 igb_set_eee_i354(struct e1000_hw *hw) 2521 { 2522 struct e1000_phy_info *phy = &hw->phy; 2523 s32 ret_val = 0; 2524 u16 phy_data; 2525 2526 if ((hw->phy.media_type != e1000_media_type_copper) || 2527 (phy->id != M88E1543_E_PHY_ID)) 2528 goto out; 2529 2530 if (!hw->dev_spec._82575.eee_disable) { 2531 /* Switch to PHY page 18. */ 2532 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); 2533 if (ret_val) 2534 goto out; 2535 2536 ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, 2537 &phy_data); 2538 if (ret_val) 2539 goto out; 2540 2541 phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; 2542 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, 2543 phy_data); 2544 if (ret_val) 2545 goto out; 2546 2547 /* Return the PHY to page 0. */ 2548 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); 2549 if (ret_val) 2550 goto out; 2551 2552 /* Turn on EEE advertisement. */ 2553 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2554 E1000_EEE_ADV_DEV_I354, 2555 &phy_data); 2556 if (ret_val) 2557 goto out; 2558 2559 phy_data |= E1000_EEE_ADV_100_SUPPORTED | 2560 E1000_EEE_ADV_1000_SUPPORTED; 2561 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2562 E1000_EEE_ADV_DEV_I354, 2563 phy_data); 2564 } else { 2565 /* Turn off EEE advertisement. */ 2566 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2567 E1000_EEE_ADV_DEV_I354, 2568 &phy_data); 2569 if (ret_val) 2570 goto out; 2571 2572 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | 2573 E1000_EEE_ADV_1000_SUPPORTED); 2574 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2575 E1000_EEE_ADV_DEV_I354, 2576 phy_data); 2577 } 2578 2579 out: 2580 return ret_val; 2581 } 2582 2583 /** 2584 * igb_get_eee_status_i354 - Get EEE status 2585 * @hw: pointer to the HW structure 2586 * @status: EEE status 2587 * 2588 * Get EEE status by guessing based on whether Tx or Rx LPI indications have 2589 * been received. 2590 **/ 2591 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) 2592 { 2593 struct e1000_phy_info *phy = &hw->phy; 2594 s32 ret_val = 0; 2595 u16 phy_data; 2596 2597 /* Check if EEE is supported on this device. */ 2598 if ((hw->phy.media_type != e1000_media_type_copper) || 2599 (phy->id != M88E1543_E_PHY_ID)) 2600 goto out; 2601 2602 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, 2603 E1000_PCS_STATUS_DEV_I354, 2604 &phy_data); 2605 if (ret_val) 2606 goto out; 2607 2608 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | 2609 E1000_PCS_STATUS_RX_LPI_RCVD) ? 
true : false; 2610 2611 out: 2612 return ret_val; 2613 } 2614 2615 static const u8 e1000_emc_temp_data[4] = { 2616 E1000_EMC_INTERNAL_DATA, 2617 E1000_EMC_DIODE1_DATA, 2618 E1000_EMC_DIODE2_DATA, 2619 E1000_EMC_DIODE3_DATA 2620 }; 2621 static const u8 e1000_emc_therm_limit[4] = { 2622 E1000_EMC_INTERNAL_THERM_LIMIT, 2623 E1000_EMC_DIODE1_THERM_LIMIT, 2624 E1000_EMC_DIODE2_THERM_LIMIT, 2625 E1000_EMC_DIODE3_THERM_LIMIT 2626 }; 2627 2628 /** 2629 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2630 * @hw: pointer to hardware structure 2631 * 2632 * Updates the temperatures in mac.thermal_sensor_data 2633 **/ 2634 s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2635 { 2636 s32 status = E1000_SUCCESS; 2637 u16 ets_offset; 2638 u16 ets_cfg; 2639 u16 ets_sensor; 2640 u8 num_sensors; 2641 u8 sensor_index; 2642 u8 sensor_location; 2643 u8 i; 2644 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2645 2646 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2647 return E1000_NOT_IMPLEMENTED; 2648 2649 data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); 2650 2651 /* Return the internal sensor only if ETS is unsupported */ 2652 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2653 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2654 return status; 2655 2656 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2657 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2658 != NVM_ETS_TYPE_EMC) 2659 return E1000_NOT_IMPLEMENTED; 2660 2661 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2662 if (num_sensors > E1000_MAX_SENSORS) 2663 num_sensors = E1000_MAX_SENSORS; 2664 2665 for (i = 1; i < num_sensors; i++) { 2666 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2667 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2668 NVM_ETS_DATA_INDEX_SHIFT); 2669 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2670 NVM_ETS_DATA_LOC_SHIFT); 2671 2672 if (sensor_location != 0) 2673 hw->phy.ops.read_i2c_byte(hw, 2674 e1000_emc_temp_data[sensor_index], 2675 E1000_I2C_THERMAL_SENSOR_ADDR, 2676 &data->sensor[i].temp); 2677 } 2678 return status; 2679 } 2680 2681 /** 2682 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds 2683 * @hw: pointer to hardware structure 2684 * 2685 * Sets the thermal sensor thresholds according to the NVM map 2686 * and save off the threshold and location values into mac.thermal_sensor_data 2687 **/ 2688 s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2689 { 2690 s32 status = E1000_SUCCESS; 2691 u16 ets_offset; 2692 u16 ets_cfg; 2693 u16 ets_sensor; 2694 u8 low_thresh_delta; 2695 u8 num_sensors; 2696 u8 sensor_index; 2697 u8 sensor_location; 2698 u8 therm_limit; 2699 u8 i; 2700 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2701 2702 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2703 return E1000_NOT_IMPLEMENTED; 2704 2705 memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); 2706 2707 data->sensor[0].location = 0x1; 2708 data->sensor[0].caution_thresh = 2709 (rd32(E1000_THHIGHTC) & 0xFF); 2710 data->sensor[0].max_op_thresh = 2711 (rd32(E1000_THLOWTC) & 0xFF); 2712 2713 /* Return the internal sensor only if ETS is unsupported */ 2714 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2715 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2716 return status; 2717 2718 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2719 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2720 != NVM_ETS_TYPE_EMC) 2721 return 
E1000_NOT_IMPLEMENTED; 2722 2723 low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> 2724 NVM_ETS_LTHRES_DELTA_SHIFT); 2725 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2726 2727 for (i = 1; i <= num_sensors; i++) { 2728 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2729 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2730 NVM_ETS_DATA_INDEX_SHIFT); 2731 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2732 NVM_ETS_DATA_LOC_SHIFT); 2733 therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; 2734 2735 hw->phy.ops.write_i2c_byte(hw, 2736 e1000_emc_therm_limit[sensor_index], 2737 E1000_I2C_THERMAL_SENSOR_ADDR, 2738 therm_limit); 2739 2740 if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { 2741 data->sensor[i].location = sensor_location; 2742 data->sensor[i].caution_thresh = therm_limit; 2743 data->sensor[i].max_op_thresh = therm_limit - 2744 low_thresh_delta; 2745 } 2746 } 2747 return status; 2748 } 2749 2750 static struct e1000_mac_operations e1000_mac_ops_82575 = { 2751 .init_hw = igb_init_hw_82575, 2752 .check_for_link = igb_check_for_link_82575, 2753 .rar_set = igb_rar_set, 2754 .read_mac_addr = igb_read_mac_addr_82575, 2755 .get_speed_and_duplex = igb_get_link_up_info_82575, 2756 #ifdef CONFIG_IGB_HWMON 2757 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, 2758 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, 2759 #endif 2760 }; 2761 2762 static struct e1000_phy_operations e1000_phy_ops_82575 = { 2763 .acquire = igb_acquire_phy_82575, 2764 .get_cfg_done = igb_get_cfg_done_82575, 2765 .release = igb_release_phy_82575, 2766 .write_i2c_byte = igb_write_i2c_byte, 2767 .read_i2c_byte = igb_read_i2c_byte, 2768 }; 2769 2770 static struct e1000_nvm_operations e1000_nvm_ops_82575 = { 2771 .acquire = igb_acquire_nvm_82575, 2772 .read = igb_read_nvm_eerd, 2773 .release = igb_release_nvm_82575, 2774 .write = igb_write_nvm_spi, 2775 }; 2776 2777 const struct e1000_info e1000_82575_info = { 2778 .get_invariants = igb_get_invariants_82575, 2779 .mac_ops = &e1000_mac_ops_82575, 2780 .phy_ops = &e1000_phy_ops_82575, 2781 .nvm_ops = &e1000_nvm_ops_82575, 2782 }; 2783 2784
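/* Illustrative usage sketch (not part of the driver source): the const ops
 * tables and e1000_82575_info above are consumed by the probe path, which is
 * assumed to copy the ops into a live struct e1000_hw and then drive the
 * device through the function pointers, roughly along these lines:
 *
 *	const struct e1000_info *ei = &e1000_82575_info;
 *	s32 err;
 *
 *	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 *	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
 *	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
 *
 *	err = ei->get_invariants(hw);
 *	if (!err)
 *		err = hw->mac.ops.init_hw(hw);
 *
 * The exact binding lives outside this file (in the igb probe code) and is
 * only sketched here.
 */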