1 /******************************************************************************* 2 3 Intel(R) Gigabit Ethernet Linux driver 4 Copyright(c) 2007-2011 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* e1000_82575 29 * e1000_82576 30 */ 31 32 #include <linux/types.h> 33 #include <linux/if_ether.h> 34 35 #include "e1000_mac.h" 36 #include "e1000_82575.h" 37 38 static s32 igb_get_invariants_82575(struct e1000_hw *); 39 static s32 igb_acquire_phy_82575(struct e1000_hw *); 40 static void igb_release_phy_82575(struct e1000_hw *); 41 static s32 igb_acquire_nvm_82575(struct e1000_hw *); 42 static void igb_release_nvm_82575(struct e1000_hw *); 43 static s32 igb_check_for_link_82575(struct e1000_hw *); 44 static s32 igb_get_cfg_done_82575(struct e1000_hw *); 45 static s32 igb_init_hw_82575(struct e1000_hw *); 46 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 49 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 50 static s32 igb_reset_hw_82575(struct e1000_hw *); 51 static s32 igb_reset_hw_82580(struct e1000_hw *); 52 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 53 static s32 igb_setup_copper_link_82575(struct e1000_hw *); 54 static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 55 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 56 static void igb_clear_hw_cntrs_82575(struct e1000_hw *); 57 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); 58 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, 59 u16 *); 60 static s32 igb_get_phy_id_82575(struct e1000_hw *); 61 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); 62 static bool igb_sgmii_active_82575(struct e1000_hw *); 63 static s32 igb_reset_init_script_82575(struct e1000_hw *); 64 static s32 igb_read_mac_addr_82575(struct e1000_hw *); 65 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 66 static s32 
igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
						u16 offset);
static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
						u16 offset);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);

/* 82580 packet-buffer size lookup table, indexed by the RXPBS register
 * field (values presumably in KB -- confirm against the 82580 datasheet). */
static const u16 e1000_82580_rxpbs_table[] =
	{ 36, 72, 144, 1, 2, 4, 8, 16,
	  35, 70, 140 };
#define E1000_82580_RXPBS_TABLE_SIZE \
	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))

/**
 *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 *  @hw: pointer to the HW structure
 *
 *  Called to determine if the I2C pins are being used for I2C or as an
 *  external MDIO interface since the two options are mutually exclusive.
 **/
static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = false;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		/* 82575/82576 expose the external-MDIO flag in MDIC */
		reg = rd32(E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580/i350 moved the flag into MDICNFG */
		reg = rd32(E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		break;
	}
	return ext_mdio;
}

/**
 *  igb_get_invariants_82575 - Determine MAC type and set up function pointers
 *  @hw: pointer to the HW structure
 *
 *  Maps the device ID to a MAC type, detects the media type from CTRL_EXT,
 *  sizes the NVM, and installs the MAC/NVM/PHY operation function pointers
 *  used by the rest of the driver.  Returns 0 on success or a negative
 *  E1000 error code on an unknown device or PHY.
 **/
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	u32 eecd;
	s32 ret_val;
	u16 size;
	u32 ctrl_ext = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;
	}

	/* Set media type */
	/*
	 * The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	phy->media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		dev_spec->sgmii_active = true;
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	default:
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
	if (mac->type == e1000_82576)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
	if (mac->type == e1000_82580)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
	if (mac->type == e1000_i350)
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
	/* reset: 82580 and later share one reset routine */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts */
	if (mac->type == e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	/* NVM initialization */
	eecd = rd32(E1000_EECD);

	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		/* no override: let the EECD addr-bits strap decide */
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/*
	 * Check for invalid size
	 */
	if ((hw->mac.type == e1000_82576) && (size > 15)) {
		printk("igb: The NVM size is not valid, "
			"defaulting to 32K.\n");
		size = 15;
	}
	nvm->word_size = 1 << size;
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	/* NVM Function Pointers */
	nvm->ops.acquire = igb_acquire_nvm_82575;
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = igb_read_nvm_eerd;
	else
		nvm->ops.read = igb_read_nvm_spi;

	nvm->ops.release = igb_release_nvm_82575;
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		break;
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		break;
	default:
		nvm->ops.validate = igb_validate_nvm_checksum;
		nvm->ops.update = igb_update_nvm_checksum;
	}
	nvm->ops.write = igb_write_nvm_spi;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters: nothing more to do for non-copper media */
	if (phy->media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		return 0;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	/* PHY function pointers: I2C is only enabled for SGMII PHYs */
	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else if (hw->mac.type >= e1000_82580) {
		phy->ops.read_reg = igb_read_phy_reg_82580;
		phy->ops.write_reg = igb_write_phy_reg_82580;
	} else {
		phy->ops.read_reg = igb_read_phy_reg_igp;
		phy->ops.write_reg = igb_write_phy_reg_igp;
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
		       E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;

		/* I347AT4 and M88E1112 use the gen2 cable-length method */
		if (phy->id == I347AT4_E_PHY_ID ||
		    phy->id == M88E1112_E_PHY_ID)
			phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;

		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}

/**
 * igb_acquire_phy_82575 - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
{
	/* each PCI function has its own PHY semaphore bit */
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return igb_acquire_swfw_sync_82575(hw, mask);
}

/**
 * igb_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static void igb_release_phy_82575(struct e1000_hw *hw)
{
	/* must mirror the mask selection in igb_acquire_phy_82575() */
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	igb_release_swfw_sync_82575(hw, mask);
}

/**
 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 **/
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					u16 *data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the serial gigabit
 * media independent interface.
 **/
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					 u16 data)
{
	s32 ret_val = -E1000_ERR_PARAM;


	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHY's which do and do not use
 * sgmi interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	/*
	 * For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works. For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work. The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		/* external MDIO: read the PHY address the firmware strapped */
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/*
	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/*
			 * At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}

/**
 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
576 **/ 577 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 578 { 579 s32 ret_val; 580 581 /* 582 * This isn't a true "hard" reset, but is the only reset 583 * available to us at this time. 584 */ 585 586 hw_dbg("Soft resetting SGMII attached PHY...\n"); 587 588 /* 589 * SFP documentation requires the following to configure the SPF module 590 * to work on SGMII. No further documentation is given. 591 */ 592 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 593 if (ret_val) 594 goto out; 595 596 ret_val = igb_phy_sw_reset(hw); 597 598 out: 599 return ret_val; 600 } 601 602 /** 603 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 604 * @hw: pointer to the HW structure 605 * @active: true to enable LPLU, false to disable 606 * 607 * Sets the LPLU D0 state according to the active flag. When 608 * activating LPLU this function also disables smart speed 609 * and vice versa. LPLU will not be activated unless the 610 * device autonegotiation advertisement meets standards of 611 * either 10 or 10/100 or 10/100/1000 at all duplexes. 612 * This is a function pointer entry point only called by 613 * PHY setup routines. 
614 **/ 615 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 616 { 617 struct e1000_phy_info *phy = &hw->phy; 618 s32 ret_val; 619 u16 data; 620 621 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 622 if (ret_val) 623 goto out; 624 625 if (active) { 626 data |= IGP02E1000_PM_D0_LPLU; 627 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 628 data); 629 if (ret_val) 630 goto out; 631 632 /* When LPLU is enabled, we should disable SmartSpeed */ 633 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 634 &data); 635 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 636 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 637 data); 638 if (ret_val) 639 goto out; 640 } else { 641 data &= ~IGP02E1000_PM_D0_LPLU; 642 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 643 data); 644 /* 645 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 646 * during Dx states where the power conservation is most 647 * important. During driver activity we should enable 648 * SmartSpeed, so performance is maintained. 649 */ 650 if (phy->smart_speed == e1000_smart_speed_on) { 651 ret_val = phy->ops.read_reg(hw, 652 IGP01E1000_PHY_PORT_CONFIG, &data); 653 if (ret_val) 654 goto out; 655 656 data |= IGP01E1000_PSCFR_SMART_SPEED; 657 ret_val = phy->ops.write_reg(hw, 658 IGP01E1000_PHY_PORT_CONFIG, data); 659 if (ret_val) 660 goto out; 661 } else if (phy->smart_speed == e1000_smart_speed_off) { 662 ret_val = phy->ops.read_reg(hw, 663 IGP01E1000_PHY_PORT_CONFIG, &data); 664 if (ret_val) 665 goto out; 666 667 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 668 ret_val = phy->ops.write_reg(hw, 669 IGP01E1000_PHY_PORT_CONFIG, data); 670 if (ret_val) 671 goto out; 672 } 673 } 674 675 out: 676 return ret_val; 677 } 678 679 /** 680 * igb_acquire_nvm_82575 - Request for access to EEPROM 681 * @hw: pointer to the HW structure 682 * 683 * Acquire the necessary semaphores for exclusive access to the EEPROM. 
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	/* drop the SW/FW semaphore again if the NVM itself can't be had */
	if (ret_val)
		igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	igb_release_nvm(hw);
	igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware bits sit in the high half */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* the HW semaphore guards SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* claim the resource, then drop the HW semaphore */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* spin until the HW semaphore protecting SW_FW_SYNC is ours */
	while (igb_get_hw_semaphore(hw) != 0);
	/* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0.
If we were to return with error, EEPROM-less silicon 798 * would not be able to be reset or change link. 799 **/ 800 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 801 { 802 s32 timeout = PHY_CFG_TIMEOUT; 803 s32 ret_val = 0; 804 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 805 806 if (hw->bus.func == 1) 807 mask = E1000_NVM_CFG_DONE_PORT_1; 808 else if (hw->bus.func == E1000_FUNC_2) 809 mask = E1000_NVM_CFG_DONE_PORT_2; 810 else if (hw->bus.func == E1000_FUNC_3) 811 mask = E1000_NVM_CFG_DONE_PORT_3; 812 813 while (timeout) { 814 if (rd32(E1000_EEMNGCTL) & mask) 815 break; 816 msleep(1); 817 timeout--; 818 } 819 if (!timeout) 820 hw_dbg("MNG configuration cycle has not completed.\n"); 821 822 /* If EEPROM is not marked present, init the PHY manually */ 823 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && 824 (hw->phy.type == e1000_phy_igp_3)) 825 igb_phy_init_script_igp3(hw); 826 827 return ret_val; 828 } 829 830 /** 831 * igb_check_for_link_82575 - Check for link 832 * @hw: pointer to the HW structure 833 * 834 * If sgmii is enabled, then use the pcs register to determine link, otherwise 835 * use the generic interface for determining link. 836 **/ 837 static s32 igb_check_for_link_82575(struct e1000_hw *hw) 838 { 839 s32 ret_val; 840 u16 speed, duplex; 841 842 if (hw->phy.media_type != e1000_media_type_copper) { 843 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 844 &duplex); 845 /* 846 * Use this flag to determine if link needs to be checked or 847 * not. If we have link clear the flag so that we do not 848 * continue to check for link. 
849 */ 850 hw->mac.get_link_status = !hw->mac.serdes_has_link; 851 } else { 852 ret_val = igb_check_for_copper_link(hw); 853 } 854 855 return ret_val; 856 } 857 858 /** 859 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown 860 * @hw: pointer to the HW structure 861 **/ 862 void igb_power_up_serdes_link_82575(struct e1000_hw *hw) 863 { 864 u32 reg; 865 866 867 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 868 !igb_sgmii_active_82575(hw)) 869 return; 870 871 /* Enable PCS to turn on link */ 872 reg = rd32(E1000_PCS_CFG0); 873 reg |= E1000_PCS_CFG_PCS_EN; 874 wr32(E1000_PCS_CFG0, reg); 875 876 /* Power up the laser */ 877 reg = rd32(E1000_CTRL_EXT); 878 reg &= ~E1000_CTRL_EXT_SDP3_DATA; 879 wr32(E1000_CTRL_EXT, reg); 880 881 /* flush the write to verify completion */ 882 wrfl(); 883 msleep(1); 884 } 885 886 /** 887 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 888 * @hw: pointer to the HW structure 889 * @speed: stores the current speed 890 * @duplex: stores the current duplex 891 * 892 * Using the physical coding sub-layer (PCS), retrieve the current speed and 893 * duplex, then store the values in the pointers provided. 894 **/ 895 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, 896 u16 *duplex) 897 { 898 struct e1000_mac_info *mac = &hw->mac; 899 u32 pcs; 900 901 /* Set up defaults for the return values of this function */ 902 mac->serdes_has_link = false; 903 *speed = 0; 904 *duplex = 0; 905 906 /* 907 * Read the PCS Status register for link state. For non-copper mode, 908 * the status register is not accurate. The PCS status register is 909 * used instead. 910 */ 911 pcs = rd32(E1000_PCS_LSTAT); 912 913 /* 914 * The link up bit determines when link is up on autoneg. The sync ok 915 * gets set once both sides sync up and agree upon link. 
Stable link 916 * can be determined by checking for both link up and link sync ok 917 */ 918 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { 919 mac->serdes_has_link = true; 920 921 /* Detect and store PCS speed */ 922 if (pcs & E1000_PCS_LSTS_SPEED_1000) { 923 *speed = SPEED_1000; 924 } else if (pcs & E1000_PCS_LSTS_SPEED_100) { 925 *speed = SPEED_100; 926 } else { 927 *speed = SPEED_10; 928 } 929 930 /* Detect and store PCS duplex */ 931 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { 932 *duplex = FULL_DUPLEX; 933 } else { 934 *duplex = HALF_DUPLEX; 935 } 936 } 937 938 return 0; 939 } 940 941 /** 942 * igb_shutdown_serdes_link_82575 - Remove link during power down 943 * @hw: pointer to the HW structure 944 * 945 * In the case of fiber serdes, shut down optics and PCS on driver unload 946 * when management pass thru is not enabled. 947 **/ 948 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 949 { 950 u32 reg; 951 952 if (hw->phy.media_type != e1000_media_type_internal_serdes && 953 igb_sgmii_active_82575(hw)) 954 return; 955 956 if (!igb_enable_mng_pass_thru(hw)) { 957 /* Disable PCS to turn off link */ 958 reg = rd32(E1000_PCS_CFG0); 959 reg &= ~E1000_PCS_CFG_PCS_EN; 960 wr32(E1000_PCS_CFG0, reg); 961 962 /* shutdown the laser */ 963 reg = rd32(E1000_CTRL_EXT); 964 reg |= E1000_CTRL_EXT_SDP3_DATA; 965 wr32(E1000_CTRL_EXT, reg); 966 967 /* flush the write to verify completion */ 968 wrfl(); 969 msleep(1); 970 } 971 } 972 973 /** 974 * igb_reset_hw_82575 - Reset hardware 975 * @hw: pointer to the HW structure 976 * 977 * This resets the hardware into a known state. This is a 978 * function pointer entry point called by the api module. 979 **/ 980 static s32 igb_reset_hw_82575(struct e1000_hw *hw) 981 { 982 u32 ctrl, icr; 983 s32 ret_val; 984 985 /* 986 * Prevent the PCI-E bus from sticking if there is no TLP connection 987 * on the last TLP read/write transaction when MAC is reset. 
988 */ 989 ret_val = igb_disable_pcie_master(hw); 990 if (ret_val) 991 hw_dbg("PCI-E Master disable polling has failed.\n"); 992 993 /* set the completion timeout for interface */ 994 ret_val = igb_set_pcie_completion_timeout(hw); 995 if (ret_val) { 996 hw_dbg("PCI-E Set completion timeout has failed.\n"); 997 } 998 999 hw_dbg("Masking off all interrupts\n"); 1000 wr32(E1000_IMC, 0xffffffff); 1001 1002 wr32(E1000_RCTL, 0); 1003 wr32(E1000_TCTL, E1000_TCTL_PSP); 1004 wrfl(); 1005 1006 msleep(10); 1007 1008 ctrl = rd32(E1000_CTRL); 1009 1010 hw_dbg("Issuing a global reset to MAC\n"); 1011 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 1012 1013 ret_val = igb_get_auto_rd_done(hw); 1014 if (ret_val) { 1015 /* 1016 * When auto config read does not complete, do not 1017 * return with an error. This can happen in situations 1018 * where there is no eeprom and prevents getting link. 1019 */ 1020 hw_dbg("Auto Read Done did not complete\n"); 1021 } 1022 1023 /* If EEPROM is not present, run manual init scripts */ 1024 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) 1025 igb_reset_init_script_82575(hw); 1026 1027 /* Clear any pending interrupt events. */ 1028 wr32(E1000_IMC, 0xffffffff); 1029 icr = rd32(E1000_ICR); 1030 1031 /* Install any alternate MAC address into RAR0 */ 1032 ret_val = igb_check_alt_mac_addr(hw); 1033 1034 return ret_val; 1035 } 1036 1037 /** 1038 * igb_init_hw_82575 - Initialize hardware 1039 * @hw: pointer to the HW structure 1040 * 1041 * This inits the hardware readying it for operation. 
1042 **/ 1043 static s32 igb_init_hw_82575(struct e1000_hw *hw) 1044 { 1045 struct e1000_mac_info *mac = &hw->mac; 1046 s32 ret_val; 1047 u16 i, rar_count = mac->rar_entry_count; 1048 1049 /* Initialize identification LED */ 1050 ret_val = igb_id_led_init(hw); 1051 if (ret_val) { 1052 hw_dbg("Error initializing identification LED\n"); 1053 /* This is not fatal and we should not stop init due to this */ 1054 } 1055 1056 /* Disabling VLAN filtering */ 1057 hw_dbg("Initializing the IEEE VLAN\n"); 1058 igb_clear_vfta(hw); 1059 1060 /* Setup the receive address */ 1061 igb_init_rx_addrs(hw, rar_count); 1062 1063 /* Zero out the Multicast HASH table */ 1064 hw_dbg("Zeroing the MTA\n"); 1065 for (i = 0; i < mac->mta_reg_count; i++) 1066 array_wr32(E1000_MTA, i, 0); 1067 1068 /* Zero out the Unicast HASH table */ 1069 hw_dbg("Zeroing the UTA\n"); 1070 for (i = 0; i < mac->uta_reg_count; i++) 1071 array_wr32(E1000_UTA, i, 0); 1072 1073 /* Setup link and flow control */ 1074 ret_val = igb_setup_link(hw); 1075 1076 /* 1077 * Clear all of the statistics registers (clear on read). It is 1078 * important that we do this after we have tried to establish link 1079 * because the symbol error count will increment wildly if there 1080 * is no link. 1081 */ 1082 igb_clear_hw_cntrs_82575(hw); 1083 1084 return ret_val; 1085 } 1086 1087 /** 1088 * igb_setup_copper_link_82575 - Configure copper link settings 1089 * @hw: pointer to the HW structure 1090 * 1091 * Configures the link for auto-neg or forced speed and duplex. Then we check 1092 * for link, once link is established calls to configure collision distance 1093 * and flow control are called. 
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Set link up and clear any forced speed/duplex before PHY setup */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Configure the PCS layer first; no-op unless serdes/sgmii is active */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch PHY-specific setup based on the detected PHY type */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID)
			ret_val = igb_copper_link_setup_m88_gen2(hw);
		else
			ret_val = igb_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	/* Generic copper link setup (link check, collision distance, fc) */
	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}

/**
 * igb_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link.  The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii), or serdes fiber is being used.  Configures the link
 * for auto-negotiation or forces speed/duplex.
1154 **/ 1155 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1156 { 1157 u32 ctrl_ext, ctrl_reg, reg; 1158 bool pcs_autoneg; 1159 s32 ret_val = E1000_SUCCESS; 1160 u16 data; 1161 1162 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1163 !igb_sgmii_active_82575(hw)) 1164 return ret_val; 1165 1166 1167 /* 1168 * On the 82575, SerDes loopback mode persists until it is 1169 * explicitly turned off or a power cycle is performed. A read to 1170 * the register does not indicate its status. Therefore, we ensure 1171 * loopback mode is disabled during initialization. 1172 */ 1173 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1174 1175 /* power on the sfp cage if present */ 1176 ctrl_ext = rd32(E1000_CTRL_EXT); 1177 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1178 wr32(E1000_CTRL_EXT, ctrl_ext); 1179 1180 ctrl_reg = rd32(E1000_CTRL); 1181 ctrl_reg |= E1000_CTRL_SLU; 1182 1183 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1184 /* set both sw defined pins */ 1185 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1186 1187 /* Set switch control to serdes energy detect */ 1188 reg = rd32(E1000_CONNSW); 1189 reg |= E1000_CONNSW_ENRGSRC; 1190 wr32(E1000_CONNSW, reg); 1191 } 1192 1193 reg = rd32(E1000_PCS_LCTL); 1194 1195 /* default pcs_autoneg to the same setting as mac autoneg */ 1196 pcs_autoneg = hw->mac.autoneg; 1197 1198 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1199 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1200 /* sgmii mode lets the phy handle forcing speed/duplex */ 1201 pcs_autoneg = true; 1202 /* autoneg time out should be disabled for SGMII mode */ 1203 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1204 break; 1205 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1206 /* disable PCS autoneg and support parallel detect only */ 1207 pcs_autoneg = false; 1208 default: 1209 if (hw->mac.type == e1000_82575 || 1210 hw->mac.type == e1000_82576) { 1211 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1212 if (ret_val) { 1213 
printk(KERN_DEBUG "NVM Read Error\n\n"); 1214 return ret_val; 1215 } 1216 1217 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) 1218 pcs_autoneg = false; 1219 } 1220 1221 /* 1222 * non-SGMII modes only supports a speed of 1000/Full for the 1223 * link so it is best to just force the MAC and let the pcs 1224 * link either autoneg or be forced to 1000/Full 1225 */ 1226 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1227 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1228 1229 /* set speed of 1000/Full if speed/duplex is forced */ 1230 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1231 break; 1232 } 1233 1234 wr32(E1000_CTRL, ctrl_reg); 1235 1236 /* 1237 * New SerDes mode allows for forcing speed or autonegotiating speed 1238 * at 1gb. Autoneg should be default set by most drivers. This is the 1239 * mode that will be compatible with older link partners and switches. 1240 * However, both are supported by the hardware and some drivers/tools. 1241 */ 1242 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1243 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1244 1245 /* 1246 * We force flow control to prevent the CTRL register values from being 1247 * overwritten by the autonegotiated flow control values 1248 */ 1249 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1250 1251 if (pcs_autoneg) { 1252 /* Set PCS register for autoneg */ 1253 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1254 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1255 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1256 } else { 1257 /* Set PCS register for forced link */ 1258 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1259 1260 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1261 } 1262 1263 wr32(E1000_PCS_LCTL, reg); 1264 1265 if (!igb_sgmii_active_82575(hw)) 1266 igb_force_mac_fc(hw); 1267 1268 return ret_val; 1269 } 1270 1271 /** 1272 * igb_sgmii_active_82575 - Return sgmii state 1273 * @hw: pointer to the HW structure 1274 * 1275 * 82575 silicon has a 
 * serialized gigabit media independent interface (sgmii)
 * which can be enabled for use in the embedded applications. Simply
 * return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	/* sgmii_active is cached in dev_spec during invariant detection */
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * igb_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected. This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	/* No-op (still success) for non-82575 parts */
	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/*
	 * If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* Clear the counters shared with other e1000 parts first */
	igb_clear_hw_cntrs_base(hw);

	/* Packet size bucket RX/TX counters */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* Error counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* Management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	/* Interrupt assertion / cause counters */
	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* Host/switch traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if managability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Workaround only applies to 82575 with manageability receiving TCO */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues (saving their RXDCTL state for restore) */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround (clear-on-read) */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 * igb_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The
 * defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 200ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capababilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		/* gcr is written back at the out label below */
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
1552 **/ 1553 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 1554 { 1555 u32 dtxswc; 1556 1557 switch (hw->mac.type) { 1558 case e1000_82576: 1559 case e1000_i350: 1560 dtxswc = rd32(E1000_DTXSWC); 1561 if (enable) { 1562 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | 1563 E1000_DTXSWC_VLAN_SPOOF_MASK); 1564 /* The PF can spoof - it has to in order to 1565 * support emulation mode NICs */ 1566 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); 1567 } else { 1568 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 1569 E1000_DTXSWC_VLAN_SPOOF_MASK); 1570 } 1571 wr32(E1000_DTXSWC, dtxswc); 1572 break; 1573 default: 1574 break; 1575 } 1576 } 1577 1578 /** 1579 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback 1580 * @hw: pointer to the hardware struct 1581 * @enable: state to enter, either enabled or disabled 1582 * 1583 * enables/disables L2 switch loopback functionality. 1584 **/ 1585 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 1586 { 1587 u32 dtxswc = rd32(E1000_DTXSWC); 1588 1589 if (enable) 1590 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1591 else 1592 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1593 1594 wr32(E1000_DTXSWC, dtxswc); 1595 } 1596 1597 /** 1598 * igb_vmdq_set_replication_pf - enable or disable vmdq replication 1599 * @hw: pointer to the hardware struct 1600 * @enable: state to enter, either enabled or disabled 1601 * 1602 * enables/disables replication of packets across multiple pools. 
1603 **/ 1604 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 1605 { 1606 u32 vt_ctl = rd32(E1000_VT_CTL); 1607 1608 if (enable) 1609 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 1610 else 1611 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 1612 1613 wr32(E1000_VT_CTL, vt_ctl); 1614 } 1615 1616 /** 1617 * igb_read_phy_reg_82580 - Read 82580 MDI control register 1618 * @hw: pointer to the HW structure 1619 * @offset: register offset to be read 1620 * @data: pointer to the read data 1621 * 1622 * Reads the MDI control register in the PHY at offset and stores the 1623 * information read to data. 1624 **/ 1625 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 1626 { 1627 s32 ret_val; 1628 1629 1630 ret_val = hw->phy.ops.acquire(hw); 1631 if (ret_val) 1632 goto out; 1633 1634 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 1635 1636 hw->phy.ops.release(hw); 1637 1638 out: 1639 return ret_val; 1640 } 1641 1642 /** 1643 * igb_write_phy_reg_82580 - Write 82580 MDI control register 1644 * @hw: pointer to the HW structure 1645 * @offset: register offset to write to 1646 * @data: data to write to register at offset 1647 * 1648 * Writes data to MDI control register in the PHY at offset. 1649 **/ 1650 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 1651 { 1652 s32 ret_val; 1653 1654 1655 ret_val = hw->phy.ops.acquire(hw); 1656 if (ret_val) 1657 goto out; 1658 1659 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 1660 1661 hw->phy.ops.release(hw); 1662 1663 out: 1664 return ret_val; 1665 } 1666 1667 /** 1668 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits 1669 * @hw: pointer to the HW structure 1670 * 1671 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on 1672 * the values found in the EEPROM. This addresses an issue in which these 1673 * bits are not restored from EEPROM after reset. 
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	/* Workaround applies only to 82580 parts in sgmii mode */
	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* Re-apply the EEPROM's MDIO destination/sharing choice */
	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* Consume the one-shot global reset request */
	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/*
	 * Determine whether or not a global dev reset is requested; fall
	 * back to a port reset if the SW mailbox cannot be acquired.
	 */
	if (global_device_reset &&
	    igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}

/**
 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 * @data: data received by reading RXPBS register
 *
 * The 82580 uses a table based approach for packet buffer allocation sizes.
 * This function converts the retrieved value into the correct table value
 *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
 *     0x0 36  72 144   1   2   4   8  16
 *     0x8 35  70 140 rsv rsv rsv rsv rsv
 */
u16 igb_rxpbs_adjust_82580(u32 data)
{
	u16 ret_val = 0;

	/* Out-of-range (reserved) values map to 0 */
	if (data < E1000_82580_RXPBS_TABLE_SIZE)
		ret_val = e1000_82580_rxpbs_table[data];

	return ret_val;
}

/**
 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
 * checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1822 **/ 1823 s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 1824 { 1825 s32 ret_val = 0; 1826 u16 checksum = 0; 1827 u16 i, nvm_data; 1828 1829 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 1830 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 1831 if (ret_val) { 1832 hw_dbg("NVM Read Error\n"); 1833 goto out; 1834 } 1835 checksum += nvm_data; 1836 } 1837 1838 if (checksum != (u16) NVM_SUM) { 1839 hw_dbg("NVM Checksum Invalid\n"); 1840 ret_val = -E1000_ERR_NVM; 1841 goto out; 1842 } 1843 1844 out: 1845 return ret_val; 1846 } 1847 1848 /** 1849 * igb_update_nvm_checksum_with_offset - Update EEPROM 1850 * checksum 1851 * @hw: pointer to the HW structure 1852 * @offset: offset in words of the checksum protected region 1853 * 1854 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 1855 * up to the checksum. Then calculates the EEPROM checksum and writes the 1856 * value to the EEPROM. 1857 **/ 1858 s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 1859 { 1860 s32 ret_val; 1861 u16 checksum = 0; 1862 u16 i, nvm_data; 1863 1864 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 1865 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 1866 if (ret_val) { 1867 hw_dbg("NVM Read Error while updating checksum.\n"); 1868 goto out; 1869 } 1870 checksum += nvm_data; 1871 } 1872 checksum = (u16) NVM_SUM - checksum; 1873 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 1874 &checksum); 1875 if (ret_val) 1876 hw_dbg("NVM Write Error while updating checksum.\n"); 1877 1878 out: 1879 return ret_val; 1880 } 1881 1882 /** 1883 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum 1884 * @hw: pointer to the HW structure 1885 * 1886 * Calculates the EEPROM section checksum by reading/adding each word of 1887 * the EEPROM and then verifies that the sum of the EEPROM is 1888 * equal to 0xBABA. 
1889 **/ 1890 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 1891 { 1892 s32 ret_val = 0; 1893 u16 eeprom_regions_count = 1; 1894 u16 j, nvm_data; 1895 u16 nvm_offset; 1896 1897 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 1898 if (ret_val) { 1899 hw_dbg("NVM Read Error\n"); 1900 goto out; 1901 } 1902 1903 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 1904 /* if checksums compatibility bit is set validate checksums 1905 * for all 4 ports. */ 1906 eeprom_regions_count = 4; 1907 } 1908 1909 for (j = 0; j < eeprom_regions_count; j++) { 1910 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 1911 ret_val = igb_validate_nvm_checksum_with_offset(hw, 1912 nvm_offset); 1913 if (ret_val != 0) 1914 goto out; 1915 } 1916 1917 out: 1918 return ret_val; 1919 } 1920 1921 /** 1922 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 1923 * @hw: pointer to the HW structure 1924 * 1925 * Updates the EEPROM section checksums for all 4 ports by reading/adding 1926 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 1927 * checksum and writes the value to the EEPROM. 
1928 **/ 1929 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 1930 { 1931 s32 ret_val; 1932 u16 j, nvm_data; 1933 u16 nvm_offset; 1934 1935 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 1936 if (ret_val) { 1937 hw_dbg("NVM Read Error while updating checksum" 1938 " compatibility bit.\n"); 1939 goto out; 1940 } 1941 1942 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 1943 /* set compatibility bit to validate checksums appropriately */ 1944 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 1945 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 1946 &nvm_data); 1947 if (ret_val) { 1948 hw_dbg("NVM Write Error while updating checksum" 1949 " compatibility bit.\n"); 1950 goto out; 1951 } 1952 } 1953 1954 for (j = 0; j < 4; j++) { 1955 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 1956 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 1957 if (ret_val) 1958 goto out; 1959 } 1960 1961 out: 1962 return ret_val; 1963 } 1964 1965 /** 1966 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 1967 * @hw: pointer to the HW structure 1968 * 1969 * Calculates the EEPROM section checksum by reading/adding each word of 1970 * the EEPROM and then verifies that the sum of the EEPROM is 1971 * equal to 0xBABA. 1972 **/ 1973 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 1974 { 1975 s32 ret_val = 0; 1976 u16 j; 1977 u16 nvm_offset; 1978 1979 for (j = 0; j < 4; j++) { 1980 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 1981 ret_val = igb_validate_nvm_checksum_with_offset(hw, 1982 nvm_offset); 1983 if (ret_val != 0) 1984 goto out; 1985 } 1986 1987 out: 1988 return ret_val; 1989 } 1990 1991 /** 1992 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 1993 * @hw: pointer to the HW structure 1994 * 1995 * Updates the EEPROM section checksums for all 4 ports by reading/adding 1996 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 1997 * checksum and writes the value to the EEPROM. 
 **/
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 j;
	u16 nvm_offset;

	/* i350 always carries per-port checksums for all 4 LAN regions */
	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 * igb_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 ipcnfg, eeer, ctrl_ext;

	/* EEE applies only to i350 in internal PHY (non-serdes) link mode */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	if ((hw->mac.type != e1000_i350) ||
	    (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		/* advertise EEE at 100M/1G and enable LPI on both paths */
		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer |= (E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);

	} else {
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
out:

	return ret_val;
}

/* 82575-family MAC operations dispatch table */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw              = igb_init_hw_82575,
	.check_for_link       = igb_check_for_link_82575,
	.rar_set              = igb_rar_set,
	.read_mac_addr        = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
};

/* 82575-family PHY operations dispatch table */
static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire              = igb_acquire_phy_82575,
	.get_cfg_done         = igb_get_cfg_done_82575,
	.release              = igb_release_phy_82575,
};

/* 82575-family NVM operations dispatch table */
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire              = igb_acquire_nvm_82575,
	.read                 = igb_read_nvm_eerd,
	.release              = igb_release_nvm_82575,
	.write                = igb_write_nvm_spi,
};

/* Exported device description consumed by the probe path */
const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};