1 /******************************************************************************* 2 3 Intel(R) Gigabit Ethernet Linux driver 4 Copyright(c) 2007-2011 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* e1000_82575 29 * e1000_82576 30 */ 31 32 #include <linux/types.h> 33 #include <linux/if_ether.h> 34 35 #include "e1000_mac.h" 36 #include "e1000_82575.h" 37 38 static s32 igb_get_invariants_82575(struct e1000_hw *); 39 static s32 igb_acquire_phy_82575(struct e1000_hw *); 40 static void igb_release_phy_82575(struct e1000_hw *); 41 static s32 igb_acquire_nvm_82575(struct e1000_hw *); 42 static void igb_release_nvm_82575(struct e1000_hw *); 43 static s32 igb_check_for_link_82575(struct e1000_hw *); 44 static s32 igb_get_cfg_done_82575(struct e1000_hw *); 45 static s32 igb_init_hw_82575(struct e1000_hw *); 46 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 49 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 50 static s32 igb_reset_hw_82575(struct e1000_hw *); 51 static s32 igb_reset_hw_82580(struct e1000_hw *); 52 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 53 static s32 igb_setup_copper_link_82575(struct e1000_hw *); 54 static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 55 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 56 static void igb_clear_hw_cntrs_82575(struct e1000_hw *); 57 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); 58 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, 59 u16 *); 60 static s32 igb_get_phy_id_82575(struct e1000_hw *); 61 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); 62 static bool igb_sgmii_active_82575(struct e1000_hw *); 63 static s32 igb_reset_init_script_82575(struct e1000_hw *); 64 static s32 igb_read_mac_addr_82575(struct e1000_hw *); 65 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 66 static s32 
igb_reset_mdicnfg_82580(struct e1000_hw *hw); 67 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); 68 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 69 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 70 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 71 static const u16 e1000_82580_rxpbs_table[] = 72 { 36, 72, 144, 1, 2, 4, 8, 16, 73 35, 70, 140 }; 74 #define E1000_82580_RXPBS_TABLE_SIZE \ 75 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) 76 77 /** 78 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 79 * @hw: pointer to the HW structure 80 * 81 * Called to determine if the I2C pins are being used for I2C or as an 82 * external MDIO interface since the two options are mutually exclusive. 83 **/ 84 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) 85 { 86 u32 reg = 0; 87 bool ext_mdio = false; 88 89 switch (hw->mac.type) { 90 case e1000_82575: 91 case e1000_82576: 92 reg = rd32(E1000_MDIC); 93 ext_mdio = !!(reg & E1000_MDIC_DEST); 94 break; 95 case e1000_82580: 96 case e1000_i350: 97 reg = rd32(E1000_MDICNFG); 98 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 99 break; 100 default: 101 break; 102 } 103 return ext_mdio; 104 } 105 106 static s32 igb_get_invariants_82575(struct e1000_hw *hw) 107 { 108 struct e1000_phy_info *phy = &hw->phy; 109 struct e1000_nvm_info *nvm = &hw->nvm; 110 struct e1000_mac_info *mac = &hw->mac; 111 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 112 u32 eecd; 113 s32 ret_val; 114 u16 size; 115 u32 ctrl_ext = 0; 116 117 switch (hw->device_id) { 118 case E1000_DEV_ID_82575EB_COPPER: 119 case E1000_DEV_ID_82575EB_FIBER_SERDES: 120 case E1000_DEV_ID_82575GB_QUAD_COPPER: 121 mac->type = e1000_82575; 122 break; 123 case E1000_DEV_ID_82576: 124 case E1000_DEV_ID_82576_NS: 125 case E1000_DEV_ID_82576_NS_SERDES: 126 case E1000_DEV_ID_82576_FIBER: 127 case E1000_DEV_ID_82576_SERDES: 128 case E1000_DEV_ID_82576_QUAD_COPPER: 129 case 
E1000_DEV_ID_82576_QUAD_COPPER_ET2: 130 case E1000_DEV_ID_82576_SERDES_QUAD: 131 mac->type = e1000_82576; 132 break; 133 case E1000_DEV_ID_82580_COPPER: 134 case E1000_DEV_ID_82580_FIBER: 135 case E1000_DEV_ID_82580_QUAD_FIBER: 136 case E1000_DEV_ID_82580_SERDES: 137 case E1000_DEV_ID_82580_SGMII: 138 case E1000_DEV_ID_82580_COPPER_DUAL: 139 case E1000_DEV_ID_DH89XXCC_SGMII: 140 case E1000_DEV_ID_DH89XXCC_SERDES: 141 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 142 case E1000_DEV_ID_DH89XXCC_SFP: 143 mac->type = e1000_82580; 144 break; 145 case E1000_DEV_ID_I350_COPPER: 146 case E1000_DEV_ID_I350_FIBER: 147 case E1000_DEV_ID_I350_SERDES: 148 case E1000_DEV_ID_I350_SGMII: 149 mac->type = e1000_i350; 150 break; 151 default: 152 return -E1000_ERR_MAC_INIT; 153 break; 154 } 155 156 /* Set media type */ 157 /* 158 * The 82575 uses bits 22:23 for link mode. The mode can be changed 159 * based on the EEPROM. We cannot rely upon device ID. There 160 * is no distinguishable difference between fiber and internal 161 * SerDes mode on the 82575. There can be an external PHY attached 162 * on the SGMII interface. For this, we'll set sgmii_active to true. 
163 */ 164 phy->media_type = e1000_media_type_copper; 165 dev_spec->sgmii_active = false; 166 167 ctrl_ext = rd32(E1000_CTRL_EXT); 168 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 169 case E1000_CTRL_EXT_LINK_MODE_SGMII: 170 dev_spec->sgmii_active = true; 171 break; 172 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 173 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 174 hw->phy.media_type = e1000_media_type_internal_serdes; 175 break; 176 default: 177 break; 178 } 179 180 /* Set mta register count */ 181 mac->mta_reg_count = 128; 182 /* Set rar entry count */ 183 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 184 if (mac->type == e1000_82576) 185 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 186 if (mac->type == e1000_82580) 187 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 188 if (mac->type == e1000_i350) 189 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 190 /* reset */ 191 if (mac->type >= e1000_82580) 192 mac->ops.reset_hw = igb_reset_hw_82580; 193 else 194 mac->ops.reset_hw = igb_reset_hw_82575; 195 /* Set if part includes ASF firmware */ 196 mac->asf_firmware_present = true; 197 /* Set if manageability features are enabled. */ 198 mac->arc_subsystem_valid = 199 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) 200 ? true : false; 201 /* enable EEE on i350 parts */ 202 if (mac->type == e1000_i350) 203 dev_spec->eee_disable = false; 204 else 205 dev_spec->eee_disable = true; 206 /* physical interface link setup */ 207 mac->ops.setup_physical_interface = 208 (hw->phy.media_type == e1000_media_type_copper) 209 ? 
igb_setup_copper_link_82575 210 : igb_setup_serdes_link_82575; 211 212 /* NVM initialization */ 213 eecd = rd32(E1000_EECD); 214 215 nvm->opcode_bits = 8; 216 nvm->delay_usec = 1; 217 switch (nvm->override) { 218 case e1000_nvm_override_spi_large: 219 nvm->page_size = 32; 220 nvm->address_bits = 16; 221 break; 222 case e1000_nvm_override_spi_small: 223 nvm->page_size = 8; 224 nvm->address_bits = 8; 225 break; 226 default: 227 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 228 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; 229 break; 230 } 231 232 nvm->type = e1000_nvm_eeprom_spi; 233 234 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 235 E1000_EECD_SIZE_EX_SHIFT); 236 237 /* 238 * Added to a constant, "size" becomes the left-shift value 239 * for setting word_size. 240 */ 241 size += NVM_WORD_SIZE_BASE_SHIFT; 242 243 /* 244 * Check for invalid size 245 */ 246 if ((hw->mac.type == e1000_82576) && (size > 15)) { 247 printk("igb: The NVM size is not valid, " 248 "defaulting to 32K.\n"); 249 size = 15; 250 } 251 nvm->word_size = 1 << size; 252 if (nvm->word_size == (1 << 15)) 253 nvm->page_size = 128; 254 255 /* NVM Function Pointers */ 256 nvm->ops.acquire = igb_acquire_nvm_82575; 257 if (nvm->word_size < (1 << 15)) 258 nvm->ops.read = igb_read_nvm_eerd; 259 else 260 nvm->ops.read = igb_read_nvm_spi; 261 262 nvm->ops.release = igb_release_nvm_82575; 263 switch (hw->mac.type) { 264 case e1000_82580: 265 nvm->ops.validate = igb_validate_nvm_checksum_82580; 266 nvm->ops.update = igb_update_nvm_checksum_82580; 267 break; 268 case e1000_i350: 269 nvm->ops.validate = igb_validate_nvm_checksum_i350; 270 nvm->ops.update = igb_update_nvm_checksum_i350; 271 break; 272 default: 273 nvm->ops.validate = igb_validate_nvm_checksum; 274 nvm->ops.update = igb_update_nvm_checksum; 275 } 276 nvm->ops.write = igb_write_nvm_spi; 277 278 /* if part supports SR-IOV then initialize mailbox parameters */ 279 switch (mac->type) { 280 case e1000_82576: 281 case e1000_i350: 
282 igb_init_mbx_params_pf(hw); 283 break; 284 default: 285 break; 286 } 287 288 /* setup PHY parameters */ 289 if (phy->media_type != e1000_media_type_copper) { 290 phy->type = e1000_phy_none; 291 return 0; 292 } 293 294 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 295 phy->reset_delay_us = 100; 296 297 ctrl_ext = rd32(E1000_CTRL_EXT); 298 299 /* PHY function pointers */ 300 if (igb_sgmii_active_82575(hw)) { 301 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 302 ctrl_ext |= E1000_CTRL_I2C_ENA; 303 } else { 304 phy->ops.reset = igb_phy_hw_reset; 305 ctrl_ext &= ~E1000_CTRL_I2C_ENA; 306 } 307 308 wr32(E1000_CTRL_EXT, ctrl_ext); 309 igb_reset_mdicnfg_82580(hw); 310 311 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { 312 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 313 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 314 } else if (hw->mac.type >= e1000_82580) { 315 phy->ops.read_reg = igb_read_phy_reg_82580; 316 phy->ops.write_reg = igb_write_phy_reg_82580; 317 } else { 318 phy->ops.read_reg = igb_read_phy_reg_igp; 319 phy->ops.write_reg = igb_write_phy_reg_igp; 320 } 321 322 /* set lan id */ 323 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> 324 E1000_STATUS_FUNC_SHIFT; 325 326 /* Set phy->phy_addr and phy->id. 
*/ 327 ret_val = igb_get_phy_id_82575(hw); 328 if (ret_val) 329 return ret_val; 330 331 /* Verify phy id and set remaining function pointers */ 332 switch (phy->id) { 333 case I347AT4_E_PHY_ID: 334 case M88E1112_E_PHY_ID: 335 case M88E1111_I_PHY_ID: 336 phy->type = e1000_phy_m88; 337 phy->ops.get_phy_info = igb_get_phy_info_m88; 338 339 if (phy->id == I347AT4_E_PHY_ID || 340 phy->id == M88E1112_E_PHY_ID) 341 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; 342 else 343 phy->ops.get_cable_length = igb_get_cable_length_m88; 344 345 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 346 break; 347 case IGP03E1000_E_PHY_ID: 348 phy->type = e1000_phy_igp_3; 349 phy->ops.get_phy_info = igb_get_phy_info_igp; 350 phy->ops.get_cable_length = igb_get_cable_length_igp_2; 351 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; 352 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; 353 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 354 break; 355 case I82580_I_PHY_ID: 356 case I350_I_PHY_ID: 357 phy->type = e1000_phy_82580; 358 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; 359 phy->ops.get_cable_length = igb_get_cable_length_82580; 360 phy->ops.get_phy_info = igb_get_phy_info_82580; 361 break; 362 default: 363 return -E1000_ERR_PHY; 364 } 365 366 return 0; 367 } 368 369 /** 370 * igb_acquire_phy_82575 - Acquire rights to access PHY 371 * @hw: pointer to the HW structure 372 * 373 * Acquire access rights to the correct PHY. This is a 374 * function pointer entry point called by the api module. 
375 **/ 376 static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 377 { 378 u16 mask = E1000_SWFW_PHY0_SM; 379 380 if (hw->bus.func == E1000_FUNC_1) 381 mask = E1000_SWFW_PHY1_SM; 382 else if (hw->bus.func == E1000_FUNC_2) 383 mask = E1000_SWFW_PHY2_SM; 384 else if (hw->bus.func == E1000_FUNC_3) 385 mask = E1000_SWFW_PHY3_SM; 386 387 return igb_acquire_swfw_sync_82575(hw, mask); 388 } 389 390 /** 391 * igb_release_phy_82575 - Release rights to access PHY 392 * @hw: pointer to the HW structure 393 * 394 * A wrapper to release access rights to the correct PHY. This is a 395 * function pointer entry point called by the api module. 396 **/ 397 static void igb_release_phy_82575(struct e1000_hw *hw) 398 { 399 u16 mask = E1000_SWFW_PHY0_SM; 400 401 if (hw->bus.func == E1000_FUNC_1) 402 mask = E1000_SWFW_PHY1_SM; 403 else if (hw->bus.func == E1000_FUNC_2) 404 mask = E1000_SWFW_PHY2_SM; 405 else if (hw->bus.func == E1000_FUNC_3) 406 mask = E1000_SWFW_PHY3_SM; 407 408 igb_release_swfw_sync_82575(hw, mask); 409 } 410 411 /** 412 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 413 * @hw: pointer to the HW structure 414 * @offset: register offset to be read 415 * @data: pointer to the read data 416 * 417 * Reads the PHY register at offset using the serial gigabit media independent 418 * interface and stores the retrieved information in data. 
419 **/ 420 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 421 u16 *data) 422 { 423 s32 ret_val = -E1000_ERR_PARAM; 424 425 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 426 hw_dbg("PHY Address %u is out of range\n", offset); 427 goto out; 428 } 429 430 ret_val = hw->phy.ops.acquire(hw); 431 if (ret_val) 432 goto out; 433 434 ret_val = igb_read_phy_reg_i2c(hw, offset, data); 435 436 hw->phy.ops.release(hw); 437 438 out: 439 return ret_val; 440 } 441 442 /** 443 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 444 * @hw: pointer to the HW structure 445 * @offset: register offset to write to 446 * @data: data to write at register offset 447 * 448 * Writes the data to PHY register at the offset using the serial gigabit 449 * media independent interface. 450 **/ 451 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 452 u16 data) 453 { 454 s32 ret_val = -E1000_ERR_PARAM; 455 456 457 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 458 hw_dbg("PHY Address %d is out of range\n", offset); 459 goto out; 460 } 461 462 ret_val = hw->phy.ops.acquire(hw); 463 if (ret_val) 464 goto out; 465 466 ret_val = igb_write_phy_reg_i2c(hw, offset, data); 467 468 hw->phy.ops.release(hw); 469 470 out: 471 return ret_val; 472 } 473 474 /** 475 * igb_get_phy_id_82575 - Retrieve PHY addr and id 476 * @hw: pointer to the HW structure 477 * 478 * Retrieves the PHY address and ID for both PHY's which do and do not use 479 * sgmi interface. 480 **/ 481 static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 482 { 483 struct e1000_phy_info *phy = &hw->phy; 484 s32 ret_val = 0; 485 u16 phy_id; 486 u32 ctrl_ext; 487 u32 mdic; 488 489 /* 490 * For SGMII PHYs, we try the list of possible addresses until 491 * we find one that works. For non-SGMII PHYs 492 * (e.g. integrated copper PHYs), an address of 1 should 493 * work. The result of this function should mean phy->phy_addr 494 * and phy->id are set correctly. 
495 */ 496 if (!(igb_sgmii_active_82575(hw))) { 497 phy->addr = 1; 498 ret_val = igb_get_phy_id(hw); 499 goto out; 500 } 501 502 if (igb_sgmii_uses_mdio_82575(hw)) { 503 switch (hw->mac.type) { 504 case e1000_82575: 505 case e1000_82576: 506 mdic = rd32(E1000_MDIC); 507 mdic &= E1000_MDIC_PHY_MASK; 508 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 509 break; 510 case e1000_82580: 511 case e1000_i350: 512 mdic = rd32(E1000_MDICNFG); 513 mdic &= E1000_MDICNFG_PHY_MASK; 514 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 515 break; 516 default: 517 ret_val = -E1000_ERR_PHY; 518 goto out; 519 break; 520 } 521 ret_val = igb_get_phy_id(hw); 522 goto out; 523 } 524 525 /* Power on sgmii phy if it is disabled */ 526 ctrl_ext = rd32(E1000_CTRL_EXT); 527 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 528 wrfl(); 529 msleep(300); 530 531 /* 532 * The address field in the I2CCMD register is 3 bits and 0 is invalid. 533 * Therefore, we need to test 1-7 534 */ 535 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 536 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 537 if (ret_val == 0) { 538 hw_dbg("Vendor ID 0x%08X read at address %u\n", 539 phy_id, phy->addr); 540 /* 541 * At the time of this writing, The M88 part is 542 * the only supported SGMII PHY product. 543 */ 544 if (phy_id == M88_VENDOR) 545 break; 546 } else { 547 hw_dbg("PHY address %u was unreadable\n", phy->addr); 548 } 549 } 550 551 /* A valid PHY type couldn't be found. */ 552 if (phy->addr == 8) { 553 phy->addr = 0; 554 ret_val = -E1000_ERR_PHY; 555 goto out; 556 } else { 557 ret_val = igb_get_phy_id(hw); 558 } 559 560 /* restore previous sfp cage power state */ 561 wr32(E1000_CTRL_EXT, ctrl_ext); 562 563 out: 564 return ret_val; 565 } 566 567 /** 568 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset 569 * @hw: pointer to the HW structure 570 * 571 * Resets the PHY using the serial gigabit media independent interface. 
572 **/ 573 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 574 { 575 s32 ret_val; 576 577 /* 578 * This isn't a true "hard" reset, but is the only reset 579 * available to us at this time. 580 */ 581 582 hw_dbg("Soft resetting SGMII attached PHY...\n"); 583 584 /* 585 * SFP documentation requires the following to configure the SPF module 586 * to work on SGMII. No further documentation is given. 587 */ 588 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 589 if (ret_val) 590 goto out; 591 592 ret_val = igb_phy_sw_reset(hw); 593 594 out: 595 return ret_val; 596 } 597 598 /** 599 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 600 * @hw: pointer to the HW structure 601 * @active: true to enable LPLU, false to disable 602 * 603 * Sets the LPLU D0 state according to the active flag. When 604 * activating LPLU this function also disables smart speed 605 * and vice versa. LPLU will not be activated unless the 606 * device autonegotiation advertisement meets standards of 607 * either 10 or 10/100 or 10/100/1000 at all duplexes. 608 * This is a function pointer entry point only called by 609 * PHY setup routines. 
610 **/ 611 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 612 { 613 struct e1000_phy_info *phy = &hw->phy; 614 s32 ret_val; 615 u16 data; 616 617 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 618 if (ret_val) 619 goto out; 620 621 if (active) { 622 data |= IGP02E1000_PM_D0_LPLU; 623 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 624 data); 625 if (ret_val) 626 goto out; 627 628 /* When LPLU is enabled, we should disable SmartSpeed */ 629 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 630 &data); 631 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 632 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 633 data); 634 if (ret_val) 635 goto out; 636 } else { 637 data &= ~IGP02E1000_PM_D0_LPLU; 638 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 639 data); 640 /* 641 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 642 * during Dx states where the power conservation is most 643 * important. During driver activity we should enable 644 * SmartSpeed, so performance is maintained. 645 */ 646 if (phy->smart_speed == e1000_smart_speed_on) { 647 ret_val = phy->ops.read_reg(hw, 648 IGP01E1000_PHY_PORT_CONFIG, &data); 649 if (ret_val) 650 goto out; 651 652 data |= IGP01E1000_PSCFR_SMART_SPEED; 653 ret_val = phy->ops.write_reg(hw, 654 IGP01E1000_PHY_PORT_CONFIG, data); 655 if (ret_val) 656 goto out; 657 } else if (phy->smart_speed == e1000_smart_speed_off) { 658 ret_val = phy->ops.read_reg(hw, 659 IGP01E1000_PHY_PORT_CONFIG, &data); 660 if (ret_val) 661 goto out; 662 663 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 664 ret_val = phy->ops.write_reg(hw, 665 IGP01E1000_PHY_PORT_CONFIG, data); 666 if (ret_val) 667 goto out; 668 } 669 } 670 671 out: 672 return ret_val; 673 } 674 675 /** 676 * igb_acquire_nvm_82575 - Request for access to EEPROM 677 * @hw: pointer to the HW structure 678 * 679 * Acquire the necessary semaphores for exclusive access to the EEPROM. 
680 * Set the EEPROM access request bit and wait for EEPROM access grant bit. 681 * Return successful if access grant bit set, else clear the request for 682 * EEPROM access and return -E1000_ERR_NVM (-1). 683 **/ 684 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) 685 { 686 s32 ret_val; 687 688 ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 689 if (ret_val) 690 goto out; 691 692 ret_val = igb_acquire_nvm(hw); 693 694 if (ret_val) 695 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 696 697 out: 698 return ret_val; 699 } 700 701 /** 702 * igb_release_nvm_82575 - Release exclusive access to EEPROM 703 * @hw: pointer to the HW structure 704 * 705 * Stop any current commands to the EEPROM and clear the EEPROM request bit, 706 * then release the semaphores acquired. 707 **/ 708 static void igb_release_nvm_82575(struct e1000_hw *hw) 709 { 710 igb_release_nvm(hw); 711 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 712 } 713 714 /** 715 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore 716 * @hw: pointer to the HW structure 717 * @mask: specifies which semaphore to acquire 718 * 719 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask 720 * will also specify which port we're acquiring the lock for. 
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware's bits occupy the upper word */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* the HW semaphore guards SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		/* break out (holding the HW semaphore) once the resource is free */
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* claim the resource while still holding the HW semaphore */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* spin until the HW semaphore is ours; release itself cannot fail */
	while (igb_get_hw_semaphore(hw) != 0);
	/* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 *  igb_get_cfg_done_82575 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so an error is *ONLY* logged and returns
 *  0.
If we were to return with error, EEPROM-less silicon 794 * would not be able to be reset or change link. 795 **/ 796 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 797 { 798 s32 timeout = PHY_CFG_TIMEOUT; 799 s32 ret_val = 0; 800 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 801 802 if (hw->bus.func == 1) 803 mask = E1000_NVM_CFG_DONE_PORT_1; 804 else if (hw->bus.func == E1000_FUNC_2) 805 mask = E1000_NVM_CFG_DONE_PORT_2; 806 else if (hw->bus.func == E1000_FUNC_3) 807 mask = E1000_NVM_CFG_DONE_PORT_3; 808 809 while (timeout) { 810 if (rd32(E1000_EEMNGCTL) & mask) 811 break; 812 msleep(1); 813 timeout--; 814 } 815 if (!timeout) 816 hw_dbg("MNG configuration cycle has not completed.\n"); 817 818 /* If EEPROM is not marked present, init the PHY manually */ 819 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && 820 (hw->phy.type == e1000_phy_igp_3)) 821 igb_phy_init_script_igp3(hw); 822 823 return ret_val; 824 } 825 826 /** 827 * igb_check_for_link_82575 - Check for link 828 * @hw: pointer to the HW structure 829 * 830 * If sgmii is enabled, then use the pcs register to determine link, otherwise 831 * use the generic interface for determining link. 832 **/ 833 static s32 igb_check_for_link_82575(struct e1000_hw *hw) 834 { 835 s32 ret_val; 836 u16 speed, duplex; 837 838 if (hw->phy.media_type != e1000_media_type_copper) { 839 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 840 &duplex); 841 /* 842 * Use this flag to determine if link needs to be checked or 843 * not. If we have link clear the flag so that we do not 844 * continue to check for link. 
845 */ 846 hw->mac.get_link_status = !hw->mac.serdes_has_link; 847 } else { 848 ret_val = igb_check_for_copper_link(hw); 849 } 850 851 return ret_val; 852 } 853 854 /** 855 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown 856 * @hw: pointer to the HW structure 857 **/ 858 void igb_power_up_serdes_link_82575(struct e1000_hw *hw) 859 { 860 u32 reg; 861 862 863 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 864 !igb_sgmii_active_82575(hw)) 865 return; 866 867 /* Enable PCS to turn on link */ 868 reg = rd32(E1000_PCS_CFG0); 869 reg |= E1000_PCS_CFG_PCS_EN; 870 wr32(E1000_PCS_CFG0, reg); 871 872 /* Power up the laser */ 873 reg = rd32(E1000_CTRL_EXT); 874 reg &= ~E1000_CTRL_EXT_SDP3_DATA; 875 wr32(E1000_CTRL_EXT, reg); 876 877 /* flush the write to verify completion */ 878 wrfl(); 879 msleep(1); 880 } 881 882 /** 883 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 884 * @hw: pointer to the HW structure 885 * @speed: stores the current speed 886 * @duplex: stores the current duplex 887 * 888 * Using the physical coding sub-layer (PCS), retrieve the current speed and 889 * duplex, then store the values in the pointers provided. 890 **/ 891 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, 892 u16 *duplex) 893 { 894 struct e1000_mac_info *mac = &hw->mac; 895 u32 pcs; 896 897 /* Set up defaults for the return values of this function */ 898 mac->serdes_has_link = false; 899 *speed = 0; 900 *duplex = 0; 901 902 /* 903 * Read the PCS Status register for link state. For non-copper mode, 904 * the status register is not accurate. The PCS status register is 905 * used instead. 906 */ 907 pcs = rd32(E1000_PCS_LSTAT); 908 909 /* 910 * The link up bit determines when link is up on autoneg. The sync ok 911 * gets set once both sides sync up and agree upon link. 
Stable link 912 * can be determined by checking for both link up and link sync ok 913 */ 914 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { 915 mac->serdes_has_link = true; 916 917 /* Detect and store PCS speed */ 918 if (pcs & E1000_PCS_LSTS_SPEED_1000) { 919 *speed = SPEED_1000; 920 } else if (pcs & E1000_PCS_LSTS_SPEED_100) { 921 *speed = SPEED_100; 922 } else { 923 *speed = SPEED_10; 924 } 925 926 /* Detect and store PCS duplex */ 927 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { 928 *duplex = FULL_DUPLEX; 929 } else { 930 *duplex = HALF_DUPLEX; 931 } 932 } 933 934 return 0; 935 } 936 937 /** 938 * igb_shutdown_serdes_link_82575 - Remove link during power down 939 * @hw: pointer to the HW structure 940 * 941 * In the case of fiber serdes, shut down optics and PCS on driver unload 942 * when management pass thru is not enabled. 943 **/ 944 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 945 { 946 u32 reg; 947 948 if (hw->phy.media_type != e1000_media_type_internal_serdes && 949 igb_sgmii_active_82575(hw)) 950 return; 951 952 if (!igb_enable_mng_pass_thru(hw)) { 953 /* Disable PCS to turn off link */ 954 reg = rd32(E1000_PCS_CFG0); 955 reg &= ~E1000_PCS_CFG_PCS_EN; 956 wr32(E1000_PCS_CFG0, reg); 957 958 /* shutdown the laser */ 959 reg = rd32(E1000_CTRL_EXT); 960 reg |= E1000_CTRL_EXT_SDP3_DATA; 961 wr32(E1000_CTRL_EXT, reg); 962 963 /* flush the write to verify completion */ 964 wrfl(); 965 msleep(1); 966 } 967 } 968 969 /** 970 * igb_reset_hw_82575 - Reset hardware 971 * @hw: pointer to the HW structure 972 * 973 * This resets the hardware into a known state. This is a 974 * function pointer entry point called by the api module. 975 **/ 976 static s32 igb_reset_hw_82575(struct e1000_hw *hw) 977 { 978 u32 ctrl, icr; 979 s32 ret_val; 980 981 /* 982 * Prevent the PCI-E bus from sticking if there is no TLP connection 983 * on the last TLP read/write transaction when MAC is reset. 
984 */ 985 ret_val = igb_disable_pcie_master(hw); 986 if (ret_val) 987 hw_dbg("PCI-E Master disable polling has failed.\n"); 988 989 /* set the completion timeout for interface */ 990 ret_val = igb_set_pcie_completion_timeout(hw); 991 if (ret_val) { 992 hw_dbg("PCI-E Set completion timeout has failed.\n"); 993 } 994 995 hw_dbg("Masking off all interrupts\n"); 996 wr32(E1000_IMC, 0xffffffff); 997 998 wr32(E1000_RCTL, 0); 999 wr32(E1000_TCTL, E1000_TCTL_PSP); 1000 wrfl(); 1001 1002 msleep(10); 1003 1004 ctrl = rd32(E1000_CTRL); 1005 1006 hw_dbg("Issuing a global reset to MAC\n"); 1007 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 1008 1009 ret_val = igb_get_auto_rd_done(hw); 1010 if (ret_val) { 1011 /* 1012 * When auto config read does not complete, do not 1013 * return with an error. This can happen in situations 1014 * where there is no eeprom and prevents getting link. 1015 */ 1016 hw_dbg("Auto Read Done did not complete\n"); 1017 } 1018 1019 /* If EEPROM is not present, run manual init scripts */ 1020 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) 1021 igb_reset_init_script_82575(hw); 1022 1023 /* Clear any pending interrupt events. */ 1024 wr32(E1000_IMC, 0xffffffff); 1025 icr = rd32(E1000_ICR); 1026 1027 /* Install any alternate MAC address into RAR0 */ 1028 ret_val = igb_check_alt_mac_addr(hw); 1029 1030 return ret_val; 1031 } 1032 1033 /** 1034 * igb_init_hw_82575 - Initialize hardware 1035 * @hw: pointer to the HW structure 1036 * 1037 * This inits the hardware readying it for operation. 
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	/* i350 has its own VFTA-clear routine */
	if (hw->mac.type == e1000_i350)
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);

	return ret_val;
}

/**
 *  igb_setup_copper_link_82575 - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Configures the link for auto-neg or forced speed and duplex.  Then we check
 *  for link, once link is established calls to configure collision distance
 *  and flow control are called.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Force link up and let speed/duplex come from the PHY rather
	 * than being forced in the MAC */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Serdes/PCS setup must happen even for copper when SGMII is the
	 * link between MAC and external PHY */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-specific copper setup routine */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID)
			ret_val = igb_copper_link_setup_m88_gen2(hw);
		else
			ret_val = igb_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	/* Generic copper link bring-up: wait for link, then configure
	 * collision distance and flow control */
	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}

/**
 *  igb_setup_serdes_link_82575 - Setup link for serdes
 *  @hw: pointer to the HW structure
 *
 *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
 *  used on copper connections where the serialized gigabit media independent
 *  interface (sgmii), or serdes fiber is being used.  Configures the link
 *  for auto-negotiation or forces speed/duplex.
1153 **/ 1154 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1155 { 1156 u32 ctrl_ext, ctrl_reg, reg; 1157 bool pcs_autoneg; 1158 s32 ret_val = E1000_SUCCESS; 1159 u16 data; 1160 1161 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1162 !igb_sgmii_active_82575(hw)) 1163 return ret_val; 1164 1165 1166 /* 1167 * On the 82575, SerDes loopback mode persists until it is 1168 * explicitly turned off or a power cycle is performed. A read to 1169 * the register does not indicate its status. Therefore, we ensure 1170 * loopback mode is disabled during initialization. 1171 */ 1172 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1173 1174 /* power on the sfp cage if present */ 1175 ctrl_ext = rd32(E1000_CTRL_EXT); 1176 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1177 wr32(E1000_CTRL_EXT, ctrl_ext); 1178 1179 ctrl_reg = rd32(E1000_CTRL); 1180 ctrl_reg |= E1000_CTRL_SLU; 1181 1182 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1183 /* set both sw defined pins */ 1184 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1185 1186 /* Set switch control to serdes energy detect */ 1187 reg = rd32(E1000_CONNSW); 1188 reg |= E1000_CONNSW_ENRGSRC; 1189 wr32(E1000_CONNSW, reg); 1190 } 1191 1192 reg = rd32(E1000_PCS_LCTL); 1193 1194 /* default pcs_autoneg to the same setting as mac autoneg */ 1195 pcs_autoneg = hw->mac.autoneg; 1196 1197 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1198 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1199 /* sgmii mode lets the phy handle forcing speed/duplex */ 1200 pcs_autoneg = true; 1201 /* autoneg time out should be disabled for SGMII mode */ 1202 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1203 break; 1204 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1205 /* disable PCS autoneg and support parallel detect only */ 1206 pcs_autoneg = false; 1207 default: 1208 if (hw->mac.type == e1000_82575 || 1209 hw->mac.type == e1000_82576) { 1210 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1211 if (ret_val) { 1212 
printk(KERN_DEBUG "NVM Read Error\n\n"); 1213 return ret_val; 1214 } 1215 1216 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) 1217 pcs_autoneg = false; 1218 } 1219 1220 /* 1221 * non-SGMII modes only supports a speed of 1000/Full for the 1222 * link so it is best to just force the MAC and let the pcs 1223 * link either autoneg or be forced to 1000/Full 1224 */ 1225 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1226 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1227 1228 /* set speed of 1000/Full if speed/duplex is forced */ 1229 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1230 break; 1231 } 1232 1233 wr32(E1000_CTRL, ctrl_reg); 1234 1235 /* 1236 * New SerDes mode allows for forcing speed or autonegotiating speed 1237 * at 1gb. Autoneg should be default set by most drivers. This is the 1238 * mode that will be compatible with older link partners and switches. 1239 * However, both are supported by the hardware and some drivers/tools. 1240 */ 1241 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1242 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1243 1244 /* 1245 * We force flow control to prevent the CTRL register values from being 1246 * overwritten by the autonegotiated flow control values 1247 */ 1248 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1249 1250 if (pcs_autoneg) { 1251 /* Set PCS register for autoneg */ 1252 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1253 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1254 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1255 } else { 1256 /* Set PCS register for forced link */ 1257 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1258 1259 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1260 } 1261 1262 wr32(E1000_PCS_LCTL, reg); 1263 1264 if (!igb_sgmii_active_82575(hw)) 1265 igb_force_mac_fc(hw); 1266 1267 return ret_val; 1268 } 1269 1270 /** 1271 * igb_sgmii_active_82575 - Return sgmii state 1272 * @hw: pointer to the HW structure 1273 * 1274 * 82575 silicon has a 
serialized gigabit media independent interface (sgmii)
 *  which can be enabled for use in the embedded applications.  Simply
 *  return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	/* sgmii_active is populated elsewhere (during get_invariants);
	 * this is just an accessor */
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 *  igb_reset_init_script_82575 - Inits HW defaults after reset
 *  @hw: pointer to the HW structure
 *
 *  Inits recommended HW defaults after a reset when there is no EEPROM
 *  detected. This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* The register/value pairs below are Intel-recommended
		 * defaults for EEPROM-less operation; they are opaque
		 * magic numbers from the hardware spec */
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	/* Always succeeds; non-82575 parts are a silent no-op */
	return 0;
}

/**
 *  igb_read_mac_addr_82575 - Read device MAC address
 *  @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/*
	 * If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 *  igb_power_down_phy_copper_82575 - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* Counters below are clear-on-read: reading them is what resets
	 * them.  The return values are intentionally discarded. */
	igb_clear_hw_cntrs_base(hw);

	/* Packet size histogram counters (RX and TX) */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* Error counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* Management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	/* Interrupt assertion / cause counters */
	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* Host/BMC traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 *  @hw: pointer to the HW structure
 *
 *  After rx enable if managability is enabled then there is likely some
 *  bad data at the start of the fifo and possibly in the DMA fifo. This
 *  function clears the fifos and flushes any packets that came in as rx was
 *  being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Workaround applies only to 82575 with manageability (TCO)
	 * receive enabled */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 *  igb_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 200ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                 &pcie_devctl2);
out:
	/* disable completion timeout resend; the GCR write below is
	 * performed on every exit path, including errors */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *  @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 *  enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable) {
			dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
				   E1000_DTXSWC_VLAN_SPOOF_MASK);
			/* The PF can spoof - it has to in order to
			 * support emulation mode NICs
			 *
			 * XOR clears exactly the PF's MAC and VLAN
			 * anti-spoof bits, which the OR above just set.
			 * NOTE(review): assumes 0 <= pf < MAX_NUM_VFS so
			 * both shifts land inside the two masks - verify
			 * against callers. */
			dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
		} else {
			dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
				    E1000_DTXSWC_VLAN_SPOOF_MASK);
		}
		wr32(E1000_DTXSWC, dtxswc);
		break;
	default:
		/* other MAC types have no L2-switch anti-spoofing */
		break;
	}
}

/**
 *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i350:
		/* i350 exposes the same loopback-enable bit at the
		 * TXSWC register offset instead of DTXSWC */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}
}

/**
 *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables replication of packets across multiple pools.
 **/
void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
	u32 vt_ctl = rd32(E1000_VT_CTL);

	if (enable)
		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
	else
		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;

	wr32(E1000_VT_CTL, vt_ctl);
}

/**
 *  igb_read_phy_reg_82580 - Read 82580 MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Reads the MDI control register in the PHY at offset and stores the
 *  information read to data.
 **/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	/* take the PHY semaphore for the duration of the MDIC access */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 *  igb_write_phy_reg_82580 - Write 82580 MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write to register at offset
 *
 *  Writes data to MDI control register in the PHY at offset.
 **/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	/* take the PHY semaphore for the duration of the MDIC access */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 *  @hw: pointer to the HW structure
 *
 *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 *  the values found in the EEPROM.  This addresses an issue in which these
 *  bits are not restored from EEPROM after reset.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	/* Only applies to SGMII-attached 82580 parts */
	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* NOTE(review): bits are only set, never cleared, when the NVM
	 * word has them off - presumably reset already left them clear;
	 * verify against the 82580 datasheet. */
	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 *  igb_reset_hw_82580 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets function or entire device (all ports, etc.)
 *  to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* one-shot request flag: consume it so a later reset defaults to
	 * a port-only reset */
	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/* Determine whether or not a global dev reset is requested;
	 * a global reset needs the SW/FW semaphore - fall back to a
	 * port reset if it cannot be acquired */
	if (global_device_reset &&
		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit (write-1-to-clear) */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);	/* read-to-clear; value intentionally unused */

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}

/**
 *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 *  @data: data received by reading RXPBS register
 *
 *  The 82580 uses a table based approach for packet buffer allocation sizes.
 *  This function converts the retrieved value into the correct table value
 *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
 *     0x0 36  72 144   1   2   4   8  16
 *     0x8 35  70 140 rsv rsv rsv rsv rsv
 */
u16 igb_rxpbs_adjust_82580(u32 data)
{
	u16 ret_val = 0;

	/* out-of-range (reserved) indexes yield 0 */
	if (data < E1000_82580_RXPBS_TABLE_SIZE)
		ret_val = e1000_82580_rxpbs_table[data];

	return ret_val;
}

/**
 *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
 *  checksum
 *  @hw: pointer to the HW structure
 *  @offset: offset in words of the checksum protected region
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
						 u16 offset)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* sum every word up to AND including the stored checksum word;
	 * a valid region sums (mod 2^16) to NVM_SUM (0xBABA) */
	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		hw_dbg("NVM Checksum Invalid\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_update_nvm_checksum_with_offset - Update EEPROM
 *  checksum
 *  @hw: pointer to the HW structure
 *  @offset: offset in words of the checksum protected region
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.
 **/
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* sum every word BEFORE the checksum slot ... */
	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	/* ... then store the complement that makes the total NVM_SUM */
	checksum = (u16) NVM_SUM - checksum;
	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
				&checksum);
	if (ret_val)
		hw_dbg("NVM Write Error while updating checksum.\n");

out:
	return ret_val;
}

/**
 *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM section checksum by reading/adding each word of
 *  the EEPROM and then verifies that the sum of the EEPROM is
 *  equal to 0xBABA.
1906 **/ 1907 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 1908 { 1909 s32 ret_val = 0; 1910 u16 eeprom_regions_count = 1; 1911 u16 j, nvm_data; 1912 u16 nvm_offset; 1913 1914 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 1915 if (ret_val) { 1916 hw_dbg("NVM Read Error\n"); 1917 goto out; 1918 } 1919 1920 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 1921 /* if checksums compatibility bit is set validate checksums 1922 * for all 4 ports. */ 1923 eeprom_regions_count = 4; 1924 } 1925 1926 for (j = 0; j < eeprom_regions_count; j++) { 1927 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 1928 ret_val = igb_validate_nvm_checksum_with_offset(hw, 1929 nvm_offset); 1930 if (ret_val != 0) 1931 goto out; 1932 } 1933 1934 out: 1935 return ret_val; 1936 } 1937 1938 /** 1939 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 1940 * @hw: pointer to the HW structure 1941 * 1942 * Updates the EEPROM section checksums for all 4 ports by reading/adding 1943 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 1944 * checksum and writes the value to the EEPROM. 
 **/
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 j, nvm_data;
	u16 nvm_offset;

	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error while updating checksum"
			" compatibility bit.\n");
		goto out;
	}

	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
		/* set compatibility bit to validate checksums appropriately */
		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
					&nvm_data);
		if (ret_val) {
			hw_dbg("NVM Write Error while updating checksum"
				" compatibility bit.\n");
			goto out;
		}
	}

	/* with the compatibility bit set, all four per-port regions
	 * must carry a valid checksum */
	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
		if (ret_val)
			goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM section checksum by reading/adding each word of
 *  the EEPROM and then verifies that the sum of the EEPROM is
 *  equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 j;
	u16 nvm_offset;

	/* i350 always carries per-port checksums for all 4 ports;
	 * no compatibility-bit check is needed */
	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_validate_nvm_checksum_with_offset(hw,
								nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM section checksums for all 4 ports by reading/adding
 *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
 *  checksum and writes the value to the EEPROM.
 **/
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 j;
	u16 nvm_offset;

	/* rewrite the checksum word of each of the 4 per-port regions */
	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_set_eee_i350 - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 ipcnfg, eeer, ctrl_ext;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	/* EEE applies only to i350 copper (link mode bits clear) */
	if ((hw->mac.type != e1000_i350) ||
	    (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer |= (E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);

	} else {
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
out:

	return ret_val;
}

/* 82575-family operation tables wired into e1000_82575_info below;
 * unset ops fall back to the defaults installed by get_invariants */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw = igb_init_hw_82575,
	.check_for_link = igb_check_for_link_82575,
	.rar_set = igb_rar_set,
	.read_mac_addr = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
};

static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire = igb_acquire_phy_82575,
	.get_cfg_done = igb_get_cfg_done_82575,
	.release = igb_release_phy_82575,
};

static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire = igb_acquire_nvm_82575,
	.read = igb_read_nvm_eerd,
	.release = igb_release_nvm_82575,
	.write = igb_write_nvm_spi,
};

const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};