1 /* Intel(R) Gigabit Ethernet Linux driver 2 * Copyright(c) 2007-2015 Intel Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * The full GNU General Public License is included in this distribution in 17 * the file called "COPYING". 18 * 19 * Contact Information: 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 22 */ 23 24 /* e1000_82575 25 * e1000_82576 26 */ 27 28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 30 #include <linux/types.h> 31 #include <linux/if_ether.h> 32 #include <linux/i2c.h> 33 34 #include "e1000_mac.h" 35 #include "e1000_82575.h" 36 #include "e1000_i210.h" 37 38 static s32 igb_get_invariants_82575(struct e1000_hw *); 39 static s32 igb_acquire_phy_82575(struct e1000_hw *); 40 static void igb_release_phy_82575(struct e1000_hw *); 41 static s32 igb_acquire_nvm_82575(struct e1000_hw *); 42 static void igb_release_nvm_82575(struct e1000_hw *); 43 static s32 igb_check_for_link_82575(struct e1000_hw *); 44 static s32 igb_get_cfg_done_82575(struct e1000_hw *); 45 static s32 igb_init_hw_82575(struct e1000_hw *); 46 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 49 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 50 
static s32 igb_reset_hw_82575(struct e1000_hw *); 51 static s32 igb_reset_hw_82580(struct e1000_hw *); 52 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 53 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); 54 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); 55 static s32 igb_setup_copper_link_82575(struct e1000_hw *); 56 static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 57 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 58 static void igb_clear_hw_cntrs_82575(struct e1000_hw *); 59 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); 60 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, 61 u16 *); 62 static s32 igb_get_phy_id_82575(struct e1000_hw *); 63 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); 64 static bool igb_sgmii_active_82575(struct e1000_hw *); 65 static s32 igb_reset_init_script_82575(struct e1000_hw *); 66 static s32 igb_read_mac_addr_82575(struct e1000_hw *); 67 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 68 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); 69 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); 70 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 71 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 72 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 73 static const u16 e1000_82580_rxpbs_table[] = { 74 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; 75 76 /** 77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 78 * @hw: pointer to the HW structure 79 * 80 * Called to determine if the I2C pins are being used for I2C or as an 81 * external MDIO interface since the two options are mutually exclusive. 
82 **/ 83 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) 84 { 85 u32 reg = 0; 86 bool ext_mdio = false; 87 88 switch (hw->mac.type) { 89 case e1000_82575: 90 case e1000_82576: 91 reg = rd32(E1000_MDIC); 92 ext_mdio = !!(reg & E1000_MDIC_DEST); 93 break; 94 case e1000_82580: 95 case e1000_i350: 96 case e1000_i354: 97 case e1000_i210: 98 case e1000_i211: 99 reg = rd32(E1000_MDICNFG); 100 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 101 break; 102 default: 103 break; 104 } 105 return ext_mdio; 106 } 107 108 /** 109 * igb_check_for_link_media_swap - Check which M88E1112 interface linked 110 * @hw: pointer to the HW structure 111 * 112 * Poll the M88E1112 interfaces to see which interface achieved link. 113 */ 114 static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) 115 { 116 struct e1000_phy_info *phy = &hw->phy; 117 s32 ret_val; 118 u16 data; 119 u8 port = 0; 120 121 /* Check the copper medium. */ 122 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); 123 if (ret_val) 124 return ret_val; 125 126 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); 127 if (ret_val) 128 return ret_val; 129 130 if (data & E1000_M88E1112_STATUS_LINK) 131 port = E1000_MEDIA_PORT_COPPER; 132 133 /* Check the other medium. */ 134 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); 135 if (ret_val) 136 return ret_val; 137 138 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); 139 if (ret_val) 140 return ret_val; 141 142 /* reset page to 0 */ 143 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); 144 if (ret_val) 145 return ret_val; 146 147 if (data & E1000_M88E1112_STATUS_LINK) 148 port = E1000_MEDIA_PORT_OTHER; 149 150 /* Determine if a swap needs to happen. 
*/ 151 if (port && (hw->dev_spec._82575.media_port != port)) { 152 hw->dev_spec._82575.media_port = port; 153 hw->dev_spec._82575.media_changed = true; 154 } else { 155 ret_val = igb_check_for_link_82575(hw); 156 } 157 158 return 0; 159 } 160 161 /** 162 * igb_init_phy_params_82575 - Init PHY func ptrs. 163 * @hw: pointer to the HW structure 164 **/ 165 static s32 igb_init_phy_params_82575(struct e1000_hw *hw) 166 { 167 struct e1000_phy_info *phy = &hw->phy; 168 s32 ret_val = 0; 169 u32 ctrl_ext; 170 171 if (hw->phy.media_type != e1000_media_type_copper) { 172 phy->type = e1000_phy_none; 173 goto out; 174 } 175 176 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 177 phy->reset_delay_us = 100; 178 179 ctrl_ext = rd32(E1000_CTRL_EXT); 180 181 if (igb_sgmii_active_82575(hw)) { 182 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 183 ctrl_ext |= E1000_CTRL_I2C_ENA; 184 } else { 185 phy->ops.reset = igb_phy_hw_reset; 186 ctrl_ext &= ~E1000_CTRL_I2C_ENA; 187 } 188 189 wr32(E1000_CTRL_EXT, ctrl_ext); 190 igb_reset_mdicnfg_82580(hw); 191 192 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { 193 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 194 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 195 } else { 196 switch (hw->mac.type) { 197 case e1000_82580: 198 case e1000_i350: 199 case e1000_i354: 200 phy->ops.read_reg = igb_read_phy_reg_82580; 201 phy->ops.write_reg = igb_write_phy_reg_82580; 202 break; 203 case e1000_i210: 204 case e1000_i211: 205 phy->ops.read_reg = igb_read_phy_reg_gs40g; 206 phy->ops.write_reg = igb_write_phy_reg_gs40g; 207 break; 208 default: 209 phy->ops.read_reg = igb_read_phy_reg_igp; 210 phy->ops.write_reg = igb_write_phy_reg_igp; 211 } 212 } 213 214 /* set lan id */ 215 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> 216 E1000_STATUS_FUNC_SHIFT; 217 218 /* Set phy->phy_addr and phy->id. 
*/ 219 ret_val = igb_get_phy_id_82575(hw); 220 if (ret_val) 221 return ret_val; 222 223 /* Verify phy id and set remaining function pointers */ 224 switch (phy->id) { 225 case M88E1543_E_PHY_ID: 226 case I347AT4_E_PHY_ID: 227 case M88E1112_E_PHY_ID: 228 case M88E1111_I_PHY_ID: 229 phy->type = e1000_phy_m88; 230 phy->ops.check_polarity = igb_check_polarity_m88; 231 phy->ops.get_phy_info = igb_get_phy_info_m88; 232 if (phy->id != M88E1111_I_PHY_ID) 233 phy->ops.get_cable_length = 234 igb_get_cable_length_m88_gen2; 235 else 236 phy->ops.get_cable_length = igb_get_cable_length_m88; 237 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 238 /* Check if this PHY is confgured for media swap. */ 239 if (phy->id == M88E1112_E_PHY_ID) { 240 u16 data; 241 242 ret_val = phy->ops.write_reg(hw, 243 E1000_M88E1112_PAGE_ADDR, 244 2); 245 if (ret_val) 246 goto out; 247 248 ret_val = phy->ops.read_reg(hw, 249 E1000_M88E1112_MAC_CTRL_1, 250 &data); 251 if (ret_val) 252 goto out; 253 254 data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> 255 E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; 256 if (data == E1000_M88E1112_AUTO_COPPER_SGMII || 257 data == E1000_M88E1112_AUTO_COPPER_BASEX) 258 hw->mac.ops.check_for_link = 259 igb_check_for_link_media_swap; 260 } 261 break; 262 case IGP03E1000_E_PHY_ID: 263 phy->type = e1000_phy_igp_3; 264 phy->ops.get_phy_info = igb_get_phy_info_igp; 265 phy->ops.get_cable_length = igb_get_cable_length_igp_2; 266 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; 267 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; 268 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 269 break; 270 case I82580_I_PHY_ID: 271 case I350_I_PHY_ID: 272 phy->type = e1000_phy_82580; 273 phy->ops.force_speed_duplex = 274 igb_phy_force_speed_duplex_82580; 275 phy->ops.get_cable_length = igb_get_cable_length_82580; 276 phy->ops.get_phy_info = igb_get_phy_info_82580; 277 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; 278 
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; 279 break; 280 case I210_I_PHY_ID: 281 phy->type = e1000_phy_i210; 282 phy->ops.check_polarity = igb_check_polarity_m88; 283 phy->ops.get_phy_info = igb_get_phy_info_m88; 284 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; 285 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; 286 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; 287 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 288 break; 289 default: 290 ret_val = -E1000_ERR_PHY; 291 goto out; 292 } 293 294 out: 295 return ret_val; 296 } 297 298 /** 299 * igb_init_nvm_params_82575 - Init NVM func ptrs. 300 * @hw: pointer to the HW structure 301 **/ 302 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 303 { 304 struct e1000_nvm_info *nvm = &hw->nvm; 305 u32 eecd = rd32(E1000_EECD); 306 u16 size; 307 308 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 309 E1000_EECD_SIZE_EX_SHIFT); 310 311 /* Added to a constant, "size" becomes the left-shift value 312 * for setting word_size. 313 */ 314 size += NVM_WORD_SIZE_BASE_SHIFT; 315 316 /* Just in case size is out of range, cap it to the largest 317 * EEPROM size supported 318 */ 319 if (size > 15) 320 size = 15; 321 322 nvm->word_size = 1 << size; 323 nvm->opcode_bits = 8; 324 nvm->delay_usec = 1; 325 326 switch (nvm->override) { 327 case e1000_nvm_override_spi_large: 328 nvm->page_size = 32; 329 nvm->address_bits = 16; 330 break; 331 case e1000_nvm_override_spi_small: 332 nvm->page_size = 8; 333 nvm->address_bits = 8; 334 break; 335 default: 336 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 337 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
338 16 : 8; 339 break; 340 } 341 if (nvm->word_size == (1 << 15)) 342 nvm->page_size = 128; 343 344 nvm->type = e1000_nvm_eeprom_spi; 345 346 /* NVM Function Pointers */ 347 nvm->ops.acquire = igb_acquire_nvm_82575; 348 nvm->ops.release = igb_release_nvm_82575; 349 nvm->ops.write = igb_write_nvm_spi; 350 nvm->ops.validate = igb_validate_nvm_checksum; 351 nvm->ops.update = igb_update_nvm_checksum; 352 if (nvm->word_size < (1 << 15)) 353 nvm->ops.read = igb_read_nvm_eerd; 354 else 355 nvm->ops.read = igb_read_nvm_spi; 356 357 /* override generic family function pointers for specific descendants */ 358 switch (hw->mac.type) { 359 case e1000_82580: 360 nvm->ops.validate = igb_validate_nvm_checksum_82580; 361 nvm->ops.update = igb_update_nvm_checksum_82580; 362 break; 363 case e1000_i354: 364 case e1000_i350: 365 nvm->ops.validate = igb_validate_nvm_checksum_i350; 366 nvm->ops.update = igb_update_nvm_checksum_i350; 367 break; 368 default: 369 break; 370 } 371 372 return 0; 373 } 374 375 /** 376 * igb_init_mac_params_82575 - Init MAC func ptrs. 
377 * @hw: pointer to the HW structure 378 **/ 379 static s32 igb_init_mac_params_82575(struct e1000_hw *hw) 380 { 381 struct e1000_mac_info *mac = &hw->mac; 382 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 383 384 /* Set mta register count */ 385 mac->mta_reg_count = 128; 386 /* Set rar entry count */ 387 switch (mac->type) { 388 case e1000_82576: 389 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 390 break; 391 case e1000_82580: 392 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 393 break; 394 case e1000_i350: 395 case e1000_i354: 396 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 397 break; 398 default: 399 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 400 break; 401 } 402 /* reset */ 403 if (mac->type >= e1000_82580) 404 mac->ops.reset_hw = igb_reset_hw_82580; 405 else 406 mac->ops.reset_hw = igb_reset_hw_82575; 407 408 if (mac->type >= e1000_i210) { 409 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; 410 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; 411 412 } else { 413 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; 414 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; 415 } 416 417 /* Set if part includes ASF firmware */ 418 mac->asf_firmware_present = true; 419 /* Set if manageability features are enabled. */ 420 mac->arc_subsystem_valid = 421 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) 422 ? true : false; 423 /* enable EEE on i350 parts and later parts */ 424 if (mac->type >= e1000_i350) 425 dev_spec->eee_disable = false; 426 else 427 dev_spec->eee_disable = true; 428 /* Allow a single clear of the SW semaphore on I210 and newer */ 429 if (mac->type >= e1000_i210) 430 dev_spec->clear_semaphore_once = true; 431 /* physical interface link setup */ 432 mac->ops.setup_physical_interface = 433 (hw->phy.media_type == e1000_media_type_copper) 434 ? 
igb_setup_copper_link_82575 435 : igb_setup_serdes_link_82575; 436 437 if (mac->type == e1000_82580) { 438 switch (hw->device_id) { 439 /* feature not supported on these id's */ 440 case E1000_DEV_ID_DH89XXCC_SGMII: 441 case E1000_DEV_ID_DH89XXCC_SERDES: 442 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 443 case E1000_DEV_ID_DH89XXCC_SFP: 444 break; 445 default: 446 hw->dev_spec._82575.mas_capable = true; 447 break; 448 } 449 } 450 return 0; 451 } 452 453 /** 454 * igb_set_sfp_media_type_82575 - derives SFP module media type. 455 * @hw: pointer to the HW structure 456 * 457 * The media type is chosen based on SFP module. 458 * compatibility flags retrieved from SFP ID EEPROM. 459 **/ 460 static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) 461 { 462 s32 ret_val = E1000_ERR_CONFIG; 463 u32 ctrl_ext = 0; 464 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 465 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; 466 u8 tranceiver_type = 0; 467 s32 timeout = 3; 468 469 /* Turn I2C interface ON and power on sfp cage */ 470 ctrl_ext = rd32(E1000_CTRL_EXT); 471 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 472 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); 473 474 wrfl(); 475 476 /* Read SFP module data */ 477 while (timeout) { 478 ret_val = igb_read_sfp_data_byte(hw, 479 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), 480 &tranceiver_type); 481 if (ret_val == 0) 482 break; 483 msleep(100); 484 timeout--; 485 } 486 if (ret_val != 0) 487 goto out; 488 489 ret_val = igb_read_sfp_data_byte(hw, 490 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), 491 (u8 *)eth_flags); 492 if (ret_val != 0) 493 goto out; 494 495 /* Check if there is some SFP module plugged and powered */ 496 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || 497 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { 498 dev_spec->module_plugged = true; 499 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { 500 hw->phy.media_type = e1000_media_type_internal_serdes; 501 } 
else if (eth_flags->e100_base_fx) { 502 dev_spec->sgmii_active = true; 503 hw->phy.media_type = e1000_media_type_internal_serdes; 504 } else if (eth_flags->e1000_base_t) { 505 dev_spec->sgmii_active = true; 506 hw->phy.media_type = e1000_media_type_copper; 507 } else { 508 hw->phy.media_type = e1000_media_type_unknown; 509 hw_dbg("PHY module has not been recognized\n"); 510 goto out; 511 } 512 } else { 513 hw->phy.media_type = e1000_media_type_unknown; 514 } 515 ret_val = 0; 516 out: 517 /* Restore I2C interface setting */ 518 wr32(E1000_CTRL_EXT, ctrl_ext); 519 return ret_val; 520 } 521 522 static s32 igb_get_invariants_82575(struct e1000_hw *hw) 523 { 524 struct e1000_mac_info *mac = &hw->mac; 525 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 526 s32 ret_val; 527 u32 ctrl_ext = 0; 528 u32 link_mode = 0; 529 530 switch (hw->device_id) { 531 case E1000_DEV_ID_82575EB_COPPER: 532 case E1000_DEV_ID_82575EB_FIBER_SERDES: 533 case E1000_DEV_ID_82575GB_QUAD_COPPER: 534 mac->type = e1000_82575; 535 break; 536 case E1000_DEV_ID_82576: 537 case E1000_DEV_ID_82576_NS: 538 case E1000_DEV_ID_82576_NS_SERDES: 539 case E1000_DEV_ID_82576_FIBER: 540 case E1000_DEV_ID_82576_SERDES: 541 case E1000_DEV_ID_82576_QUAD_COPPER: 542 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 543 case E1000_DEV_ID_82576_SERDES_QUAD: 544 mac->type = e1000_82576; 545 break; 546 case E1000_DEV_ID_82580_COPPER: 547 case E1000_DEV_ID_82580_FIBER: 548 case E1000_DEV_ID_82580_QUAD_FIBER: 549 case E1000_DEV_ID_82580_SERDES: 550 case E1000_DEV_ID_82580_SGMII: 551 case E1000_DEV_ID_82580_COPPER_DUAL: 552 case E1000_DEV_ID_DH89XXCC_SGMII: 553 case E1000_DEV_ID_DH89XXCC_SERDES: 554 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 555 case E1000_DEV_ID_DH89XXCC_SFP: 556 mac->type = e1000_82580; 557 break; 558 case E1000_DEV_ID_I350_COPPER: 559 case E1000_DEV_ID_I350_FIBER: 560 case E1000_DEV_ID_I350_SERDES: 561 case E1000_DEV_ID_I350_SGMII: 562 mac->type = e1000_i350; 563 break; 564 case E1000_DEV_ID_I210_COPPER: 
565 case E1000_DEV_ID_I210_FIBER: 566 case E1000_DEV_ID_I210_SERDES: 567 case E1000_DEV_ID_I210_SGMII: 568 case E1000_DEV_ID_I210_COPPER_FLASHLESS: 569 case E1000_DEV_ID_I210_SERDES_FLASHLESS: 570 mac->type = e1000_i210; 571 break; 572 case E1000_DEV_ID_I211_COPPER: 573 mac->type = e1000_i211; 574 break; 575 case E1000_DEV_ID_I354_BACKPLANE_1GBPS: 576 case E1000_DEV_ID_I354_SGMII: 577 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: 578 mac->type = e1000_i354; 579 break; 580 default: 581 return -E1000_ERR_MAC_INIT; 582 } 583 584 /* Set media type */ 585 /* The 82575 uses bits 22:23 for link mode. The mode can be changed 586 * based on the EEPROM. We cannot rely upon device ID. There 587 * is no distinguishable difference between fiber and internal 588 * SerDes mode on the 82575. There can be an external PHY attached 589 * on the SGMII interface. For this, we'll set sgmii_active to true. 590 */ 591 hw->phy.media_type = e1000_media_type_copper; 592 dev_spec->sgmii_active = false; 593 dev_spec->module_plugged = false; 594 595 ctrl_ext = rd32(E1000_CTRL_EXT); 596 597 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; 598 switch (link_mode) { 599 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 600 hw->phy.media_type = e1000_media_type_internal_serdes; 601 break; 602 case E1000_CTRL_EXT_LINK_MODE_SGMII: 603 /* Get phy control interface type set (MDIO vs. I2C)*/ 604 if (igb_sgmii_uses_mdio_82575(hw)) { 605 hw->phy.media_type = e1000_media_type_copper; 606 dev_spec->sgmii_active = true; 607 break; 608 } 609 /* fall through for I2C based SGMII */ 610 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 611 /* read media type from SFP EEPROM */ 612 ret_val = igb_set_sfp_media_type_82575(hw); 613 if ((ret_val != 0) || 614 (hw->phy.media_type == e1000_media_type_unknown)) { 615 /* If media type was not identified then return media 616 * type defined by the CTRL_EXT settings. 
617 */ 618 hw->phy.media_type = e1000_media_type_internal_serdes; 619 620 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { 621 hw->phy.media_type = e1000_media_type_copper; 622 dev_spec->sgmii_active = true; 623 } 624 625 break; 626 } 627 628 /* do not change link mode for 100BaseFX */ 629 if (dev_spec->eth_flags.e100_base_fx) 630 break; 631 632 /* change current link mode setting */ 633 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; 634 635 if (hw->phy.media_type == e1000_media_type_copper) 636 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; 637 else 638 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; 639 640 wr32(E1000_CTRL_EXT, ctrl_ext); 641 642 break; 643 default: 644 break; 645 } 646 647 /* mac initialization and operations */ 648 ret_val = igb_init_mac_params_82575(hw); 649 if (ret_val) 650 goto out; 651 652 /* NVM initialization */ 653 ret_val = igb_init_nvm_params_82575(hw); 654 switch (hw->mac.type) { 655 case e1000_i210: 656 case e1000_i211: 657 ret_val = igb_init_nvm_params_i210(hw); 658 break; 659 default: 660 break; 661 } 662 663 if (ret_val) 664 goto out; 665 666 /* if part supports SR-IOV then initialize mailbox parameters */ 667 switch (mac->type) { 668 case e1000_82576: 669 case e1000_i350: 670 igb_init_mbx_params_pf(hw); 671 break; 672 default: 673 break; 674 } 675 676 /* setup PHY parameters */ 677 ret_val = igb_init_phy_params_82575(hw); 678 679 out: 680 return ret_val; 681 } 682 683 /** 684 * igb_acquire_phy_82575 - Acquire rights to access PHY 685 * @hw: pointer to the HW structure 686 * 687 * Acquire access rights to the correct PHY. This is a 688 * function pointer entry point called by the api module. 
689 **/ 690 static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 691 { 692 u16 mask = E1000_SWFW_PHY0_SM; 693 694 if (hw->bus.func == E1000_FUNC_1) 695 mask = E1000_SWFW_PHY1_SM; 696 else if (hw->bus.func == E1000_FUNC_2) 697 mask = E1000_SWFW_PHY2_SM; 698 else if (hw->bus.func == E1000_FUNC_3) 699 mask = E1000_SWFW_PHY3_SM; 700 701 return hw->mac.ops.acquire_swfw_sync(hw, mask); 702 } 703 704 /** 705 * igb_release_phy_82575 - Release rights to access PHY 706 * @hw: pointer to the HW structure 707 * 708 * A wrapper to release access rights to the correct PHY. This is a 709 * function pointer entry point called by the api module. 710 **/ 711 static void igb_release_phy_82575(struct e1000_hw *hw) 712 { 713 u16 mask = E1000_SWFW_PHY0_SM; 714 715 if (hw->bus.func == E1000_FUNC_1) 716 mask = E1000_SWFW_PHY1_SM; 717 else if (hw->bus.func == E1000_FUNC_2) 718 mask = E1000_SWFW_PHY2_SM; 719 else if (hw->bus.func == E1000_FUNC_3) 720 mask = E1000_SWFW_PHY3_SM; 721 722 hw->mac.ops.release_swfw_sync(hw, mask); 723 } 724 725 /** 726 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 727 * @hw: pointer to the HW structure 728 * @offset: register offset to be read 729 * @data: pointer to the read data 730 * 731 * Reads the PHY register at offset using the serial gigabit media independent 732 * interface and stores the retrieved information in data. 
733 **/ 734 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 735 u16 *data) 736 { 737 s32 ret_val = -E1000_ERR_PARAM; 738 739 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 740 hw_dbg("PHY Address %u is out of range\n", offset); 741 goto out; 742 } 743 744 ret_val = hw->phy.ops.acquire(hw); 745 if (ret_val) 746 goto out; 747 748 ret_val = igb_read_phy_reg_i2c(hw, offset, data); 749 750 hw->phy.ops.release(hw); 751 752 out: 753 return ret_val; 754 } 755 756 /** 757 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 758 * @hw: pointer to the HW structure 759 * @offset: register offset to write to 760 * @data: data to write at register offset 761 * 762 * Writes the data to PHY register at the offset using the serial gigabit 763 * media independent interface. 764 **/ 765 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 766 u16 data) 767 { 768 s32 ret_val = -E1000_ERR_PARAM; 769 770 771 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 772 hw_dbg("PHY Address %d is out of range\n", offset); 773 goto out; 774 } 775 776 ret_val = hw->phy.ops.acquire(hw); 777 if (ret_val) 778 goto out; 779 780 ret_val = igb_write_phy_reg_i2c(hw, offset, data); 781 782 hw->phy.ops.release(hw); 783 784 out: 785 return ret_val; 786 } 787 788 /** 789 * igb_get_phy_id_82575 - Retrieve PHY addr and id 790 * @hw: pointer to the HW structure 791 * 792 * Retrieves the PHY address and ID for both PHY's which do and do not use 793 * sgmi interface. 794 **/ 795 static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 796 { 797 struct e1000_phy_info *phy = &hw->phy; 798 s32 ret_val = 0; 799 u16 phy_id; 800 u32 ctrl_ext; 801 u32 mdic; 802 803 /* Extra read required for some PHY's on i354 */ 804 if (hw->mac.type == e1000_i354) 805 igb_get_phy_id(hw); 806 807 /* For SGMII PHYs, we try the list of possible addresses until 808 * we find one that works. For non-SGMII PHYs 809 * (e.g. integrated copper PHYs), an address of 1 should 810 * work. 
The result of this function should mean phy->phy_addr 811 * and phy->id are set correctly. 812 */ 813 if (!(igb_sgmii_active_82575(hw))) { 814 phy->addr = 1; 815 ret_val = igb_get_phy_id(hw); 816 goto out; 817 } 818 819 if (igb_sgmii_uses_mdio_82575(hw)) { 820 switch (hw->mac.type) { 821 case e1000_82575: 822 case e1000_82576: 823 mdic = rd32(E1000_MDIC); 824 mdic &= E1000_MDIC_PHY_MASK; 825 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 826 break; 827 case e1000_82580: 828 case e1000_i350: 829 case e1000_i354: 830 case e1000_i210: 831 case e1000_i211: 832 mdic = rd32(E1000_MDICNFG); 833 mdic &= E1000_MDICNFG_PHY_MASK; 834 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 835 break; 836 default: 837 ret_val = -E1000_ERR_PHY; 838 goto out; 839 } 840 ret_val = igb_get_phy_id(hw); 841 goto out; 842 } 843 844 /* Power on sgmii phy if it is disabled */ 845 ctrl_ext = rd32(E1000_CTRL_EXT); 846 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 847 wrfl(); 848 msleep(300); 849 850 /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 851 * Therefore, we need to test 1-7 852 */ 853 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 854 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 855 if (ret_val == 0) { 856 hw_dbg("Vendor ID 0x%08X read at address %u\n", 857 phy_id, phy->addr); 858 /* At the time of this writing, The M88 part is 859 * the only supported SGMII PHY product. 860 */ 861 if (phy_id == M88_VENDOR) 862 break; 863 } else { 864 hw_dbg("PHY address %u was unreadable\n", phy->addr); 865 } 866 } 867 868 /* A valid PHY type couldn't be found. 
*/ 869 if (phy->addr == 8) { 870 phy->addr = 0; 871 ret_val = -E1000_ERR_PHY; 872 goto out; 873 } else { 874 ret_val = igb_get_phy_id(hw); 875 } 876 877 /* restore previous sfp cage power state */ 878 wr32(E1000_CTRL_EXT, ctrl_ext); 879 880 out: 881 return ret_val; 882 } 883 884 /** 885 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset 886 * @hw: pointer to the HW structure 887 * 888 * Resets the PHY using the serial gigabit media independent interface. 889 **/ 890 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 891 { 892 s32 ret_val; 893 894 /* This isn't a true "hard" reset, but is the only reset 895 * available to us at this time. 896 */ 897 898 hw_dbg("Soft resetting SGMII attached PHY...\n"); 899 900 /* SFP documentation requires the following to configure the SPF module 901 * to work on SGMII. No further documentation is given. 902 */ 903 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 904 if (ret_val) 905 goto out; 906 907 ret_val = igb_phy_sw_reset(hw); 908 909 out: 910 return ret_val; 911 } 912 913 /** 914 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 915 * @hw: pointer to the HW structure 916 * @active: true to enable LPLU, false to disable 917 * 918 * Sets the LPLU D0 state according to the active flag. When 919 * activating LPLU this function also disables smart speed 920 * and vice versa. LPLU will not be activated unless the 921 * device autonegotiation advertisement meets standards of 922 * either 10 or 10/100 or 10/100/1000 at all duplexes. 923 * This is a function pointer entry point only called by 924 * PHY setup routines. 
925 **/ 926 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 927 { 928 struct e1000_phy_info *phy = &hw->phy; 929 s32 ret_val; 930 u16 data; 931 932 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 933 if (ret_val) 934 goto out; 935 936 if (active) { 937 data |= IGP02E1000_PM_D0_LPLU; 938 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 939 data); 940 if (ret_val) 941 goto out; 942 943 /* When LPLU is enabled, we should disable SmartSpeed */ 944 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 945 &data); 946 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 947 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 948 data); 949 if (ret_val) 950 goto out; 951 } else { 952 data &= ~IGP02E1000_PM_D0_LPLU; 953 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 954 data); 955 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 956 * during Dx states where the power conservation is most 957 * important. During driver activity we should enable 958 * SmartSpeed, so performance is maintained. 
	 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	/* On 82580 and later, LPLU and SmartSpeed are controlled through a
	 * MAC-side power management register rather than PHY registers.
	 * NOTE(review): the 32-bit register is read into a u16, so the
	 * upper 16 bits are dropped on write-back -- confirm intentional.
	 */
	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is true, else clear lplu for D3
 * and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 * is used during Dx states where the power conservation is most important.
 * During driver activity, SmartSpeed should be enabled so performance is
 * maintained.
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Only engage D3 LPLU when the advertised speeds let the
		 * link partner fall back to a lower common speed.
		 */
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Take the SW/FW semaphore first, then request the EEPROM itself.
	 * Drop the semaphore again if the EEPROM grab fails so the lock is
	 * never leaked.
	 */
	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	/* Release in reverse order of acquisition: EEPROM access first,
	 * then the SW/FW semaphore taken in igb_acquire_nvm_82575().
	 */
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;		/* software ownership bits */
	u32 fwmask = mask << 16;	/* firmware owns the upper half */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200;	/* up to 200 * 5 ms of polling */

	while (i < timeout) {
		/* The HW semaphore serializes access to SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Mark the resource as owned by software, then drop the HW
	 * semaphore while still holding the SW_FW_SYNC bit just set.
	 */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Spin until the HW semaphore guarding SW_FW_SYNC is ours */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status.  NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0.  If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	/* Each PCI function waits on its own CFG_DONE bit */
	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return 0;
}

/**
 * igb_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function, if using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
				      u16 *duplex)
{
	s32 ret_val;

	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
							     duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
							  duplex);

	return ret_val;
}

/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		/* NOTE(review): this return value is overwritten by the
		 * flow-control call below, so PCS read errors are not
		 * propagated to the caller -- confirm intentional.
		 */
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* Only serdes/SGMII ports have a PCS and laser to power up */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	usleep_range(1000, 2000);
}

/**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg.  The sync ok
	 * gets set once both sides sync up and agree upon link.  Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection. */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbs, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}

/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
1383 **/ 1384 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 1385 { 1386 u32 reg; 1387 1388 if (hw->phy.media_type != e1000_media_type_internal_serdes && 1389 igb_sgmii_active_82575(hw)) 1390 return; 1391 1392 if (!igb_enable_mng_pass_thru(hw)) { 1393 /* Disable PCS to turn off link */ 1394 reg = rd32(E1000_PCS_CFG0); 1395 reg &= ~E1000_PCS_CFG_PCS_EN; 1396 wr32(E1000_PCS_CFG0, reg); 1397 1398 /* shutdown the laser */ 1399 reg = rd32(E1000_CTRL_EXT); 1400 reg |= E1000_CTRL_EXT_SDP3_DATA; 1401 wr32(E1000_CTRL_EXT, reg); 1402 1403 /* flush the write to verify completion */ 1404 wrfl(); 1405 usleep_range(1000, 2000); 1406 } 1407 } 1408 1409 /** 1410 * igb_reset_hw_82575 - Reset hardware 1411 * @hw: pointer to the HW structure 1412 * 1413 * This resets the hardware into a known state. This is a 1414 * function pointer entry point called by the api module. 1415 **/ 1416 static s32 igb_reset_hw_82575(struct e1000_hw *hw) 1417 { 1418 u32 ctrl; 1419 s32 ret_val; 1420 1421 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1422 * on the last TLP read/write transaction when MAC is reset. 1423 */ 1424 ret_val = igb_disable_pcie_master(hw); 1425 if (ret_val) 1426 hw_dbg("PCI-E Master disable polling has failed.\n"); 1427 1428 /* set the completion timeout for interface */ 1429 ret_val = igb_set_pcie_completion_timeout(hw); 1430 if (ret_val) 1431 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1432 1433 hw_dbg("Masking off all interrupts\n"); 1434 wr32(E1000_IMC, 0xffffffff); 1435 1436 wr32(E1000_RCTL, 0); 1437 wr32(E1000_TCTL, E1000_TCTL_PSP); 1438 wrfl(); 1439 1440 usleep_range(10000, 20000); 1441 1442 ctrl = rd32(E1000_CTRL); 1443 1444 hw_dbg("Issuing a global reset to MAC\n"); 1445 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 1446 1447 ret_val = igb_get_auto_rd_done(hw); 1448 if (ret_val) { 1449 /* When auto config read does not complete, do not 1450 * return with an error. 
		 * This can happen in situations where there is no eeprom
		 * and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}

/**
 * igb_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Flash-less i210/i211 parts need the PLL workaround before any
	 * other initialization touches the hardware.
	 */
	if ((hw->mac.type >= e1000_i210) &&
	    !(igb_get_flash_presence_i210(hw))) {
		ret_val = igb_pll_workaround_i210(hw);
		if (ret_val)
			return ret_val;
	}

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}

/**
 * igb_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	/* Set link up and let speed/duplex come from (auto)negotiation */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on supported devices */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
		break;
	default:
		break;
	}

	/* SGMII copper links are still configured through the PCS */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-family specific setup routine */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1543_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}

/**
 * igb_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link.  The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii), or serdes fiber is being used.  Configures the link
 * for auto-negotiation or forces speed/duplex.
 **/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
	bool pcs_autoneg;
	s32 ret_val = 0;
	u16 data;

	/* Nothing to do for plain copper ports */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return ret_val;

	/* On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present and turn on I2C */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	ctrl_ext |= E1000_CTRL_I2C_ENA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
		/* fall through */
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = false;
		}

		/* non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	/* New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb.  Autoneg should be default set by most drivers.  This is
	 * the mode that will be compatible with older link partners and
	 * switches.  However, both are supported by the hardware and some
	 * drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */

		/* Disable force flow control for autoneg */
		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;

		/* Configure flow control advertisement for autoneg */
		anadv_reg = rd32(E1000_PCS_ANADV);
		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
		switch (hw->fc.requested_mode) {
		case e1000_fc_full:
		case e1000_fc_rx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			anadv_reg |= E1000_TXCW_PAUSE;
			break;
		case e1000_fc_tx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			break;
		default:
			break;
		}
		wr32(E1000_PCS_ANADV, anadv_reg);

		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;	/* Force Speed */

		/* Force flow control for forced link */
		reg |= E1000_PCS_LCTL_FORCE_FCTRL;

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return ret_val;
}

/**
 * igb_sgmii_active_82575 - Return sgmii state
 * @hw: pointer to the HW structure
 *
 * 82575 silicon has a serialized gigabit media independent interface
 * (sgmii) which can be enabled for use in the embedded applications.
 * Simply return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * igb_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected.  This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	/* Magic offset/value pairs below are Intel-recommended defaults */
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* Statistics registers are clear-on-read: reading them is how we
	 * reset them.  Values are intentionally discarded.
	 */
	igb_clear_hw_cntrs_base(hw);

	/* packet-size bucket RX/TX counters */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* error and transmit status counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* management pass-through packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	/* interrupt cause counters */
	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* host/BMC traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo.  This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(E1000_RFCTL);
	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
	wr32(E1000_RFCTL, rfctl);

	/* The flush itself is only needed on 82575 with manageability
	 * (RCV_TCO) enabled; other parts just keep the errata bit above.
	 */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 * igb_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 200ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* The anti-spoof control register moved between generations */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			    E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}

/**
 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i354:
	case e1000_i350:
		/* same bit layout, but the register moved to TXSWC */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}

}

/**
 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables replication of packets across multiple pools.
 **/
void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
	u32 vt_ctl = rd32(E1000_VT_CTL);

	if (enable)
		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
	else
		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;

	wr32(E1000_VT_CTL, vt_ctl);
}

/**
 * igb_read_phy_reg_82580 - Read 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.
 **/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	/* Serialize MDIC access against firmware/other functions */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_82580 - Write 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
 **/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	/* Serialize MDIC access against firmware/other functions */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 * @hw: pointer to the HW structure
 *
 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 * the values found in the EEPROM.  This addresses an issue in which these
 * bits are not restored from EEPROM after reset.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	/* Only the 82580 in SGMII mode needs this workaround. */
	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	/* Fetch the INIT_CONTROL3 word for this LAN function from NVM. */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* Re-apply the EXT_MDIO/COM_MDIO bits that the EEPROM says
	 * should be set; reset does not restore them by itself.
	 */
	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* Consume the one-shot request flag. */
	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* Quiesce the device: mask interrupts, stop Rx, let Tx drain. */
	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 11000);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	/* Issue a device reset only if one is not already in progress;
	 * otherwise fall back to a port-level reset.
	 */
	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		usleep_range(5000, 6000);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}

/**
 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 * @data: data received by reading RXPBS register
 *
 * The 82580 uses a table based approach for packet buffer allocation sizes.
2300 * This function converts the retrieved value into the correct table value 2301 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2302 * 0x0 36 72 144 1 2 4 8 16 2303 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2304 */ 2305 u16 igb_rxpbs_adjust_82580(u32 data) 2306 { 2307 u16 ret_val = 0; 2308 2309 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) 2310 ret_val = e1000_82580_rxpbs_table[data]; 2311 2312 return ret_val; 2313 } 2314 2315 /** 2316 * igb_validate_nvm_checksum_with_offset - Validate EEPROM 2317 * checksum 2318 * @hw: pointer to the HW structure 2319 * @offset: offset in words of the checksum protected region 2320 * 2321 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2322 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 2323 **/ 2324 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, 2325 u16 offset) 2326 { 2327 s32 ret_val = 0; 2328 u16 checksum = 0; 2329 u16 i, nvm_data; 2330 2331 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2332 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2333 if (ret_val) { 2334 hw_dbg("NVM Read Error\n"); 2335 goto out; 2336 } 2337 checksum += nvm_data; 2338 } 2339 2340 if (checksum != (u16) NVM_SUM) { 2341 hw_dbg("NVM Checksum Invalid\n"); 2342 ret_val = -E1000_ERR_NVM; 2343 goto out; 2344 } 2345 2346 out: 2347 return ret_val; 2348 } 2349 2350 /** 2351 * igb_update_nvm_checksum_with_offset - Update EEPROM 2352 * checksum 2353 * @hw: pointer to the HW structure 2354 * @offset: offset in words of the checksum protected region 2355 * 2356 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2357 * up to the checksum. Then calculates the EEPROM checksum and writes the 2358 * value to the EEPROM. 
2359 **/ 2360 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2361 { 2362 s32 ret_val; 2363 u16 checksum = 0; 2364 u16 i, nvm_data; 2365 2366 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 2367 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2368 if (ret_val) { 2369 hw_dbg("NVM Read Error while updating checksum.\n"); 2370 goto out; 2371 } 2372 checksum += nvm_data; 2373 } 2374 checksum = (u16) NVM_SUM - checksum; 2375 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 2376 &checksum); 2377 if (ret_val) 2378 hw_dbg("NVM Write Error while updating checksum.\n"); 2379 2380 out: 2381 return ret_val; 2382 } 2383 2384 /** 2385 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum 2386 * @hw: pointer to the HW structure 2387 * 2388 * Calculates the EEPROM section checksum by reading/adding each word of 2389 * the EEPROM and then verifies that the sum of the EEPROM is 2390 * equal to 0xBABA. 2391 **/ 2392 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 2393 { 2394 s32 ret_val = 0; 2395 u16 eeprom_regions_count = 1; 2396 u16 j, nvm_data; 2397 u16 nvm_offset; 2398 2399 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2400 if (ret_val) { 2401 hw_dbg("NVM Read Error\n"); 2402 goto out; 2403 } 2404 2405 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2406 /* if checksums compatibility bit is set validate checksums 2407 * for all 4 ports. 
2408 */ 2409 eeprom_regions_count = 4; 2410 } 2411 2412 for (j = 0; j < eeprom_regions_count; j++) { 2413 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2414 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2415 nvm_offset); 2416 if (ret_val != 0) 2417 goto out; 2418 } 2419 2420 out: 2421 return ret_val; 2422 } 2423 2424 /** 2425 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 2426 * @hw: pointer to the HW structure 2427 * 2428 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2429 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2430 * checksum and writes the value to the EEPROM. 2431 **/ 2432 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 2433 { 2434 s32 ret_val; 2435 u16 j, nvm_data; 2436 u16 nvm_offset; 2437 2438 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2439 if (ret_val) { 2440 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); 2441 goto out; 2442 } 2443 2444 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2445 /* set compatibility bit to validate checksums appropriately */ 2446 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2447 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2448 &nvm_data); 2449 if (ret_val) { 2450 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); 2451 goto out; 2452 } 2453 } 2454 2455 for (j = 0; j < 4; j++) { 2456 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2457 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2458 if (ret_val) 2459 goto out; 2460 } 2461 2462 out: 2463 return ret_val; 2464 } 2465 2466 /** 2467 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 2468 * @hw: pointer to the HW structure 2469 * 2470 * Calculates the EEPROM section checksum by reading/adding each word of 2471 * the EEPROM and then verifies that the sum of the EEPROM is 2472 * equal to 0xBABA. 
2473 **/ 2474 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 2475 { 2476 s32 ret_val = 0; 2477 u16 j; 2478 u16 nvm_offset; 2479 2480 for (j = 0; j < 4; j++) { 2481 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2482 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2483 nvm_offset); 2484 if (ret_val != 0) 2485 goto out; 2486 } 2487 2488 out: 2489 return ret_val; 2490 } 2491 2492 /** 2493 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 2494 * @hw: pointer to the HW structure 2495 * 2496 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2497 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2498 * checksum and writes the value to the EEPROM. 2499 **/ 2500 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) 2501 { 2502 s32 ret_val = 0; 2503 u16 j; 2504 u16 nvm_offset; 2505 2506 for (j = 0; j < 4; j++) { 2507 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2508 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2509 if (ret_val != 0) 2510 goto out; 2511 } 2512 2513 out: 2514 return ret_val; 2515 } 2516 2517 /** 2518 * __igb_access_emi_reg - Read/write EMI register 2519 * @hw: pointer to the HW structure 2520 * @addr: EMI address to program 2521 * @data: pointer to value to read/write from/to the EMI address 2522 * @read: boolean flag to indicate read or write 2523 **/ 2524 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2525 u16 *data, bool read) 2526 { 2527 s32 ret_val = 0; 2528 2529 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2530 if (ret_val) 2531 return ret_val; 2532 2533 if (read) 2534 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2535 else 2536 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2537 2538 return ret_val; 2539 } 2540 2541 /** 2542 * igb_read_emi_reg - Read Extended Management Interface register 2543 * @hw: pointer to the HW structure 2544 * @addr: EMI address to program 2545 * @data: value to be read from the EMI 
 **/
s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
{
	/* Thin wrapper around __igb_access_emi_reg() in read mode. */
	return __igb_access_emi_reg(hw, addr, data, true);
}

/**
 * igb_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
	u32 ipcnfg, eeer;

	/* EEE is only handled for i350-and-newer copper devices. */
	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		u32 eee_su = rd32(E1000_EEE_SU);

		if (adv100M)
			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;

		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");

	} else {
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
	/* Read back to flush the writes to hardware. */
	rd32(E1000_IPCNFG);
	rd32(E1000_EEER);
out:

	return 0;
}

/**
 * igb_set_eee_i354 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	/* Only the copper M88E1543 PHY carries this EEE implementation. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	if (!hw->dev_spec._82575.eee_disable) {
		/* Switch to PHY page 18. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
		if (ret_val)
			goto out;

		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					    &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					     phy_data);
		if (ret_val)
			goto out;

		/* Return the PHY to page 0. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
		if (ret_val)
			goto out;

		/* Turn on EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		if (adv100M)
			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
		else
			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;

		if (adv1G)
			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
		else
			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;

		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	} else {
		/* Turn off EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
			      E1000_EEE_ADV_1000_SUPPORTED);
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	}

out:
	return ret_val;
}

/**
 * igb_get_eee_status_i354 - Get EEE status
 * @hw: pointer to the HW structure
 * @status: EEE status
 *
 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
 * been received.
 **/
s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	/* Check if EEE is supported on this device. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
				     E1000_PCS_STATUS_DEV_I354,
				     &phy_data);
	if (ret_val)
		goto out;

	/* Report EEE active if either direction has signalled LPI. */
	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;

out:
	return ret_val;
}

/* EMC register addresses holding the temperature of each supported sensor. */
static const u8 e1000_emc_temp_data[4] = {
	E1000_EMC_INTERNAL_DATA,
	E1000_EMC_DIODE1_DATA,
	E1000_EMC_DIODE2_DATA,
	E1000_EMC_DIODE3_DATA
};
/* EMC register addresses holding the thermal limit of each sensor. */
static const u8 e1000_emc_therm_limit[4] = {
	E1000_EMC_INTERNAL_THERM_LIMIT,
	E1000_EMC_DIODE1_THERM_LIMIT,
	E1000_EMC_DIODE2_THERM_LIMIT,
	E1000_EMC_DIODE3_THERM_LIMIT
};

#ifdef CONFIG_IGB_HWMON
/**
 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Updates the temperatures in mac.thermal_sensor_data
 **/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Thermal sensors are only exposed on i350 function 0. */
	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
	if (num_sensors > E1000_MAX_SENSORS)
		num_sensors = E1000_MAX_SENSORS;

	/* Sensor 0 is the internal one read above; the rest are external
	 * EMC sensors described by ETS entries in the NVM.
	 */
	for (i = 1; i < num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);

		/* Location 0 means the ETS entry is unpopulated. */
		if (sensor_location != 0)
			hw->phy.ops.read_i2c_byte(hw,
					e1000_emc_temp_data[sensor_index],
					E1000_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
	}
	return 0;
}

/**
 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Sets the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Thermal sensors are only exposed on i350 function 0. */
	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));

	/* Sensor 0 is the internal diode; thresholds come from registers. */
	data->sensor[0].location = 0x1;
	data->sensor[0].caution_thresh =
		(rd32(E1000_THHIGHTC) & 0xFF);
	data->sensor[0].max_op_thresh =
		(rd32(E1000_THLOWTC) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
			    NVM_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);

	for (i = 1; i <= num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;

		/* Program the EMC high-temperature limit for this sensor. */
		hw->phy.ops.write_i2c_byte(hw,
			e1000_emc_therm_limit[sensor_index],
			E1000_I2C_THERMAL_SENSOR_ADDR,
			therm_limit);

		/* Record the thresholds for sensors that fit our table. */
		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return 0;
}

#endif
/* MAC, PHY and NVM operation tables plus device info for this family. */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw = igb_init_hw_82575,
	.check_for_link = igb_check_for_link_82575,
	.rar_set = igb_rar_set,
	.read_mac_addr = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
#endif
};

static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire = igb_acquire_phy_82575,
	.get_cfg_done = igb_get_cfg_done_82575,
	.release = igb_release_phy_82575,
	.write_i2c_byte = igb_write_i2c_byte,
	.read_i2c_byte = igb_read_i2c_byte,
};

static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire = igb_acquire_nvm_82575,
	.read = igb_read_nvm_eerd,
	.release = igb_release_nvm_82575,
	.write = igb_write_nvm_spi,
};

const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};