/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

/* e1000_82575
 * e1000_82576
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/i2c.h>

#include "e1000_mac.h"
#include "e1000_82575.h"
#include "e1000_i210.h"

static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *);
static void igb_release_phy_82575(struct e1000_hw *);
static s32 igb_acquire_nvm_82575(struct e1000_hw *);
static void igb_release_nvm_82575(struct e1000_hw *);
static s32 igb_check_for_link_82575(struct e1000_hw *);
static s32 igb_get_cfg_done_82575(struct e1000_hw *);
static s32 igb_init_hw_82575(struct e1000_hw *);
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
static s32 igb_reset_hw_82575(struct e1000_hw *);
static s32 igb_reset_hw_82580(struct e1000_hw *);
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
					      u16 *);
static s32 igb_get_phy_id_82575(struct e1000_hw *);
static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
static bool igb_sgmii_active_82575(struct e1000_hw *);
static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };

/**
 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 * @hw: pointer to the HW structure
 *
 * Called to determine if the I2C pins are being used for I2C or as an
 * external MDIO interface since the two options are mutually exclusive.
 **/
static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = false;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		reg = rd32(E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		reg = rd32(E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		break;
	}
	return ext_mdio;
}

/**
 * igb_check_for_link_media_swap - Check which M88E1112 interface linked
 * @hw: pointer to the HW structure
 *
 * Poll the M88E1112 interfaces to see which interface achieved link.
 */
static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	u8 port = 0;

	/* Check the copper medium. */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_COPPER;

	/* Check the other medium. */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	/* reset page to 0 */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_OTHER;

	/* Determine if a swap needs to happen. */
	if (port && (hw->dev_spec._82575.media_port != port)) {
		hw->dev_spec._82575.media_port = port;
		hw->dev_spec._82575.media_changed = true;
	} else {
		ret_val = igb_check_for_link_82575(hw);
	}

	return 0;
}
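
/* Note on the media-swap check above: the M88E1112 is a dual-interface PHY.
 * Register page 0 (selected through E1000_M88E1112_PAGE_ADDR) exposes the
 * copper interface and page 1 the other (fiber/SerDes) interface, so link is
 * sampled once per page. Only when the linked medium differs from the
 * recorded dev_spec._82575.media_port is media_changed raised for the driver
 * to act on; otherwise the regular link check runs.
 */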

/**
 * igb_init_phy_params_82575 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
		       E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1543_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
				igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		/* Check if this PHY is configured for media swap. */
		if (phy->id == M88E1112_E_PHY_ID) {
			u16 data;

			ret_val = phy->ops.write_reg(hw,
						     E1000_M88E1112_PAGE_ADDR,
						     2);
			if (ret_val)
				goto out;

			ret_val = phy->ops.read_reg(hw,
						    E1000_M88E1112_MAC_CTRL_1,
						    &data);
			if (ret_val)
				goto out;

			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
				hw->mac.ops.check_for_link =
						igb_check_for_link_media_swap;
		}
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
					igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_init_nvm_params_82575 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u16 size;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
				    16 : 8;
		break;
	}
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->type = e1000_nvm_eeprom_spi;

	/* NVM Function Pointers */
	nvm->ops.acquire = igb_acquire_nvm_82575;
	nvm->ops.release = igb_release_nvm_82575;
	nvm->ops.write = igb_write_nvm_spi;
	nvm->ops.validate = igb_validate_nvm_checksum;
	nvm->ops.update = igb_update_nvm_checksum;
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = igb_read_nvm_eerd;
	else
		nvm->ops.read = igb_read_nvm_spi;

	/* override generic family function pointers for specific descendants */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		break;
	case e1000_i354:
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		break;
	default:
		break;
	}

	return 0;
}
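
/* Worked example for the sizing logic above (assuming the usual
 * NVM_WORD_SIZE_BASE_SHIFT of 6 from e1000_defines.h): an EECD SIZE_EX
 * field of 4 gives word_size = 1 << (4 + 6) = 1024 16-bit words (2 KB),
 * while a field of 9, or anything larger once capped at 15, gives
 * 1 << 15 = 32768 words (64 KB), which is also the case that forces the
 * 128-byte SPI page size.
 */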

/**
 * igb_init_mac_params_82575 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;

	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	if (mac->type == e1000_82580) {
		switch (hw->device_id) {
		/* feature not supported on these id's */
		case E1000_DEV_ID_DH89XXCC_SGMII:
		case E1000_DEV_ID_DH89XXCC_SERDES:
		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
		case E1000_DEV_ID_DH89XXCC_SFP:
			break;
		default:
			hw->dev_spec._82575.mas_capable = true;
			break;
		}
	}
	return 0;
}

/**
 * igb_set_sfp_media_type_82575 - derives SFP module media type.
 * @hw: pointer to the HW structure
 *
 * The media type is chosen based on SFP module compatibility flags
 * retrieved from the SFP ID EEPROM.
 **/
static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_ERR_CONFIG;
	u32 ctrl_ext = 0;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
	u8 tranceiver_type = 0;
	s32 timeout = 3;

	/* Turn I2C interface ON and power on sfp cage */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);

	wrfl();

	/* Read SFP module data */
	while (timeout) {
		ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&tranceiver_type);
		if (ret_val == 0)
			break;
		msleep(100);
		timeout--;
	}
	if (ret_val != 0)
		goto out;

	ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
			(u8 *)eth_flags);
	if (ret_val != 0)
		goto out;

	/* Check if there is some SFP module plugged and powered */
	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
		dev_spec->module_plugged = true;
		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e100_base_fx) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e1000_base_t) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_copper;
		} else {
			hw->phy.media_type = e1000_media_type_unknown;
			hw_dbg("PHY module has not been recognized\n");
			goto out;
		}
	} else {
		hw->phy.media_type = e1000_media_type_unknown;
	}
	ret_val = 0;
out:
	/* Restore I2C interface setting */
	wr32(E1000_CTRL_EXT, ctrl_ext);
	return ret_val;
}
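
/* The two bytes read above come from the SFP module's ID EEPROM (the SFP
 * MSA / SFF-8472 serial ID page): E1000_SFF_IDENTIFIER_OFFSET holds the
 * module identifier (SFP vs. the older SFF form factor) and
 * E1000_SFF_ETH_FLAGS_OFFSET holds the Ethernet compliance codes, which is
 * how 1000BASE-SX/LX modules end up as internal SerDes, 100BASE-FX as
 * SerDes with sgmii_active set, and 1000BASE-T copper modules as SGMII
 * copper.
 */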

static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;
	dev_spec->module_plugged = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (igb_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = true;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = igb_set_sfp_media_type_82575(hw);
		if ((ret_val != 0) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/* If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = true;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		wr32(E1000_CTRL_EXT, ctrl_ext);

		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igb_init_nvm_params_82575(hw);
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_init_nvm_params_i210(hw);
		break;
	default:
		break;
	}

	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}
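
/* Summary of the media selection above: the CTRL_EXT link mode field picks
 * copper (the default), 1000BASE-KX backplane, SGMII, or SerDes/SFP. For
 * the SGMII-over-I2C and SerDes cases the SFP ID EEPROM is consulted to
 * refine the choice, and the link mode field is then rewritten to match,
 * except for 100BASE-FX modules which keep the existing setting.
 */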

/**
 * igb_acquire_phy_82575 - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igb_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static void igb_release_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 **/
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					u16 *data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the serial gigabit
 * media independent interface.
 **/
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					 u16 data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the sgmii interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	/* Extra read required for some PHY's on i354 */
	if (hw->mac.type == e1000_i354)
		igb_get_phy_id(hw);

	/* For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works. For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work. The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
		case e1000_i210:
		case e1000_i211:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/* At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
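
/* Note on the SGMII probe above: the I2CCMD interface only carries a 3-bit
 * PHY address, so address 0 is reserved and addresses 1-7 are tried in turn
 * by reading PHY_ID1 until the expected Marvell (M88) vendor ID answers.
 * An M88-family SGMII PHY such as the 88E1111, for example, reports
 * M88_VENDOR in PHY_ID1 and the loop stops at that address.
 */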

/**
 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
 **/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* This isn't a true "hard" reset, but is the only reset
	 * available to us at this time.
	 */

	hw_dbg("Soft resetting SGMII attached PHY...\n");

	/* SFP documentation requires the following to configure the SFP module
	 * to work on SGMII. No further documentation is given.
	 */
	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
	if (ret_val)
		goto out;

	ret_val = igb_phy_sw_reset(hw);

out:
	return ret_val;
}

/**
 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		goto out;

	if (active) {
		data |= IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		if (ret_val)
			goto out;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			goto out;
	} else {
		data &= ~IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}
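
/* The 82575/82576 variant above has to toggle LPLU and SmartSpeed through
 * IGP PHY registers over MDIO. On 82580 and later parts the same knobs are
 * exposed in the MAC-mapped E1000_82580_PHY_POWER_MGMT register, which is
 * why the 82580/i350/i210 variants below are a single register
 * read-modify-write with no PHY access.
 */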

/**
 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is true, else clear lplu for D3
 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
 * is used during Dx states where the power conservation is most important.
 * During driver activity, SmartSpeed should be enabled so performance is
 * maintained.
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}
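
/* Layout note for the SW_FW_SYNC handshake above: software ownership flags
 * live in the low 16 bits of the register and the firmware's matching flags
 * in the high 16 bits, hence fwmask = mask << 16. A resource is free only
 * when neither side has its bit set, and the whole check-and-set is done
 * under the hardware semaphore taken by igb_get_hw_semaphore().
 */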

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0. If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return 0;
}

/**
 * igb_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function, if using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
				      u16 *duplex)
{
	s32 ret_val;

	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
							     duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
							  duplex);

	return ret_val;
}

/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not. If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	usleep_range(1000, 2000);
}
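
/* The PCS speed/duplex helper below reads E1000_PCS_LSTAT rather than the
 * generic STATUS register: for SerDes/SGMII ports STATUS is not reliable,
 * so link is only reported when both the link-up and sync-ok bits are set,
 * and speed/duplex are decoded from the same register. The i354 2.5 Gb
 * backplane SKU is a special case detected through the STATUS 2P5_SKU bits
 * afterwards.
 */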

/**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state. For non-copper mode,
	 * the status register is not accurate. The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg. The sync ok
	 * gets set once both sides sync up and agree upon link. Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection. */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbs, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}

/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		usleep_range(1000, 2000);
	}
}

/**
 * igb_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val)
		hw_dbg("PCI-E Set completion timeout has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 20000);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}

/**
 * igb_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}

/**
 * igb_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on supported devices */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
		break;
	default:
		break;
	}

	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1543_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}
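
/* Ordering note for the copper path above: igb_setup_serdes_link_82575() is
 * called even for copper because SGMII-attached copper PHYs still link to
 * the MAC through the PCS, so the PCS has to be configured before the PHY
 * itself is reset and set up. The Go-Link-Disconnect clear applies only to
 * the 82580-class parts that expose E1000_82580_PHY_POWER_MGMT.
 */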

/**
 * igb_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link. The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii), or serdes fiber is being used. Configures the link
 * for auto-negotiation or forces speed/duplex.
 **/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
	bool pcs_autoneg;
	s32 ret_val = 0;
	u16 data;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return ret_val;

	/* On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed. A read to
	 * the register does not indicate its status. Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present and turn on I2C */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	ctrl_ext |= E1000_CTRL_I2C_ENA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = false;
		}

		/* non-SGMII modes only support a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	/* New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be the default set by most drivers. This is
	 * the mode that will be compatible with older link partners and
	 * switches. However, both are supported by the hardware and some
	 * drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */

		/* Disable force flow control for autoneg */
		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;

		/* Configure flow control advertisement for autoneg */
		anadv_reg = rd32(E1000_PCS_ANADV);
		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
		switch (hw->fc.requested_mode) {
		case e1000_fc_full:
		case e1000_fc_rx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			anadv_reg |= E1000_TXCW_PAUSE;
			break;
		case e1000_fc_tx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			break;
		default:
			break;
		}
		wr32(E1000_PCS_ANADV, anadv_reg);

		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD; /* Force Speed */

		/* Force flow control for forced link */
		reg |= E1000_PCS_LCTL_FORCE_FCTRL;

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return ret_val;
}
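
/* Flow-control advertisement above follows the usual 802.3 pause
 * resolution: e1000_fc_full and e1000_fc_rx_pause advertise both PAUSE and
 * ASM_DIR in PCS_ANADV, e1000_fc_tx_pause advertises ASM_DIR only, and
 * e1000_fc_none advertises neither; the link partner's reply then decides
 * the final mode in igb_config_fc_after_link_up().
 */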

/**
 * igb_sgmii_active_82575 - Return sgmii state
 * @hw: pointer to the HW structure
 *
 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
 * which can be enabled for use in the embedded applications. Simply
 * return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * igb_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected. This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	igb_clear_hw_cntrs_base(hw);

	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 * igb_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec. To address this we need to
 * increase the value to the 10ms to 200ms range for capability version 1
 * config, or the 16ms to 55ms range for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			    E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}
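
/* A sketch of the bit layout the XOR above relies on (descriptive only; the
 * E1000_DTXSWC_* masks are authoritative): the per-pool MAC anti-spoof check
 * bits sit in the range covered by E1000_DTXSWC_MAC_SPOOF_MASK, and the
 * matching VLAN anti-spoof bits sit MAX_NUM_VFS positions higher under
 * E1000_DTXSWC_VLAN_SPOOF_MASK.  Setting both masks and then toggling the
 * PF's pair of bits enables the checks for every VF pool while leaving the
 * PF pool free to spoof, e.g. for pf == 0:
 *
 *	reg_val ^= (1 << 0 | 1 << (0 + MAX_NUM_VFS));
 *
 * clears exactly the MAC and VLAN check bits that belong to pool 0.
 */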

/**
 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i354:
	case e1000_i350:
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}
}

/**
 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables replication of packets across multiple pools.
 **/
void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
	u32 vt_ctl = rd32(E1000_VT_CTL);

	if (enable)
		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
	else
		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;

	wr32(E1000_VT_CTL, vt_ctl);
}

/**
 * igb_read_phy_reg_82580 - Read 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.
 **/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_82580 - Write 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
 **/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 * @hw: pointer to the HW structure
 *
 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 * the values found in the EEPROM. This addresses an issue in which these
 * bits are not restored from EEPROM after reset.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 11000);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		usleep_range(5000, 6000);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}

/**
 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 * @data: data received by reading RXPBS register
 *
 * The 82580 uses a table based approach for packet buffer allocation sizes.
 * This function converts the retrieved value into the correct table value
 *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
 * 0x0 36  72  144 1   2   4   8   16
 * 0x8 35  70  140 rsv rsv rsv rsv rsv
 */
u16 igb_rxpbs_adjust_82580(u32 data)
{
	u16 ret_val = 0;

	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
		ret_val = e1000_82580_rxpbs_table[data];

	return ret_val;
}
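
/* Usage sketch for igb_rxpbs_adjust_82580() (illustrative only; the real
 * caller lives elsewhere in the driver, and the assumption here is that the
 * encoded size sits in the low bits of the RXPBS register):
 *
 *	u16 rx_pba = igb_rxpbs_adjust_82580(rd32(E1000_RXPBS) & 0xf);
 *
 * Per the table above, an encoding of 0x2 yields 144 and 0x8 yields 35, while
 * reserved encodings (or anything past the end of the table) fall back to 0.
 */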

/**
 * igb_validate_nvm_checksum_with_offset - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
						 u16 offset)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		hw_dbg("NVM Checksum Invalid\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_update_nvm_checksum_with_offset - Update EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.
 **/
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	checksum = (u16) NVM_SUM - checksum;
	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
				    &checksum);
	if (ret_val)
		hw_dbg("NVM Write Error while updating checksum.\n");

out:
	return ret_val;
}
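
/* Worked example of the checksum rule used by the two helpers above (the
 * numbers are illustrative, not from a real EEPROM image): the words in
 * [offset, offset + NVM_CHECKSUM_REG] must sum, modulo 2^16, to NVM_SUM
 * (0xBABA).  If the data words alone sum to 0x1234, the update path stores
 *
 *	checksum = (u16)(NVM_SUM - 0x1234);	i.e. 0xA886
 *
 * at word (offset + NVM_CHECKSUM_REG), so a later validation pass sees
 * 0x1234 + 0xA886 == 0xBABA and succeeds.
 */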

/**
 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM section checksum by reading/adding each word of
 * the EEPROM and then verifies that the sum of the EEPROM is
 * equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 eeprom_regions_count = 1;
	u16 j, nvm_data;
	u16 nvm_offset;

	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
		/* if checksums compatibility bit is set validate checksums
		 * for all 4 ports.
		 */
		eeprom_regions_count = 4;
	}

	for (j = 0; j < eeprom_regions_count; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_validate_nvm_checksum_with_offset(hw,
								nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM section checksums for all 4 ports by reading/adding
 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
 * checksum and writes the value to the EEPROM.
 **/
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 j, nvm_data;
	u16 nvm_offset;

	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
		goto out;
	}

	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
		/* set compatibility bit to validate checksums appropriately */
		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
					    &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
			goto out;
		}
	}

	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
		if (ret_val)
			goto out;
	}

out:
	return ret_val;
}

/**
 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM section checksum by reading/adding each word of
 * the EEPROM and then verifies that the sum of the EEPROM is
 * equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 j;
	u16 nvm_offset;

	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_validate_nvm_checksum_with_offset(hw,
								nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM section checksums for all 4 ports by reading/adding
 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
 * checksum and writes the value to the EEPROM.
 **/
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 j;
	u16 nvm_offset;

	for (j = 0; j < 4; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 * __igb_access_emi_reg - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
				u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);

	return ret_val;
}

/**
 * igb_read_emi_reg - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 **/
s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __igb_access_emi_reg(hw, addr, data, true);
}
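
/* __igb_access_emi_reg() implements a simple indirect access: the target EMI
 * address is latched through E1000_EMIADD and the payload then moves through
 * E1000_EMIDATA.  A write-side wrapper would mirror igb_read_emi_reg() with
 * read == false; shown here only as a sketch with a hypothetical name, not as
 * an existing driver interface:
 *
 *	static s32 igb_write_emi_reg_sketch(struct e1000_hw *hw, u16 addr,
 *					    u16 data)
 *	{
 *		return __igb_access_emi_reg(hw, addr, &data, false);
 *	}
 */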

/**
 * igb_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
	u32 ipcnfg, eeer;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		u32 eee_su = rd32(E1000_EEE_SU);

		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			 E1000_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");

	} else {
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			    E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			  E1000_EEER_RX_LPI_EN |
			  E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
	rd32(E1000_IPCNFG);
	rd32(E1000_EEER);
out:

	return 0;
}

/**
 * igb_set_eee_i354 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i354(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	if (!hw->dev_spec._82575.eee_disable) {
		/* Switch to PHY page 18. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
		if (ret_val)
			goto out;

		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					    &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					     phy_data);
		if (ret_val)
			goto out;

		/* Return the PHY to page 0. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
		if (ret_val)
			goto out;

		/* Turn on EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_EEE_ADV_100_SUPPORTED |
			    E1000_EEE_ADV_1000_SUPPORTED;
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	} else {
		/* Turn off EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
			      E1000_EEE_ADV_1000_SUPPORTED);
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	}

out:
	return ret_val;
}

/**
 * igb_get_eee_status_i354 - Get EEE status
 * @hw: pointer to the HW structure
 * @status: EEE status
 *
 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
 * been received.
 **/
s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	/* Check if EEE is supported on this device. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
				     E1000_PCS_STATUS_DEV_I354,
				     &phy_data);
	if (ret_val)
		goto out;

	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
			      E1000_PCS_STATUS_RX_LPI_RCVD) ?
			      true : false;

out:
	return ret_val;
}
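
/* Putting the EEE helpers together (illustrative sketch of a caller, not code
 * from the igb core; error handling trimmed): EEE is requested through
 * dev_spec and the status query reports whether LPI has actually been
 * observed in either direction.
 *
 *	bool eee_active = false;
 *
 *	hw->dev_spec._82575.eee_disable = false;
 *	if (!igb_set_eee_i354(hw) &&
 *	    !igb_get_eee_status_i354(hw, &eee_active))
 *		hw_dbg("EEE %s\n", eee_active ? "active" : "idle");
 */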

static const u8 e1000_emc_temp_data[4] = {
	E1000_EMC_INTERNAL_DATA,
	E1000_EMC_DIODE1_DATA,
	E1000_EMC_DIODE2_DATA,
	E1000_EMC_DIODE3_DATA
};
static const u8 e1000_emc_therm_limit[4] = {
	E1000_EMC_INTERNAL_THERM_LIMIT,
	E1000_EMC_DIODE1_THERM_LIMIT,
	E1000_EMC_DIODE2_THERM_LIMIT,
	E1000_EMC_DIODE3_THERM_LIMIT
};

#ifdef CONFIG_IGB_HWMON
/**
 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Updates the temperatures in mac.thermal_sensor_data
 **/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
	if (num_sensors > E1000_MAX_SENSORS)
		num_sensors = E1000_MAX_SENSORS;

	for (i = 1; i < num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);

		if (sensor_location != 0)
			hw->phy.ops.read_i2c_byte(hw,
					e1000_emc_temp_data[sensor_index],
					E1000_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
	}
	return 0;
}
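
/* Layout of the ETS (external thermal sensor) words as consumed by the two
 * hwmon helpers here (descriptive summary only; the NVM_ETS_* masks and
 * shifts are authoritative): word NVM_ETS_CFG holds a pointer to the ETS
 * section, whose first word encodes the sensor type, the low-threshold delta
 * and the number of external sensors.  Each following word describes one
 * sensor: an index into the EMC register tables above, a location code
 * (0 meaning the sensor is not populated) and its high temperature threshold.
 */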

/**
 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Sets the thermal sensor thresholds according to the NVM map
 * and saves off the threshold and location values into mac.thermal_sensor_data
 **/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));

	data->sensor[0].location = 0x1;
	data->sensor[0].caution_thresh =
		(rd32(E1000_THHIGHTC) & 0xFF);
	data->sensor[0].max_op_thresh =
		(rd32(E1000_THLOWTC) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
			    NVM_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);

	for (i = 1; i <= num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;

		hw->phy.ops.write_i2c_byte(hw,
				e1000_emc_therm_limit[sensor_index],
				E1000_I2C_THERMAL_SENSOR_ADDR,
				therm_limit);

		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return 0;
}

#endif
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw = igb_init_hw_82575,
	.check_for_link = igb_check_for_link_82575,
	.rar_set = igb_rar_set,
	.read_mac_addr = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
#endif
};

static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire = igb_acquire_phy_82575,
	.get_cfg_done = igb_get_cfg_done_82575,
	.release = igb_release_phy_82575,
	.write_i2c_byte = igb_write_i2c_byte,
	.read_i2c_byte = igb_read_i2c_byte,
};

static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire = igb_acquire_nvm_82575,
	.read = igb_read_nvm_eerd,
	.release = igb_release_nvm_82575,
	.write = igb_write_nvm_spi,
};

const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};