1 /* Intel(R) Gigabit Ethernet Linux driver 2 * Copyright(c) 2007-2014 Intel Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * The full GNU General Public License is included in this distribution in 17 * the file called "COPYING". 18 * 19 * Contact Information: 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 22 */ 23 24 /* e1000_82575 25 * e1000_82576 26 */ 27 28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 30 #include <linux/types.h> 31 #include <linux/if_ether.h> 32 #include <linux/i2c.h> 33 34 #include "e1000_mac.h" 35 #include "e1000_82575.h" 36 #include "e1000_i210.h" 37 38 static s32 igb_get_invariants_82575(struct e1000_hw *); 39 static s32 igb_acquire_phy_82575(struct e1000_hw *); 40 static void igb_release_phy_82575(struct e1000_hw *); 41 static s32 igb_acquire_nvm_82575(struct e1000_hw *); 42 static void igb_release_nvm_82575(struct e1000_hw *); 43 static s32 igb_check_for_link_82575(struct e1000_hw *); 44 static s32 igb_get_cfg_done_82575(struct e1000_hw *); 45 static s32 igb_init_hw_82575(struct e1000_hw *); 46 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 49 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 50 
static s32 igb_reset_hw_82575(struct e1000_hw *);
static s32 igb_reset_hw_82580(struct e1000_hw *);
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
					      u16 *);
static s32 igb_get_phy_id_82575(struct e1000_hw *);
static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
static bool igb_sgmii_active_82575(struct e1000_hw *);
static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
/* 82580 RXPBS size lookup table, indexed by the RXPBS register field */
static const u16 e1000_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };

/**
 *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 *  @hw: pointer to the HW structure
 *
 *  Called to determine if the I2C pins are being used for I2C or as an
 *  external MDIO interface since the two options are mutually exclusive.
 **/
static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = false;

	/* The MDIO-destination bit lives in different registers per MAC. */
	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		reg = rd32(E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		reg = rd32(E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		/* Unknown MAC: report "not external MDIO". */
		break;
	}
	return ext_mdio;
}

/**
 *  igb_check_for_link_media_swap - Check which M88E1112 interface linked
 *  @hw: pointer to the HW structure
 *
 *  Poll the M88E1112 interfaces to see which interface achieved link.
 */
static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	u8 port = 0;

	/* Check the copper medium (PHY page 0). */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_COPPER;

	/* Check the other medium (PHY page 1). */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	/* reset page to 0 */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_OTHER;

	/* Determine if a swap needs to happen. */
	if (port && (hw->dev_spec._82575.media_port != port)) {
		hw->dev_spec._82575.media_port = port;
		hw->dev_spec._82575.media_changed = true;
	} else {
		/* NOTE(review): ret_val from this call is discarded by the
		 * unconditional "return 0" below — confirm whether link-check
		 * failures are intentionally ignored here.
		 */
		ret_val = igb_check_for_link_82575(hw);
	}

	return 0;
}

/**
 *  igb_init_phy_params_82575 - Init PHY func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	/* Non-copper media has no PHY to set up. */
	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	/* SGMII-attached PHYs are reached over I2C; enable/disable the
	 * I2C interface accordingly.
	 */
	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	/* Select register accessors: I2C-based SGMII vs. MDIO per MAC type */
	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
		       E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1543_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
				igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		/* Check if this PHY is configured for media swap. */
		if (phy->id == M88E1112_E_PHY_ID) {
			u16 data;

			ret_val = phy->ops.write_reg(hw,
						     E1000_M88E1112_PAGE_ADDR,
						     2);
			if (ret_val)
				goto out;

			ret_val = phy->ops.read_reg(hw,
						    E1000_M88E1112_MAC_CTRL_1,
						    &data);
			if (ret_val)
				goto out;

			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
				hw->mac.ops.check_for_link =
					igb_check_for_link_media_swap;
		}
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
			igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		/* Unrecognized PHY id: fail PHY init. */
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_init_nvm_params_82575 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u16 size;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		/* No override: derive page size/address width from EECD */
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
				    16 : 8;
		break;
	}
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->type = e1000_nvm_eeprom_spi;

	/* NVM Function Pointers */
	nvm->ops.acquire = igb_acquire_nvm_82575;
	nvm->ops.release = igb_release_nvm_82575;
	nvm->ops.write = igb_write_nvm_spi;
	nvm->ops.validate = igb_validate_nvm_checksum;
	nvm->ops.update = igb_update_nvm_checksum;
	/* EERD register reads cover only the first 32K words */
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = igb_read_nvm_eerd;
	else
		nvm->ops.read = igb_read_nvm_spi;

	/* override generic family function pointers for specific descendants */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		break;
	case e1000_i354:
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  igb_init_mac_params_82575 - Init MAC func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	/* i210 and newer use their own SW/FW semaphore routines */
	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;

	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	if (mac->type == e1000_82580) {
		switch (hw->device_id) {
		/* feature not supported on these id's */
		case E1000_DEV_ID_DH89XXCC_SGMII:
		case E1000_DEV_ID_DH89XXCC_SERDES:
		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
		case E1000_DEV_ID_DH89XXCC_SFP:
			break;
		default:
			hw->dev_spec._82575.mas_capable = true;
			break;
		}
	}
	return 0;
}

/**
 *  igb_set_sfp_media_type_82575 - derives SFP module media type.
 *  @hw: pointer to the HW structure
 *
 *  The media type is chosen based on SFP module.
 *  compatibility flags retrieved from SFP ID EEPROM.
 **/
static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_ERR_CONFIG;
	u32 ctrl_ext = 0;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
	u8 tranceiver_type = 0;
	s32 timeout = 3;

	/* Turn I2C interface ON and power on sfp cage */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);

	wrfl();

	/* Read SFP module data; retry up to 3 times 100ms apart */
	while (timeout) {
		ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&tranceiver_type);
		if (ret_val == 0)
			break;
		msleep(100);
		timeout--;
	}
	if (ret_val != 0)
		goto out;

	ret_val = igb_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
			(u8 *)eth_flags);
	if (ret_val != 0)
		goto out;

	/* Check if there is some SFP module plugged and powered */
	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
		dev_spec->module_plugged = true;
		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e100_base_fx) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e1000_base_t) {
			dev_spec->sgmii_active = true;
			hw->phy.media_type = e1000_media_type_copper;
		} else {
			hw->phy.media_type = e1000_media_type_unknown;
			hw_dbg("PHY module has not been recognized\n");
			goto out;
		}
	} else {
		hw->phy.media_type = e1000_media_type_unknown;
	}
	ret_val = 0;
out:
	/* Restore I2C interface setting */
	wr32(E1000_CTRL_EXT, ctrl_ext);
	return ret_val;
}

/* Identify the MAC type from the device id, derive the media type, and
 * run MAC/NVM/mailbox/PHY parameter initialization in that order.
 */
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;
	dev_spec->module_plugged = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (igb_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = true;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = igb_set_sfp_media_type_82575(hw);
		if ((ret_val != 0) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/* If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = true;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		wr32(E1000_CTRL_EXT, ctrl_ext);

		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igb_init_nvm_params_82575(hw);
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_init_nvm_params_i210(hw);
		break;
	default:
		break;
	}

	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}

/**
 *  igb_acquire_phy_82575 - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY. This is a
 *  function pointer entry point called by the api module.
690 **/ 691 static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 692 { 693 u16 mask = E1000_SWFW_PHY0_SM; 694 695 if (hw->bus.func == E1000_FUNC_1) 696 mask = E1000_SWFW_PHY1_SM; 697 else if (hw->bus.func == E1000_FUNC_2) 698 mask = E1000_SWFW_PHY2_SM; 699 else if (hw->bus.func == E1000_FUNC_3) 700 mask = E1000_SWFW_PHY3_SM; 701 702 return hw->mac.ops.acquire_swfw_sync(hw, mask); 703 } 704 705 /** 706 * igb_release_phy_82575 - Release rights to access PHY 707 * @hw: pointer to the HW structure 708 * 709 * A wrapper to release access rights to the correct PHY. This is a 710 * function pointer entry point called by the api module. 711 **/ 712 static void igb_release_phy_82575(struct e1000_hw *hw) 713 { 714 u16 mask = E1000_SWFW_PHY0_SM; 715 716 if (hw->bus.func == E1000_FUNC_1) 717 mask = E1000_SWFW_PHY1_SM; 718 else if (hw->bus.func == E1000_FUNC_2) 719 mask = E1000_SWFW_PHY2_SM; 720 else if (hw->bus.func == E1000_FUNC_3) 721 mask = E1000_SWFW_PHY3_SM; 722 723 hw->mac.ops.release_swfw_sync(hw, mask); 724 } 725 726 /** 727 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 728 * @hw: pointer to the HW structure 729 * @offset: register offset to be read 730 * @data: pointer to the read data 731 * 732 * Reads the PHY register at offset using the serial gigabit media independent 733 * interface and stores the retrieved information in data. 
734 **/ 735 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 736 u16 *data) 737 { 738 s32 ret_val = -E1000_ERR_PARAM; 739 740 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 741 hw_dbg("PHY Address %u is out of range\n", offset); 742 goto out; 743 } 744 745 ret_val = hw->phy.ops.acquire(hw); 746 if (ret_val) 747 goto out; 748 749 ret_val = igb_read_phy_reg_i2c(hw, offset, data); 750 751 hw->phy.ops.release(hw); 752 753 out: 754 return ret_val; 755 } 756 757 /** 758 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 759 * @hw: pointer to the HW structure 760 * @offset: register offset to write to 761 * @data: data to write at register offset 762 * 763 * Writes the data to PHY register at the offset using the serial gigabit 764 * media independent interface. 765 **/ 766 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 767 u16 data) 768 { 769 s32 ret_val = -E1000_ERR_PARAM; 770 771 772 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 773 hw_dbg("PHY Address %d is out of range\n", offset); 774 goto out; 775 } 776 777 ret_val = hw->phy.ops.acquire(hw); 778 if (ret_val) 779 goto out; 780 781 ret_val = igb_write_phy_reg_i2c(hw, offset, data); 782 783 hw->phy.ops.release(hw); 784 785 out: 786 return ret_val; 787 } 788 789 /** 790 * igb_get_phy_id_82575 - Retrieve PHY addr and id 791 * @hw: pointer to the HW structure 792 * 793 * Retrieves the PHY address and ID for both PHY's which do and do not use 794 * sgmi interface. 795 **/ 796 static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 797 { 798 struct e1000_phy_info *phy = &hw->phy; 799 s32 ret_val = 0; 800 u16 phy_id; 801 u32 ctrl_ext; 802 u32 mdic; 803 804 /* Extra read required for some PHY's on i354 */ 805 if (hw->mac.type == e1000_i354) 806 igb_get_phy_id(hw); 807 808 /* For SGMII PHYs, we try the list of possible addresses until 809 * we find one that works. For non-SGMII PHYs 810 * (e.g. integrated copper PHYs), an address of 1 should 811 * work. 
The result of this function should mean phy->phy_addr 812 * and phy->id are set correctly. 813 */ 814 if (!(igb_sgmii_active_82575(hw))) { 815 phy->addr = 1; 816 ret_val = igb_get_phy_id(hw); 817 goto out; 818 } 819 820 if (igb_sgmii_uses_mdio_82575(hw)) { 821 switch (hw->mac.type) { 822 case e1000_82575: 823 case e1000_82576: 824 mdic = rd32(E1000_MDIC); 825 mdic &= E1000_MDIC_PHY_MASK; 826 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 827 break; 828 case e1000_82580: 829 case e1000_i350: 830 case e1000_i354: 831 case e1000_i210: 832 case e1000_i211: 833 mdic = rd32(E1000_MDICNFG); 834 mdic &= E1000_MDICNFG_PHY_MASK; 835 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 836 break; 837 default: 838 ret_val = -E1000_ERR_PHY; 839 goto out; 840 break; 841 } 842 ret_val = igb_get_phy_id(hw); 843 goto out; 844 } 845 846 /* Power on sgmii phy if it is disabled */ 847 ctrl_ext = rd32(E1000_CTRL_EXT); 848 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 849 wrfl(); 850 msleep(300); 851 852 /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 853 * Therefore, we need to test 1-7 854 */ 855 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 856 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 857 if (ret_val == 0) { 858 hw_dbg("Vendor ID 0x%08X read at address %u\n", 859 phy_id, phy->addr); 860 /* At the time of this writing, The M88 part is 861 * the only supported SGMII PHY product. 862 */ 863 if (phy_id == M88_VENDOR) 864 break; 865 } else { 866 hw_dbg("PHY address %u was unreadable\n", phy->addr); 867 } 868 } 869 870 /* A valid PHY type couldn't be found. 
*/ 871 if (phy->addr == 8) { 872 phy->addr = 0; 873 ret_val = -E1000_ERR_PHY; 874 goto out; 875 } else { 876 ret_val = igb_get_phy_id(hw); 877 } 878 879 /* restore previous sfp cage power state */ 880 wr32(E1000_CTRL_EXT, ctrl_ext); 881 882 out: 883 return ret_val; 884 } 885 886 /** 887 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset 888 * @hw: pointer to the HW structure 889 * 890 * Resets the PHY using the serial gigabit media independent interface. 891 **/ 892 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 893 { 894 s32 ret_val; 895 896 /* This isn't a true "hard" reset, but is the only reset 897 * available to us at this time. 898 */ 899 900 hw_dbg("Soft resetting SGMII attached PHY...\n"); 901 902 /* SFP documentation requires the following to configure the SPF module 903 * to work on SGMII. No further documentation is given. 904 */ 905 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 906 if (ret_val) 907 goto out; 908 909 ret_val = igb_phy_sw_reset(hw); 910 911 out: 912 return ret_val; 913 } 914 915 /** 916 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 917 * @hw: pointer to the HW structure 918 * @active: true to enable LPLU, false to disable 919 * 920 * Sets the LPLU D0 state according to the active flag. When 921 * activating LPLU this function also disables smart speed 922 * and vice versa. LPLU will not be activated unless the 923 * device autonegotiation advertisement meets standards of 924 * either 10 or 10/100 or 10/100/1000 at all duplexes. 925 * This is a function pointer entry point only called by 926 * PHY setup routines. 
927 **/ 928 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 929 { 930 struct e1000_phy_info *phy = &hw->phy; 931 s32 ret_val; 932 u16 data; 933 934 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 935 if (ret_val) 936 goto out; 937 938 if (active) { 939 data |= IGP02E1000_PM_D0_LPLU; 940 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 941 data); 942 if (ret_val) 943 goto out; 944 945 /* When LPLU is enabled, we should disable SmartSpeed */ 946 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 947 &data); 948 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 949 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 950 data); 951 if (ret_val) 952 goto out; 953 } else { 954 data &= ~IGP02E1000_PM_D0_LPLU; 955 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 956 data); 957 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 958 * during Dx states where the power conservation is most 959 * important. During driver activity we should enable 960 * SmartSpeed, so performance is maintained. 
	 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	/* Unlike the 82575 variant above, the 82580 exposes LPLU and
	 * SmartSpeed through a MAC register, so no PHY access (and no
	 * error path) is needed here.
	 */
	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
 *  @hw: pointer to the HW structure
 *  @active: boolean used to enable/disable lplu
 *
 *  Success returns 0, Failure returns 1
 *
 *  The low power link up (lplu) state is set to the power management level D3
 *  and SmartSpeed is disabled when active is true, else clear lplu for D3
 *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 *  is used during Dx states where the power conservation is most important.
 *  During driver activity, SmartSpeed should be enabled so performance is
 *  maintained.
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Only enable D3 LPLU when the advertised link set meets
		 * the 10 / 10-100 / 10-100-1000 all-duplex requirement.
		 */
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 *  igb_acquire_nvm_82575 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* SW/FW semaphore first, then the NVM-specific grant; if the
	 * second step fails, release the semaphore again so other
	 * agents are not blocked.
	 */
	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 *  igb_release_nvm_82575 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	/* Release in reverse order of acquisition: NVM grant first,
	 * then the SW/FW semaphore.
	 */
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* FW bits sit in the upper half */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* The HW semaphore guards access to SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* We broke out of the loop still holding the HW semaphore, so it
	 * is safe to set our ownership bit and then drop the semaphore.
	 */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Spin until the HW semaphore is ours; releasing must not fail */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 *  igb_get_cfg_done_82575 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so an error is *ONLY* logged and returns
 *  0.  If we were to return with error, EEPROM-less silicon
 *  would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	/* Select the config-done bit matching our PCI function */
	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return 0;
}

/**
 *  igb_get_link_up_info_82575 - Get link speed/duplex info
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  This is a wrapper function, if using the serial gigabit media independent
 *  interface, use PCS to retrieve the link speed and duplex information.
 *  Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
				      u16 *duplex)
{
	s32 ret_val;

	/* Anything that is not copper (serdes/sgmii) goes through the PCS */
	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
							     duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
							  duplex);

	return ret_val;
}

/**
 *  igb_check_for_link_82575 - Check for link
 *  @hw: pointer to the HW structure
 *
 *  If sgmii is enabled, then use the pcs register to determine link, otherwise
 *  use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		/* NOTE(review): ret_val from this call is overwritten by
		 * igb_config_fc_after_link_up() below; the speed/duplex
		 * values themselves are only read for their side effect of
		 * updating mac.serdes_has_link.
		 */
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 *  @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* Nothing to do unless the link is serdes or sgmii based */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	usleep_range(1000, 2000);
}

/**
 *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Using the physical coding sub-layer (PCS), retrieve the current speed and
 *  duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg.  The sync ok
	 * gets set once both sides sync up and agree upon link.  Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection. */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbs, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}

/**
 *  igb_shutdown_serdes_link_82575 - Remove link during power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of fiber serdes, shut down optics and PCS on driver unload
 *  when management pass thru is not enabled.
1385 **/ 1386 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 1387 { 1388 u32 reg; 1389 1390 if (hw->phy.media_type != e1000_media_type_internal_serdes && 1391 igb_sgmii_active_82575(hw)) 1392 return; 1393 1394 if (!igb_enable_mng_pass_thru(hw)) { 1395 /* Disable PCS to turn off link */ 1396 reg = rd32(E1000_PCS_CFG0); 1397 reg &= ~E1000_PCS_CFG_PCS_EN; 1398 wr32(E1000_PCS_CFG0, reg); 1399 1400 /* shutdown the laser */ 1401 reg = rd32(E1000_CTRL_EXT); 1402 reg |= E1000_CTRL_EXT_SDP3_DATA; 1403 wr32(E1000_CTRL_EXT, reg); 1404 1405 /* flush the write to verify completion */ 1406 wrfl(); 1407 usleep_range(1000, 2000); 1408 } 1409 } 1410 1411 /** 1412 * igb_reset_hw_82575 - Reset hardware 1413 * @hw: pointer to the HW structure 1414 * 1415 * This resets the hardware into a known state. This is a 1416 * function pointer entry point called by the api module. 1417 **/ 1418 static s32 igb_reset_hw_82575(struct e1000_hw *hw) 1419 { 1420 u32 ctrl; 1421 s32 ret_val; 1422 1423 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1424 * on the last TLP read/write transaction when MAC is reset. 1425 */ 1426 ret_val = igb_disable_pcie_master(hw); 1427 if (ret_val) 1428 hw_dbg("PCI-E Master disable polling has failed.\n"); 1429 1430 /* set the completion timeout for interface */ 1431 ret_val = igb_set_pcie_completion_timeout(hw); 1432 if (ret_val) 1433 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1434 1435 hw_dbg("Masking off all interrupts\n"); 1436 wr32(E1000_IMC, 0xffffffff); 1437 1438 wr32(E1000_RCTL, 0); 1439 wr32(E1000_TCTL, E1000_TCTL_PSP); 1440 wrfl(); 1441 1442 usleep_range(10000, 20000); 1443 1444 ctrl = rd32(E1000_CTRL); 1445 1446 hw_dbg("Issuing a global reset to MAC\n"); 1447 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 1448 1449 ret_val = igb_get_auto_rd_done(hw); 1450 if (ret_val) { 1451 /* When auto config read does not complete, do not 1452 * return with an error. 
This can happen in situations 1453 * where there is no eeprom and prevents getting link. 1454 */ 1455 hw_dbg("Auto Read Done did not complete\n"); 1456 } 1457 1458 /* If EEPROM is not present, run manual init scripts */ 1459 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) 1460 igb_reset_init_script_82575(hw); 1461 1462 /* Clear any pending interrupt events. */ 1463 wr32(E1000_IMC, 0xffffffff); 1464 rd32(E1000_ICR); 1465 1466 /* Install any alternate MAC address into RAR0 */ 1467 ret_val = igb_check_alt_mac_addr(hw); 1468 1469 return ret_val; 1470 } 1471 1472 /** 1473 * igb_init_hw_82575 - Initialize hardware 1474 * @hw: pointer to the HW structure 1475 * 1476 * This inits the hardware readying it for operation. 1477 **/ 1478 static s32 igb_init_hw_82575(struct e1000_hw *hw) 1479 { 1480 struct e1000_mac_info *mac = &hw->mac; 1481 s32 ret_val; 1482 u16 i, rar_count = mac->rar_entry_count; 1483 1484 if ((hw->mac.type >= e1000_i210) && 1485 !(igb_get_flash_presence_i210(hw))) { 1486 ret_val = igb_pll_workaround_i210(hw); 1487 if (ret_val) 1488 return ret_val; 1489 } 1490 1491 /* Initialize identification LED */ 1492 ret_val = igb_id_led_init(hw); 1493 if (ret_val) { 1494 hw_dbg("Error initializing identification LED\n"); 1495 /* This is not fatal and we should not stop init due to this */ 1496 } 1497 1498 /* Disabling VLAN filtering */ 1499 hw_dbg("Initializing the IEEE VLAN\n"); 1500 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) 1501 igb_clear_vfta_i350(hw); 1502 else 1503 igb_clear_vfta(hw); 1504 1505 /* Setup the receive address */ 1506 igb_init_rx_addrs(hw, rar_count); 1507 1508 /* Zero out the Multicast HASH table */ 1509 hw_dbg("Zeroing the MTA\n"); 1510 for (i = 0; i < mac->mta_reg_count; i++) 1511 array_wr32(E1000_MTA, i, 0); 1512 1513 /* Zero out the Unicast HASH table */ 1514 hw_dbg("Zeroing the UTA\n"); 1515 for (i = 0; i < mac->uta_reg_count; i++) 1516 array_wr32(E1000_UTA, i, 0); 1517 1518 /* Setup link and flow control */ 1519 ret_val = 
igb_setup_link(hw); 1520 1521 /* Clear all of the statistics registers (clear on read). It is 1522 * important that we do this after we have tried to establish link 1523 * because the symbol error count will increment wildly if there 1524 * is no link. 1525 */ 1526 igb_clear_hw_cntrs_82575(hw); 1527 return ret_val; 1528 } 1529 1530 /** 1531 * igb_setup_copper_link_82575 - Configure copper link settings 1532 * @hw: pointer to the HW structure 1533 * 1534 * Configures the link for auto-neg or forced speed and duplex. Then we check 1535 * for link, once link is established calls to configure collision distance 1536 * and flow control are called. 1537 **/ 1538 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) 1539 { 1540 u32 ctrl; 1541 s32 ret_val; 1542 u32 phpm_reg; 1543 1544 ctrl = rd32(E1000_CTRL); 1545 ctrl |= E1000_CTRL_SLU; 1546 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1547 wr32(E1000_CTRL, ctrl); 1548 1549 /* Clear Go Link Disconnect bit on supported devices */ 1550 switch (hw->mac.type) { 1551 case e1000_82580: 1552 case e1000_i350: 1553 case e1000_i210: 1554 case e1000_i211: 1555 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); 1556 phpm_reg &= ~E1000_82580_PM_GO_LINKD; 1557 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); 1558 break; 1559 default: 1560 break; 1561 } 1562 1563 ret_val = igb_setup_serdes_link_82575(hw); 1564 if (ret_val) 1565 goto out; 1566 1567 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { 1568 /* allow time for SFP cage time to power up phy */ 1569 msleep(300); 1570 1571 ret_val = hw->phy.ops.reset(hw); 1572 if (ret_val) { 1573 hw_dbg("Error resetting the PHY.\n"); 1574 goto out; 1575 } 1576 } 1577 switch (hw->phy.type) { 1578 case e1000_phy_i210: 1579 case e1000_phy_m88: 1580 switch (hw->phy.id) { 1581 case I347AT4_E_PHY_ID: 1582 case M88E1112_E_PHY_ID: 1583 case M88E1543_E_PHY_ID: 1584 case I210_I_PHY_ID: 1585 ret_val = igb_copper_link_setup_m88_gen2(hw); 1586 break; 1587 default: 1588 ret_val = 
igb_copper_link_setup_m88(hw); 1589 break; 1590 } 1591 break; 1592 case e1000_phy_igp_3: 1593 ret_val = igb_copper_link_setup_igp(hw); 1594 break; 1595 case e1000_phy_82580: 1596 ret_val = igb_copper_link_setup_82580(hw); 1597 break; 1598 default: 1599 ret_val = -E1000_ERR_PHY; 1600 break; 1601 } 1602 1603 if (ret_val) 1604 goto out; 1605 1606 ret_val = igb_setup_copper_link(hw); 1607 out: 1608 return ret_val; 1609 } 1610 1611 /** 1612 * igb_setup_serdes_link_82575 - Setup link for serdes 1613 * @hw: pointer to the HW structure 1614 * 1615 * Configure the physical coding sub-layer (PCS) link. The PCS link is 1616 * used on copper connections where the serialized gigabit media independent 1617 * interface (sgmii), or serdes fiber is being used. Configures the link 1618 * for auto-negotiation or forces speed/duplex. 1619 **/ 1620 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1621 { 1622 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1623 bool pcs_autoneg; 1624 s32 ret_val = 0; 1625 u16 data; 1626 1627 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1628 !igb_sgmii_active_82575(hw)) 1629 return ret_val; 1630 1631 1632 /* On the 82575, SerDes loopback mode persists until it is 1633 * explicitly turned off or a power cycle is performed. A read to 1634 * the register does not indicate its status. Therefore, we ensure 1635 * loopback mode is disabled during initialization. 
1636 */ 1637 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1638 1639 /* power on the sfp cage if present and turn on I2C */ 1640 ctrl_ext = rd32(E1000_CTRL_EXT); 1641 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1642 ctrl_ext |= E1000_CTRL_I2C_ENA; 1643 wr32(E1000_CTRL_EXT, ctrl_ext); 1644 1645 ctrl_reg = rd32(E1000_CTRL); 1646 ctrl_reg |= E1000_CTRL_SLU; 1647 1648 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1649 /* set both sw defined pins */ 1650 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1651 1652 /* Set switch control to serdes energy detect */ 1653 reg = rd32(E1000_CONNSW); 1654 reg |= E1000_CONNSW_ENRGSRC; 1655 wr32(E1000_CONNSW, reg); 1656 } 1657 1658 reg = rd32(E1000_PCS_LCTL); 1659 1660 /* default pcs_autoneg to the same setting as mac autoneg */ 1661 pcs_autoneg = hw->mac.autoneg; 1662 1663 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1664 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1665 /* sgmii mode lets the phy handle forcing speed/duplex */ 1666 pcs_autoneg = true; 1667 /* autoneg time out should be disabled for SGMII mode */ 1668 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1669 break; 1670 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1671 /* disable PCS autoneg and support parallel detect only */ 1672 pcs_autoneg = false; 1673 default: 1674 if (hw->mac.type == e1000_82575 || 1675 hw->mac.type == e1000_82576) { 1676 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1677 if (ret_val) { 1678 hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); 1679 return ret_val; 1680 } 1681 1682 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) 1683 pcs_autoneg = false; 1684 } 1685 1686 /* non-SGMII modes only supports a speed of 1000/Full for the 1687 * link so it is best to just force the MAC and let the pcs 1688 * link either autoneg or be forced to 1000/Full 1689 */ 1690 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1691 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1692 1693 /* set speed of 1000/Full if speed/duplex is forced */ 1694 reg |= 
E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1695 break; 1696 } 1697 1698 wr32(E1000_CTRL, ctrl_reg); 1699 1700 /* New SerDes mode allows for forcing speed or autonegotiating speed 1701 * at 1gb. Autoneg should be default set by most drivers. This is the 1702 * mode that will be compatible with older link partners and switches. 1703 * However, both are supported by the hardware and some drivers/tools. 1704 */ 1705 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1706 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1707 1708 if (pcs_autoneg) { 1709 /* Set PCS register for autoneg */ 1710 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1711 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1712 1713 /* Disable force flow control for autoneg */ 1714 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; 1715 1716 /* Configure flow control advertisement for autoneg */ 1717 anadv_reg = rd32(E1000_PCS_ANADV); 1718 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); 1719 switch (hw->fc.requested_mode) { 1720 case e1000_fc_full: 1721 case e1000_fc_rx_pause: 1722 anadv_reg |= E1000_TXCW_ASM_DIR; 1723 anadv_reg |= E1000_TXCW_PAUSE; 1724 break; 1725 case e1000_fc_tx_pause: 1726 anadv_reg |= E1000_TXCW_ASM_DIR; 1727 break; 1728 default: 1729 break; 1730 } 1731 wr32(E1000_PCS_ANADV, anadv_reg); 1732 1733 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1734 } else { 1735 /* Set PCS register for forced link */ 1736 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1737 1738 /* Force flow control for forced link */ 1739 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1740 1741 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1742 } 1743 1744 wr32(E1000_PCS_LCTL, reg); 1745 1746 if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) 1747 igb_force_mac_fc(hw); 1748 1749 return ret_val; 1750 } 1751 1752 /** 1753 * igb_sgmii_active_82575 - Return sgmii state 1754 * @hw: pointer to the HW structure 1755 * 1756 * 82575 silicon has a serialized gigabit media independent interface 
 (sgmii)
 *  which can be enabled for use in the embedded applications.  Simply
 *  return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 *  igb_reset_init_script_82575 - Inits HW defaults after reset
 *  @hw: pointer to the HW structure
 *
 *  Inits recommended HW defaults after a reset when there is no EEPROM
 *  detected.  This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 *  igb_read_mac_addr_82575 - Read device MAC address
 *  @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 *  igb_power_down_phy_copper_82575 - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	igb_clear_hw_cntrs_base(hw);

	/* Packet size RX/TX counters */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* Error and transmit status counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* Management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	/* Interrupt cause counters */
	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* Host and switch counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 *  @hw: pointer to the HW structure
 *
 *  After rx enable if managability is enabled then there is likely some
 *  bad data at the start of the fifo and possibly in the DMA fifo.  This
 *  function clears the fifos and flushes any packets that came in as rx was
 *  being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Workaround applies only to 82575 parts with manageability
	 * (TCO receive) enabled.
	 */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 *  igb_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW
 structure
 *
 *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 200ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend; GCR is written back on
	 * every exit path, including the early-out cases above
	 */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *  @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 *  enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* Anti-spoofing control lives in a different register per MAC */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			    E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs.  XOR clears the PF's MAC
		 * and VLAN anti-spoof bits that were just set above.
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}

/**
 *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i354:
	case e1000_i350:
		/* i350/i354 use TXSWC but share the same bit definition */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}

}

/**
 *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables replication of packets across multiple pools.
 **/
void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
	u32 vt_ctl = rd32(E1000_VT_CTL);

	if (enable)
		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
	else
		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;

	wr32(E1000_VT_CTL, vt_ctl);
}

/**
 *  igb_read_phy_reg_82580 - Read 82580 MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Reads the MDI control register in the PHY at offset and stores the
 *  information read to data.
 **/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 *  igb_write_phy_reg_82580 - Write 82580 MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write to register at offset
 *
 *  Writes data to MDI control register in the PHY at offset.
 **/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 *  @hw: pointer to the HW structure
 *
 *  This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 *  the values found in the EEPROM.  This addresses an issue in which these
 *  bits are not restored from EEPROM after reset.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	/* Only the 82580 in SGMII mode needs this fixup. */
	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	/* The INIT_CONTROL3 word for this port carries the desired MDIO
	 * routing bits.
	 */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* The request flag is one-shot: consume it so the next reset
	 * defaults back to a port-level reset.
	 */
	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* Quiesce DMA: mask interrupts, stop Rx, and let Tx drain. */
	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 11000);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	/* Fall back to a port reset if a device reset is already latched. */
	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		usleep_range(5000, 6000);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}

/**
 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 * @data: data received by reading RXPBS register
 *
 * The 82580 uses a table based approach for packet buffer allocation sizes.
2298 * This function converts the retrieved value into the correct table value 2299 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2300 * 0x0 36 72 144 1 2 4 8 16 2301 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2302 */ 2303 u16 igb_rxpbs_adjust_82580(u32 data) 2304 { 2305 u16 ret_val = 0; 2306 2307 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) 2308 ret_val = e1000_82580_rxpbs_table[data]; 2309 2310 return ret_val; 2311 } 2312 2313 /** 2314 * igb_validate_nvm_checksum_with_offset - Validate EEPROM 2315 * checksum 2316 * @hw: pointer to the HW structure 2317 * @offset: offset in words of the checksum protected region 2318 * 2319 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2320 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 2321 **/ 2322 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, 2323 u16 offset) 2324 { 2325 s32 ret_val = 0; 2326 u16 checksum = 0; 2327 u16 i, nvm_data; 2328 2329 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2330 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2331 if (ret_val) { 2332 hw_dbg("NVM Read Error\n"); 2333 goto out; 2334 } 2335 checksum += nvm_data; 2336 } 2337 2338 if (checksum != (u16) NVM_SUM) { 2339 hw_dbg("NVM Checksum Invalid\n"); 2340 ret_val = -E1000_ERR_NVM; 2341 goto out; 2342 } 2343 2344 out: 2345 return ret_val; 2346 } 2347 2348 /** 2349 * igb_update_nvm_checksum_with_offset - Update EEPROM 2350 * checksum 2351 * @hw: pointer to the HW structure 2352 * @offset: offset in words of the checksum protected region 2353 * 2354 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2355 * up to the checksum. Then calculates the EEPROM checksum and writes the 2356 * value to the EEPROM. 
2357 **/ 2358 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2359 { 2360 s32 ret_val; 2361 u16 checksum = 0; 2362 u16 i, nvm_data; 2363 2364 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 2365 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2366 if (ret_val) { 2367 hw_dbg("NVM Read Error while updating checksum.\n"); 2368 goto out; 2369 } 2370 checksum += nvm_data; 2371 } 2372 checksum = (u16) NVM_SUM - checksum; 2373 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 2374 &checksum); 2375 if (ret_val) 2376 hw_dbg("NVM Write Error while updating checksum.\n"); 2377 2378 out: 2379 return ret_val; 2380 } 2381 2382 /** 2383 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum 2384 * @hw: pointer to the HW structure 2385 * 2386 * Calculates the EEPROM section checksum by reading/adding each word of 2387 * the EEPROM and then verifies that the sum of the EEPROM is 2388 * equal to 0xBABA. 2389 **/ 2390 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 2391 { 2392 s32 ret_val = 0; 2393 u16 eeprom_regions_count = 1; 2394 u16 j, nvm_data; 2395 u16 nvm_offset; 2396 2397 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2398 if (ret_val) { 2399 hw_dbg("NVM Read Error\n"); 2400 goto out; 2401 } 2402 2403 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2404 /* if checksums compatibility bit is set validate checksums 2405 * for all 4 ports. 
2406 */ 2407 eeprom_regions_count = 4; 2408 } 2409 2410 for (j = 0; j < eeprom_regions_count; j++) { 2411 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2412 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2413 nvm_offset); 2414 if (ret_val != 0) 2415 goto out; 2416 } 2417 2418 out: 2419 return ret_val; 2420 } 2421 2422 /** 2423 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 2424 * @hw: pointer to the HW structure 2425 * 2426 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2427 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2428 * checksum and writes the value to the EEPROM. 2429 **/ 2430 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 2431 { 2432 s32 ret_val; 2433 u16 j, nvm_data; 2434 u16 nvm_offset; 2435 2436 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2437 if (ret_val) { 2438 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); 2439 goto out; 2440 } 2441 2442 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2443 /* set compatibility bit to validate checksums appropriately */ 2444 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2445 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2446 &nvm_data); 2447 if (ret_val) { 2448 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); 2449 goto out; 2450 } 2451 } 2452 2453 for (j = 0; j < 4; j++) { 2454 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2455 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2456 if (ret_val) 2457 goto out; 2458 } 2459 2460 out: 2461 return ret_val; 2462 } 2463 2464 /** 2465 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 2466 * @hw: pointer to the HW structure 2467 * 2468 * Calculates the EEPROM section checksum by reading/adding each word of 2469 * the EEPROM and then verifies that the sum of the EEPROM is 2470 * equal to 0xBABA. 
2471 **/ 2472 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 2473 { 2474 s32 ret_val = 0; 2475 u16 j; 2476 u16 nvm_offset; 2477 2478 for (j = 0; j < 4; j++) { 2479 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2480 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2481 nvm_offset); 2482 if (ret_val != 0) 2483 goto out; 2484 } 2485 2486 out: 2487 return ret_val; 2488 } 2489 2490 /** 2491 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 2492 * @hw: pointer to the HW structure 2493 * 2494 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2495 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2496 * checksum and writes the value to the EEPROM. 2497 **/ 2498 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) 2499 { 2500 s32 ret_val = 0; 2501 u16 j; 2502 u16 nvm_offset; 2503 2504 for (j = 0; j < 4; j++) { 2505 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2506 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2507 if (ret_val != 0) 2508 goto out; 2509 } 2510 2511 out: 2512 return ret_val; 2513 } 2514 2515 /** 2516 * __igb_access_emi_reg - Read/write EMI register 2517 * @hw: pointer to the HW structure 2518 * @addr: EMI address to program 2519 * @data: pointer to value to read/write from/to the EMI address 2520 * @read: boolean flag to indicate read or write 2521 **/ 2522 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2523 u16 *data, bool read) 2524 { 2525 s32 ret_val = 0; 2526 2527 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2528 if (ret_val) 2529 return ret_val; 2530 2531 if (read) 2532 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2533 else 2534 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2535 2536 return ret_val; 2537 } 2538 2539 /** 2540 * igb_read_emi_reg - Read Extended Management Interface register 2541 * @hw: pointer to the HW structure 2542 * @addr: EMI address to program 2543 * @data: value to be read from the EMI 
 **/
s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __igb_access_emi_reg(hw, addr, data, true);
}

/**
 * igb_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
	u32 ipcnfg, eeer;

	/* EEE is only configured on i350-or-newer copper ports. */
	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		u32 eee_su = rd32(E1000_EEE_SU);

		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			 E1000_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");

	} else {
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			    E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			  E1000_EEER_RX_LPI_EN |
			  E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
	/* Read back to flush the posted register writes. */
	rd32(E1000_IPCNFG);
	rd32(E1000_EEER);
out:

	return 0;
}

/**
 * igb_set_eee_i354 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
 *
 **/
s32 igb_set_eee_i354(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	/* Only the copper M88E1543 PHY takes this configuration path. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	if (!hw->dev_spec._82575.eee_disable) {
		/* Switch to PHY page 18. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
		if (ret_val)
			goto out;

		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					    &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					     phy_data);
		if (ret_val)
			goto out;

		/* Return the PHY to page 0. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
		if (ret_val)
			goto out;

		/* Turn on EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_EEE_ADV_100_SUPPORTED |
			    E1000_EEE_ADV_1000_SUPPORTED;
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	} else {
		/* Turn off EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
			      E1000_EEE_ADV_1000_SUPPORTED);
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	}

out:
	return ret_val;
}

/**
 * igb_get_eee_status_i354 - Get EEE status
 * @hw: pointer to the HW structure
 * @status: EEE status
 *
 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
 * been received.
 **/
s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	/* Check if EEE is supported on this device. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (phy->id != M88E1543_E_PHY_ID))
		goto out;

	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
				     E1000_PCS_STATUS_DEV_I354,
				     &phy_data);
	if (ret_val)
		goto out;

	/* Either LPI-received indication counts as "EEE active". */
	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;

out:
	return ret_val;
}

/* EMC temperature-data register for each sensor index (0 = internal). */
static const u8 e1000_emc_temp_data[4] = {
	E1000_EMC_INTERNAL_DATA,
	E1000_EMC_DIODE1_DATA,
	E1000_EMC_DIODE2_DATA,
	E1000_EMC_DIODE3_DATA
};
/* EMC thermal-limit register for each sensor index. */
static const u8 e1000_emc_therm_limit[4] = {
	E1000_EMC_INTERNAL_THERM_LIMIT,
	E1000_EMC_DIODE1_THERM_LIMIT,
	E1000_EMC_DIODE2_THERM_LIMIT,
	E1000_EMC_DIODE3_THERM_LIMIT
};

#ifdef CONFIG_IGB_HWMON
/**
 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Updates the temperatures in mac.thermal_sensor_data
 **/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Thermal sensor support exists only on i350 function 0. */
	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	/* Internal junction temperature is read directly from THMJT. */
	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
	if (num_sensors > E1000_MAX_SENSORS)
		num_sensors = E1000_MAX_SENSORS;

	/* Sensor 0 was filled from THMJT above; the rest come from the
	 * external EMC device over I2C.
	 */
	for (i = 1; i < num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);

		/* Location 0 marks an unused table entry. */
		if (sensor_location != 0)
			hw->phy.ops.read_i2c_byte(hw,
					e1000_emc_temp_data[sensor_index],
					E1000_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
	}
	return 0;
}

/**
 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Sets the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Thermal sensor support exists only on i350 function 0. */
	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));

	/* Internal sensor thresholds come from the MAC registers. */
	data->sensor[0].location = 0x1;
	data->sensor[0].caution_thresh =
		(rd32(E1000_THHIGHTC) & 0xFF);
	data->sensor[0].max_op_thresh =
		(rd32(E1000_THLOWTC) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
			    NVM_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);

	/* NOTE(review): unlike igb_get_thermal_sensor_data_generic() this
	 * loop is inclusive (i <= num_sensors) and num_sensors is not
	 * clamped to E1000_MAX_SENSORS before indexing the EMC tables; only
	 * the data->sensor[] store below is bounds-checked - confirm this
	 * asymmetry is intended.
	 */
	for (i = 1; i <= num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;

		/* Program the high-threshold limit into the EMC device. */
		hw->phy.ops.write_i2c_byte(hw,
				e1000_emc_therm_limit[sensor_index],
				E1000_I2C_THERMAL_SENSOR_ADDR,
				therm_limit);

		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return 0;
}

#endif
/* MAC operations common to the whole 82575 family. */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw = igb_init_hw_82575,
	.check_for_link = igb_check_for_link_82575,
	.rar_set = igb_rar_set,
	.read_mac_addr = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
#endif
};

static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire = igb_acquire_phy_82575,
	.get_cfg_done = igb_get_cfg_done_82575,
	.release = igb_release_phy_82575,
	.write_i2c_byte = igb_write_i2c_byte,
	.read_i2c_byte = igb_read_i2c_byte,
};

static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire = igb_acquire_nvm_82575,
	.read = igb_read_nvm_eerd,
	.release = igb_release_nvm_82575,
	.write = igb_write_nvm_spi,
};

/* Board dispatch table consumed by the igb core driver. */
const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};
