/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);

/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and stored:
 *  bus speed, bus width, type (PCIe), and PCIe function.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}

/**
 *  igb_clear_vfta - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		array_wr32(E1000_VFTA, offset, 0);
		wrfl();
	}
}

/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}

/* Due to a hw erratum, if the host tries to configure the VFTA register
 * while performing queries from the BMC or DMA, then the VFTA in some
 * cases won't be written.
 */

/**
 *  igb_clear_vfta_i350 - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/
void igb_clear_vfta_i350(struct e1000_hw *hw)
{
	u32 offset;
	int i;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		for (i = 0; i < 10; i++)
			array_wr32(E1000_VFTA, offset, 0);

		wrfl();
	}
}

/**
 *  igb_write_vfta_i350 - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
	int i;

	for (i = 0; i < 10; i++)
		array_wr32(E1000_VFTA, offset, value);

	wrfl();
}

/**
 *  igb_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers
 *
 *  Sets up the receive address registers by setting the base receive address
 *  register to the device's MAC address and clearing all the other receive
 *  address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}

/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vid: VLAN id to add or remove
 *  @add: if true add filter, if false remove
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
	u32 vfta;
	struct igb_adapter *adapter = hw->back;
	s32 ret_val = 0;

	vfta = adapter->shadow_vfta[index];

	/* bit was set/cleared before we started */
	if ((!!(vfta & mask)) == add) {
		ret_val = -E1000_ERR_CONFIG;
	} else {
		if (add)
			vfta |= mask;
		else
			vfta &= ~mask;
	}
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_write_vfta_i350(hw, index, vfta);
	else
		igb_write_vfta(hw, index, vfta);
	adapter->shadow_vfta[index] = vfta;

	return ret_val;
}

/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.
 *  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is saved in the hw struct and
 *  programmed into RAR0 and the function returns success, otherwise the
 *  function returns an error.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}

/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers.  It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}

/**
 *  igb_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  igb_mta_set()
 **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits.  Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4.  These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB                 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				   (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}

/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}

/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.
	 * The get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}

/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}

/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.  Currently no func pointer exists and all
 *  implementations are handled in the generic version of this function.
 **/
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}

/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}

/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350) {
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
					   + lan_offset, 1, &nvm_data);
	} else {
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
					   1, &nvm_data);
	}

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}

/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 *  device control register to reflect the adapter settings.  TFCE and RFCE
 *  need to be explicitly set by software when a copper PHY is used because
 *  autonegotiation is managed by the PHY rather than the MAC.  Software must
 *  also configure these bits when link is forced on a fiber connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}

/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto-Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling Rx only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more often than not we will
		 * be asked to delay transmission of packets rather than
		 * asking our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\r\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/* Now we need to do one last check...
		 * If we auto-negotiated to HALF DUPLEX, flow control should
		 * not be enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto-Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}

/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the current
 *  speed and duplex for copper connections.
 **/
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}

/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}

/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  igb_id_led_init - Initialize LED control from the ID LED settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the valid default LED configuration and stores the LEDCTL values
 *  to be used for the LED "on" and "off" modes in the mac struct.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.
		 * The blink logic in hardware only works when mode is set
		 * to "on" so it must be changed accordingly when the mode
		 * is "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}

/**
 *  igb_led_off - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 if successful, else returns -10
 *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
 *  caused the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  Verify that when not using auto-negotiation, MDI/MDIx is correctly set,
 *  which is forced to MDI mode only.
 **/
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  There are several of these
 *  and they all have the format address << 8 | data and bit 31 is polled for
 *  completion.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the write completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_enable_mng_pass_thru - Enable processing of ARPs
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave the interface enabled so that frames
 *  can be directed to and from the management interface.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}