/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);

/**
 * igb_get_bus_info_pcie - Get PCIe bus information
 * @hw: pointer to the HW structure
 *
 * Determines and stores the system bus information for a particular
 * network interface. The following bus information is determined and stored:
 * bus speed, bus width, type (PCIe), and PCIe function.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}

/**
 * igb_clear_vfta - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		array_wr32(E1000_VFTA, offset, 0);
		wrfl();
	}
}

/**
 * igb_write_vfta - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}

/* Due to a hw erratum, if the host tries to configure the VFTA register
 * while performing queries from the BMC or DMA, then the VFTA in some
 * cases won't be written.
 */

/**
 * igb_clear_vfta_i350 - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
void igb_clear_vfta_i350(struct e1000_hw *hw)
{
	u32 offset;
	int i;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		for (i = 0; i < 10; i++)
			array_wr32(E1000_VFTA, offset, 0);

		wrfl();
	}
}

/**
 * igb_write_vfta_i350 - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
 **/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
	int i;

	for (i = 0; i < 10; i++)
		array_wr32(E1000_VFTA, offset, value);

	wrfl();
}

/**
 * igb_init_rx_addrs - Initialize receive addresses
 * @hw: pointer to the HW structure
 * @rar_count: receive address registers
 *
 * Sets up the receive address registers by setting the base receive address
 * register to the device's MAC address and clearing all the other receive
 * address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}

/**
 * igb_vfta_set - enable or disable VLAN in VLAN filter table
 * @hw: pointer to the HW structure
 * @vid: VLAN id to add or remove
 * @add: if true add filter, if false remove
 *
 * Sets or clears a bit in the VLAN filter table array based on VLAN id
 * and if we are adding or removing the filter
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
	u32 vfta;
	struct igb_adapter *adapter = hw->back;
	s32 ret_val = 0;

	vfta = adapter->shadow_vfta[index];

	/* bit was set/cleared before we started */
	if ((!!(vfta & mask)) == add) {
		ret_val = -E1000_ERR_CONFIG;
	} else {
		if (add)
			vfta |= mask;
		else
			vfta &= ~mask;
	}
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_write_vfta_i350(hw, index, vfta);
	else
		igb_write_vfta(hw, index, vfta);
	adapter->shadow_vfta[index] = vfta;

	return ret_val;
}

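/* Illustrative note (added, not from the original sources): igb_vfta_set()
 * above maps a VLAN id to a 32-bit VFTA entry and a bit within it.  Assuming
 * the usual 32-bits-per-entry layout (entry shift 5, bit mask 0x1F), vid 100
 * selects index (100 >> 5) = 3 and mask (1 << (100 & 0x1F)) = 1 << 4, so
 * bit 4 of VFTA[3] controls VLAN 100.
 */
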
/**
 * igb_check_alt_mac_addr - Check for alternate MAC addr
 * @hw: pointer to the HW structure
 *
 * Checks the nvm for an alternate MAC address.  An alternate MAC address
 * can be set up by pre-boot software and must be treated like a permanent
 * address and must override the actual permanent MAC address. If an
 * alternate MAC address is found it is saved in the hw struct and
 * programmed into RAR0 and the function returns success, otherwise the
 * function returns an error.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}

/**
 * igb_rar_set - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

/**
 * igb_mta_set - Set multicast filter table address
 * @hw: pointer to the HW structure
 * @hash_value: determines the MTA register and bit to set
 *
 * The multicast table address is a register array of 32-bit registers.
 * The hash_value is used to determine what register the bit is in, the
 * current value is read, the new bit is OR'd in and the new value is
 * written back into the register.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits. We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying. The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}

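/* Illustrative note (added, not from the original sources): carrying the
 * worked example from igb_hash_mc_addr() below (hash_value 0x563 for filter
 * type 0 with a 128-register MTA) through igb_mta_set() above gives
 * hash_reg = (0x563 >> 5) & 127 = 43 and hash_bit = 0x563 & 0x1F = 3, i.e.
 * bit 3 of MTA[43] is set for that multicast address.
 */
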
/**
 * igb_hash_mc_addr - Generate a multicast hash value
 * @hw: pointer to the HW structure
 * @mc_addr: pointer to a multicast address
 *
 * Generates a multicast address hash value which is used to determine
 * the multicast filter table array address and new table value. See
 * igb_mta_set()
 **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask. Case 0 does this exactly. Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits. Thus 8 - bit_shift. The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4. These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 *  01  AA  00  12  34  56
	 * LSB                 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

/**
 * igb_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}

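/* Illustrative note (added, not from the original sources): "packed" above
 * means the caller passes the addresses back to back, ETH_ALEN (6) bytes
 * each, i.e. a u8 buffer of mc_addr_count * ETH_ALEN bytes;
 * igb_update_mc_addr_list() simply advances the pointer by ETH_ALEN per
 * iteration.
 */
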
/**
 * igb_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}

/**
 * igb_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}

/**
 * igb_setup_link - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control. Calls the appropriate media-specific link configuration
 * function. Assuming the adapter has a valid link partner, a valid link
 * should be established. Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values. This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}

/**
 * igb_config_collision_dist - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup. Currently no func pointer exists and all
 * implementations are handled in the generic version of this function.
 **/
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}

/**
 * igb_set_fc_watermarks - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers. If
 * flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers. Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code. However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}

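/* Illustrative note (added, not from the original sources): hw->fc.high_water
 * and hw->fc.low_water are not chosen here; in this driver they are normally
 * derived from the Rx packet buffer size at reset time (see igb_reset() in
 * igb_main.c), with the low watermark a fixed amount below the high
 * watermark, and igb_set_fc_watermarks() only programs the resulting values.
 */
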
/**
 * igb_set_default_fc - Set flow control default values
 * @hw: pointer to the HW structure
 *
 * Read the EEPROM for the default values for flow control and store the
 * values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350) {
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
					   + lan_offset, 1, &nvm_data);
	} else {
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
					   1, &nvm_data);
	}

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}

/**
 * igb_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
 * device control register to reflect the adapter settings. TFCE and RFCE
 * need to be explicitly set by software when a copper PHY is used because
 * autonegotiation is managed by the PHY rather than the MAC. Software must
 * also configure these bits when link is forced on a fiber connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}

/**
 * igb_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced. If the link needed to be forced, then
 * flow control needs to be forced also. If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link. In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled. In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed. We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto-Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner. The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
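		 *
		 * Illustrative reading of the table (added note, not from
		 * the original comment): if the local device advertises
		 * PAUSE=1/ASM_DIR=1 while the link partner advertises
		 * PAUSE=0/ASM_DIR=1, the row "1 | 1 | 0 | 1" applies and
		 * the code below resolves to e1000_fc_rx_pause.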
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames. In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled. However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner. So if we advertised no flow control, that is
		 * what we will resolve to. If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only. We can do
		 * this safely for two reasons: If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway. If the intent on the link partner was to have
		 * flow control enabled, then by us enabling Rx only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\r\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/* Now we need to do one last check...
		 * If we auto-negotiated to HALF DUPLEX, flow control should
		 * not be enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed,
							   &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled. In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto-Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner. The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames. In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}

/**
 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 **/
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbps, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbps, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbps, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}

/**
 * igb_get_hw_semaphore - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}

/**
 * igb_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 **/
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}

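/* Illustrative note (added, not from the original sources): the ID LED word
 * read by igb_valid_led_default() below packs one 4-bit field per LED;
 * igb_id_led_init() walks those nibbles ((data >> (i << 2)) & 0x0F) to build
 * the "LED on" and "LED off" variants of the LEDCTL register that are stored
 * in ledctl_mode1 and ledctl_mode2.
 */
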
/**
 * igb_valid_led_default - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration. If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * igb_id_led_init - Initialize LED modes from the ID LED settings
 * @hw: pointer to the HW structure
 *
 * Reads the default LED configuration from the NVM and derives the LEDCTL
 * values used for the "LED on" and "LED off" states (ledctl_mode1 and
 * ledctl_mode2).
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igb_cleanup_led - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 * igb_blink_led - Blink LED
 * @hw: pointer to the HW structure
 *
 * Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.
		 * The blink logic in hardware only works when mode is set
		 * to "on" so it must be changed accordingly when the mode
		 * is "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}

/**
 * igb_led_off - Turn LED off
 * @hw: pointer to the HW structure
 *
 * Turn LED off.
 **/
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * igb_disable_pcie_master - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Returns 0 if successful, else returns -10
 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
 * the master requests to be disabled.
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_validate_mdi_setting - Verify MDI/MDIx settings
 * @hw: pointer to the HW structure
 *
 * Verify that when not using auto-negotiation, MDI/MDIx is correctly
 * set, which is forced to MDI mode only.
 **/
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
 * @hw: pointer to the HW structure
 * @reg: 32bit register offset such as E1000_SCTL
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes an address/data control type register. There are several of these
 * and they all have the format address << 8 | data and bit 31 is polled for
 * completion.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_enable_mng_pass_thru - Enable processing of ARPs
 * @hw: pointer to the HW structure
 *
 * Verifies the hardware needs to leave interface enabled so that frames can
 * be directed to and from the management interface.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}