// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);

/**
 * igb_get_bus_info_pcie - Get PCIe bus information
 * @hw: pointer to the HW structure
 *
 * Determines and stores the system bus information for a particular
 * network interface.  The following bus information is determined and stored:
 * bus speed, bus width, type (PCIe), and PCIe function.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}

/**
 * igb_clear_vfta - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
		hw->mac.ops.write_vfta(hw, offset, 0);
}

/**
 * igb_write_vfta - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
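 * The value is also mirrored into adapter->shadow_vfta so that later
 * calls such as igb_vfta_set() can consult the table without reading
 * the register back.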
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();

	adapter->shadow_vfta[offset] = value;
}

/**
 * igb_init_rx_addrs - Initialize receive addresses
 * @hw: pointer to the HW structure
 * @rar_count: receive address registers
 *
 * Sets up the receive address registers by setting the base receive address
 * register to the device's MAC address and clearing all the other receive
 * address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}

/**
 * igb_find_vlvf_slot - find the VLAN id or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: skip VLVF if no match is found
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}

/**
 * igb_vfta_set - enable or disable vlan in VLAN filter table
 * @hw: pointer to the HW structure
 * @vlan: VLAN id to add or remove
 * @vind: VMDq output index that maps queue to VLAN id
 * @vlan_on: if true add filter, if false remove
 * @vlvf_bypass: skip VLVF if no match is found
 *
 * Sets or clears a bit in the VLAN filter table array based on VLAN id
 * and if we are adding or removing the filter
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
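	 * As a concrete example, VLAN ID 100 lands in VFTA register
	 * 100 / 32 = 3, bit 100 % 32 = 4; the VLVF entry's pool-select
	 * bits track which pools (PF/VFs) are still using that VLAN.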
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta using an XOR.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the VLAN is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}

/**
 * igb_check_alt_mac_addr - Check for alternate MAC addr
 * @hw: pointer to the HW structure
 *
 * Checks the nvm for an alternate MAC address.  An alternate MAC address
 * can be setup by pre-boot software and must be treated like a permanent
 * address and must override the actual permanent MAC address. If an
 * alternate MAC address is found it is saved in the hw struct and
 * programmed into RAR0 and the function returns success, otherwise the
 * function returns an error.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
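	 * (hw->mac.type >= e1000_82580 also covers i350, i354 and the
	 * i210/i211 family, so the early return below applies to all of them.)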
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}

/**
 * igb_rar_set - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

/**
 * igb_mta_set - Set multicast filter table address
 * @hw: pointer to the HW structure
 * @hash_value: determines the MTA register and bit to set
 *
 * The multicast table address is a register array of 32-bit registers.
 * The hash_value is used to determine what register the bit is in, the
 * current value is read, the new bit is OR'd in and the new value is
 * written back into the register.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers.  It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value].  So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
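	 * For example, with 128 MTA registers a hash value of 0x563
	 * selects register (0x563 >> 5) & 0x7F = 0x2B and bit
	 * 0x563 & 0x1F = 3 within that register.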
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= BIT(hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}

/**
 * igb_hash_mc_addr - Generate a multicast hash value
 * @hw: pointer to the HW structure
 * @mc_addr: pointer to a multicast address
 *
 * Generates a multicast address hash value which is used to determine
 * the multicast filter table array address and new table value.  See
 * igb_mta_set()
 **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4.  These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB           MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

/**
 * igb_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
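 * (In the igb driver this is typically reached from the ndo_set_rx_mode
 * path, which packs the netdev's multicast list into mc_addr_list.)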
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}

/**
 * igb_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}

/**
 * igb_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
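	 * (Collision distance only matters for half-duplex links, but
	 * re-programming it on every link-up is harmless.)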
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}

/**
 * igb_setup_link - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control.  Calls the appropriate media-specific link configuration
 * function.  Assuming the adapter has a valid link partner, a valid link
 * should be established.  Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}

/**
 * igb_config_collision_dist - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup. Currently no func pointer exists and all
 * implementations are handled in the generic version of this function.
 **/
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}

/**
 * igb_set_fc_watermarks - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers.  If
 * flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
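 * The watermark values come from hw->fc.high_water/low_water, which the
 * driver derives from the Rx packet buffer size when the adapter is reset.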
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}

/**
 * igb_set_default_fc - Set flow control default values
 * @hw: pointer to the HW structure
 *
 * Read the EEPROM for the default values for flow control and store the
 * values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350)
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
	else
		lan_offset = 0;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
				   1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}

/**
 * igb_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 * device control register to reflect the adapter settings.  TFCE and RFCE
 * need to be explicitly set by software when a copper PHY is used because
 * autonegotiation is managed by the PHY rather than the MAC.  Software must
 * also configure these bits when link is forced on a fiber connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}

/**
 * igb_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced.  If the link needed to be forced, then
 * flow control needs to be forced also.  If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.
		 * However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
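		 * (Setting FORCE_FCTRL in PCS_LCTL below makes the link
		 * honor the RFCE/TFCE bits programmed by igb_force_mac_fc()
		 * rather than the PCS auto-negotiation result.)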
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}

/**
 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 **/
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}

/**
 * igb_get_hw_semaphore - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}

/**
 * igb_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
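 * Returns 0 once the Auto Read Done bit in EECD is set, or
 * -E1000_ERR_RESET if the bit does not assert within the polling window.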
 **/
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_valid_led_default - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * igb_id_led_init - Initialize ID LED settings
 * @hw: pointer to the HW structure
 *
 * Reads the ID LED configuration from the NVM and derives the LEDCTL
 * values used for the LED "on" and "off" identification modes.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igb_cleanup_led - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
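 * (ledctl_default is the LEDCTL value captured in igb_id_led_init()
 * before any LED mode was applied, so writing it back restores that state.)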
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 * igb_blink_led - Blink LED
 * @hw: pointer to the HW structure
 *
 * Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}

/**
 * igb_led_off - Turn LED off
 * @hw: pointer to the HW structure
 *
 * Turn LED off.
 **/
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * igb_disable_pcie_master - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Returns 0 if successful, else returns -10
 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
 * caused the master requests to be disabled.
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_validate_mdi_setting - Verify MDI/MDIx settings
 * @hw: pointer to the HW structure
 *
 * Verify that when not using auto-negotiation, MDI/MDIx is correctly set,
 * which is forced to MDI mode only.
 **/
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
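	/* (On older parts, forcing the link requires an explicit MDI or
	 * MDI-X selection; an automatic or out-of-range mdix value is
	 * rejected below and reset to MDI.)
	 */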
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_write_8bit_ctrl_reg - Write an 8bit CTRL register
 * @hw: pointer to the HW structure
 * @reg: 32bit register offset such as E1000_SCTL
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes an address/data control type register.  There are several of these
 * and they all have the format address << 8 | data and bit 31 is polled for
 * completion.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_enable_mng_pass_thru - Enable processing of ARPs
 * @hw: pointer to the HW structure
 *
 * Verifies whether the hardware needs to leave the interface enabled so that
 * frames can be directed to and from the management interface.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}