1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 #include <linux/pci.h> 29 #include <linux/delay.h> 30 #include <linux/sched.h> 31 32 #include "ixgbe.h" 33 #include "ixgbe_phy.h" 34 35 #define IXGBE_82598_MAX_TX_QUEUES 32 36 #define IXGBE_82598_MAX_RX_QUEUES 64 37 #define IXGBE_82598_RAR_ENTRIES 16 38 #define IXGBE_82598_MC_TBL_SIZE 128 39 #define IXGBE_82598_VFT_TBL_SIZE 128 40 #define IXGBE_82598_RX_PB_SIZE 512 41 42 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 43 ixgbe_link_speed speed, 44 bool autoneg, 45 bool autoneg_wait_to_complete); 46 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 47 u8 *eeprom_data); 48 49 /** 50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout 51 * @hw: pointer to the HW structure 52 * 53 * The defaults for 82598 should be in the range of 50us to 50ms, 54 * however the hardware default for these parts is 500us to 1ms which is less 55 * than the 10ms recommended by the pci-e spec. To address this we need to 56 * increase the value to either 10ms to 250ms for capability version 1 config, 57 * or 16ms to 55ms for version 2. 
 **/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0; a non-zero
	 * field means the timeout was already configured elsewhere */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pci_read_config_word(adapter->pdev,
	                     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	pci_write_config_word(adapter->pdev,
	                      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend; done on every path, including
	 * when a timeout value was already configured above */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 * ixgbe_get_invariants_82598 - Store device-invariant MAC parameters
 * @hw: pointer to hardware structure
 *
 * Identifies the PHY, then records the fixed 82598 multicast/VLAN table
 * sizes, RAR entry count, queue limits, and MSI-X vector count in the
 * MAC info structure.  Always returns 0.
 **/
static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the phy type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during get_invariants because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
118 * 119 **/ 120 static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) 121 { 122 struct ixgbe_mac_info *mac = &hw->mac; 123 struct ixgbe_phy_info *phy = &hw->phy; 124 s32 ret_val = 0; 125 u16 list_offset, data_offset; 126 127 /* Identify the PHY */ 128 phy->ops.identify(hw); 129 130 /* Overwrite the link function pointers if copper PHY */ 131 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 132 mac->ops.setup_link = &ixgbe_setup_copper_link_82598; 133 mac->ops.get_link_capabilities = 134 &ixgbe_get_copper_link_capabilities_generic; 135 } 136 137 switch (hw->phy.type) { 138 case ixgbe_phy_tn: 139 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 140 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 141 phy->ops.get_firmware_version = 142 &ixgbe_get_phy_firmware_version_tnx; 143 break; 144 case ixgbe_phy_nl: 145 phy->ops.reset = &ixgbe_reset_phy_nl; 146 147 /* Call SFP+ identify routine to get the SFP+ module type */ 148 ret_val = phy->ops.identify_sfp(hw); 149 if (ret_val != 0) 150 goto out; 151 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { 152 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 153 goto out; 154 } 155 156 /* Check to see if SFP+ module is supported */ 157 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, 158 &list_offset, 159 &data_offset); 160 if (ret_val != 0) { 161 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 162 goto out; 163 } 164 break; 165 default: 166 break; 167 } 168 169 out: 170 return ret_val; 171 } 172 173 /** 174 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx 175 * @hw: pointer to hardware structure 176 * 177 * Starts the hardware using the generic start_hw function. 
178 * Disables relaxed ordering Then set pcie completion timeout 179 * 180 **/ 181 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) 182 { 183 u32 regval; 184 u32 i; 185 s32 ret_val = 0; 186 187 ret_val = ixgbe_start_hw_generic(hw); 188 189 /* Disable relaxed ordering */ 190 for (i = 0; ((i < hw->mac.max_tx_queues) && 191 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { 192 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 193 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 194 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); 195 } 196 197 for (i = 0; ((i < hw->mac.max_rx_queues) && 198 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { 199 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 200 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | 201 IXGBE_DCA_RXCTRL_HEAD_WRO_EN); 202 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 203 } 204 205 hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE; 206 207 /* set the completion timeout for interface */ 208 if (ret_val == 0) 209 ixgbe_set_pcie_completion_timeout(hw); 210 211 return ret_val; 212 } 213 214 /** 215 * ixgbe_get_link_capabilities_82598 - Determines link capabilities 216 * @hw: pointer to hardware structure 217 * @speed: pointer to link speed 218 * @autoneg: boolean auto-negotiation value 219 * 220 * Determines the link capabilities by reading the AUTOC register. 221 **/ 222 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, 223 ixgbe_link_speed *speed, 224 bool *autoneg) 225 { 226 s32 status = 0; 227 u32 autoc = 0; 228 229 /* 230 * Determine link capabilities based on the stored value of AUTOC, 231 * which represents EEPROM defaults. If AUTOC value has not been 232 * stored, use the current register value. 
233 */ 234 if (hw->mac.orig_link_settings_stored) 235 autoc = hw->mac.orig_autoc; 236 else 237 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 238 239 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 240 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 241 *speed = IXGBE_LINK_SPEED_1GB_FULL; 242 *autoneg = false; 243 break; 244 245 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 246 *speed = IXGBE_LINK_SPEED_10GB_FULL; 247 *autoneg = false; 248 break; 249 250 case IXGBE_AUTOC_LMS_1G_AN: 251 *speed = IXGBE_LINK_SPEED_1GB_FULL; 252 *autoneg = true; 253 break; 254 255 case IXGBE_AUTOC_LMS_KX4_AN: 256 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: 257 *speed = IXGBE_LINK_SPEED_UNKNOWN; 258 if (autoc & IXGBE_AUTOC_KX4_SUPP) 259 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 260 if (autoc & IXGBE_AUTOC_KX_SUPP) 261 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 262 *autoneg = true; 263 break; 264 265 default: 266 status = IXGBE_ERR_LINK_SETUP; 267 break; 268 } 269 270 return status; 271 } 272 273 /** 274 * ixgbe_get_media_type_82598 - Determines media type 275 * @hw: pointer to hardware structure 276 * 277 * Returns the media type (fiber, copper, backplane) 278 **/ 279 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) 280 { 281 enum ixgbe_media_type media_type; 282 283 /* Detect if there is a copper PHY attached. 
*/ 284 switch (hw->phy.type) { 285 case ixgbe_phy_cu_unknown: 286 case ixgbe_phy_tn: 287 media_type = ixgbe_media_type_copper; 288 goto out; 289 default: 290 break; 291 } 292 293 /* Media type for I82598 is based on device ID */ 294 switch (hw->device_id) { 295 case IXGBE_DEV_ID_82598: 296 case IXGBE_DEV_ID_82598_BX: 297 /* Default device ID is mezzanine card KX/KX4 */ 298 media_type = ixgbe_media_type_backplane; 299 break; 300 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 301 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 302 case IXGBE_DEV_ID_82598_DA_DUAL_PORT: 303 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: 304 case IXGBE_DEV_ID_82598EB_XF_LR: 305 case IXGBE_DEV_ID_82598EB_SFP_LOM: 306 media_type = ixgbe_media_type_fiber; 307 break; 308 case IXGBE_DEV_ID_82598EB_CX4: 309 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 310 media_type = ixgbe_media_type_cx4; 311 break; 312 case IXGBE_DEV_ID_82598AT: 313 case IXGBE_DEV_ID_82598AT2: 314 media_type = ixgbe_media_type_copper; 315 break; 316 default: 317 media_type = ixgbe_media_type_unknown; 318 break; 319 } 320 out: 321 return media_type; 322 } 323 324 /** 325 * ixgbe_fc_enable_82598 - Enable flow control 326 * @hw: pointer to hardware structure 327 * 328 * Enable flow control according to the current settings. 329 **/ 330 static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) 331 { 332 s32 ret_val = 0; 333 u32 fctrl_reg; 334 u32 rmcs_reg; 335 u32 reg; 336 u32 fcrtl, fcrth; 337 u32 link_speed = 0; 338 int i; 339 bool link_up; 340 341 /* 342 * Validate the water mark configuration for packet buffer 0. Zero 343 * water marks indicate that the packet buffer was not configured 344 * and the watermarks for packet buffer 0 should always be configured. 
345 */ 346 if (!hw->fc.low_water || 347 !hw->fc.high_water[0] || 348 !hw->fc.pause_time) { 349 hw_dbg(hw, "Invalid water mark configuration\n"); 350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 351 goto out; 352 } 353 354 /* 355 * On 82598 having Rx FC on causes resets while doing 1G 356 * so if it's on turn it off once we know link_speed. For 357 * more details see 82598 Specification update. 358 */ 359 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 360 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { 361 switch (hw->fc.requested_mode) { 362 case ixgbe_fc_full: 363 hw->fc.requested_mode = ixgbe_fc_tx_pause; 364 break; 365 case ixgbe_fc_rx_pause: 366 hw->fc.requested_mode = ixgbe_fc_none; 367 break; 368 default: 369 /* no change */ 370 break; 371 } 372 } 373 374 /* Negotiate the fc mode to use */ 375 ixgbe_fc_autoneg(hw); 376 377 /* Disable any previous flow control settings */ 378 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 379 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); 380 381 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 382 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); 383 384 /* 385 * The possible values of fc.current_mode are: 386 * 0: Flow control is completely disabled 387 * 1: Rx flow control is enabled (we can receive pause frames, 388 * but not send pause frames). 389 * 2: Tx flow control is enabled (we can send pause frames but 390 * we do not support receiving pause frames). 391 * 3: Both Rx and Tx flow control (symmetric) are enabled. 392 * other: Invalid. 393 */ 394 switch (hw->fc.current_mode) { 395 case ixgbe_fc_none: 396 /* 397 * Flow control is disabled by software override or autoneg. 398 * The code below will actually disable it in the HW. 399 */ 400 break; 401 case ixgbe_fc_rx_pause: 402 /* 403 * Rx Flow control is enabled and Tx Flow control is 404 * disabled by software override. 
Since there really 405 * isn't a way to advertise that we are capable of RX 406 * Pause ONLY, we will advertise that we support both 407 * symmetric and asymmetric Rx PAUSE. Later, we will 408 * disable the adapter's ability to send PAUSE frames. 409 */ 410 fctrl_reg |= IXGBE_FCTRL_RFCE; 411 break; 412 case ixgbe_fc_tx_pause: 413 /* 414 * Tx Flow control is enabled, and Rx Flow control is 415 * disabled by software override. 416 */ 417 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 418 break; 419 case ixgbe_fc_full: 420 /* Flow control (both Rx and Tx) is enabled by SW override. */ 421 fctrl_reg |= IXGBE_FCTRL_RFCE; 422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 423 break; 424 default: 425 hw_dbg(hw, "Flow control param set incorrectly\n"); 426 ret_val = IXGBE_ERR_CONFIG; 427 goto out; 428 break; 429 } 430 431 /* Set 802.3x based flow control settings. */ 432 fctrl_reg |= IXGBE_FCTRL_DPF; 433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 434 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 435 436 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; 437 438 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ 439 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 440 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 441 hw->fc.high_water[i]) { 442 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); 444 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); 445 } else { 446 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); 447 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); 448 } 449 450 } 451 452 /* Configure pause time (2 TCs per register) */ 453 reg = hw->fc.pause_time * 0x00010001; 454 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 455 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 456 457 /* Configure flow control refresh threshold value */ 458 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 459 460 out: 461 return ret_val; 462 } 463 464 /** 465 * ixgbe_start_mac_link_82598 - Configures MAC link settings 466 * @hw: pointer to hardware structure 467 * 468 * Configures link settings based on values in the ixgbe_hw struct. 469 * Restarts the link. Performs autonegotiation if needed. 
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link: setting AN_RESTART kicks off (re)negotiation */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so, and only
	 * for the KX4 autoneg link modes */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* poll in 100 ms steps up to IXGBE_AUTO_NEG_TIME */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}

/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available. If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	/* Only the 82598AT2 needs this extra PHY-vs-MAC link check */
	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return 0;

	/* Poll the AN status register until both autoneg-complete and
	 * link-status are set, in 100 ms steps */
	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);

		if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
		    (an_reg & MDIO_STAT1_LSTATUS))
			break;

		msleep(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		hw_dbg(hw, "Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return 0;
}

/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed.
 * Always returns 0; the result is reported through @speed and @link_up.
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from register 0xC79F.
	 * Bit 0 set indicates link is up/ready; clear indicates link down.
	 * 0xC00C is read to check that the XAUI lanes are active.  Bit 0
	 * clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* 0xC79F is read twice; the first read primes the latched
		 * status -- NOTE(review): presumed latch-clear behavior,
		 * confirm against the PHY datasheet */
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* poll in 100 ms steps up to IXGBE_LINK_UP_TIME */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     MDIO_MMD_PMAPMD,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     MDIO_MMD_PMAPMD,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* No PHY link -> skip the MAC LINKS check entirely */
		if (!*link_up)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2: MAC link must also be confirmed by the PHY */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

out:
	return 0;
}

/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if auto-negotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed, bool autoneg,
                                      bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	/* Check to see if speed passed in is supported.
	 * NOTE(review): the caller-supplied @autoneg value is overwritten
	 * here (it is reused as the out-parameter) -- confirm intended. */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		/* avoid a redundant register write when nothing changed */
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == 0) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on the
		 * new stored values
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
                                         bool autoneg,
                                         bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	                                      autoneg_wait_to_complete);
	/* Set up MAC; note its return value is not checked -- the PHY
	 * setup status above is what gets reported to the caller */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		/* missing SFP is non-fatal: continue with the MAC reset */
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* mask ECC-related error bits in GHECCR */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* a PHY error takes precedence over the MAC reset status */
	if (phy_status)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* write the VMDq index into the VIND field of RAH */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return 0;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;


	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* only touch the register if the VIND field is actually set */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return 0;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	/* VLAN ids are 12 bits (0..4095) */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F; /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F; /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index
 * associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	/* zero every VLAN filter table entry */
	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	/* zero the VMDq index nibbles (4 byte arrays per entry) */
	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return 0;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	/* NOTE(review): the read path issues IXGBE_ATLASCTL_WRITE_CMD with
	 * the register address, then reads the result back after a 10us
	 * settle -- presumed to be the documented read handshake; confirm
	 * against the 82598 datasheet */
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	/* the low byte of ATLASCTL holds the register value */
	*val = (u8)atlas_ctl;

	return 0;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	/* address in bits 15:8, value in the low byte */
	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                       u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	/* Only the NL PHY exposes the SDA/SCL bridge used below */
	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
		                      MDIO_MMD_PMAPMD,
		                      sfp_addr);

		/* Poll status until the transaction leaves IN_PROGRESS,
		 * up to 100 iterations of ~10-20 ms each */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
			                     MDIO_MMD_PMAPMD,
			                     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data; the byte is returned in the high half */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
		                     MDIO_MMD_PMAPMD, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Query the PHY's extended-ability register and OR in every
		 * copper speed it advertises; skip the AUTOC decoding below.
		 */
		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
				     MDIO_MMD_PMAPMD, &ext_ability);
		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Non-copper: derive the layer from the AUTOC link mode select
	 * and the PMA/PMD type fields.
	 */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* Backplane auto-neg: both KX and KX4 may be supported
		 * simultaneously, so OR the flags rather than assign.
		 */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* An nl PHY with an SFP module overrides the AUTOC-derived value
	 * with the actual module type.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Finally, some device IDs have a fixed, known physical layer that
	 * takes precedence over everything decoded above.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 * port devices.
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	/* Let the generic helper assign the function/LAN id first. */
	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	/* 0 and 0xFFFF indicate an absent/blank EEPROM pointer - skip. */
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_set_rxpba_82598 - Configure packet buffers
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to configure (0 = no-op)
 * @headroom: reserved headroom (unused on 82598)
 * @strategy: packet buffer allocation strategy (PBA_STRATEGY_*)
 *
 * Configure packet buffers.
 */
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
				  int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}

/* MAC operations vtable for the 82598; mixes generic helpers with the
 * 82598-specific implementations defined in this file.
 */
static struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw		= &ixgbe_init_hw_generic,
	.reset_hw		= &ixgbe_reset_hw_82598,
	.start_hw		= &ixgbe_start_hw_82598,
	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
	.get_media_type		= &ixgbe_get_media_type_82598,
	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
	.enable_rx_dma		= &ixgbe_enable_rx_dma_generic,
	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
	.stop_adapter		= &ixgbe_stop_adapter_generic,
	.get_bus_info		= &ixgbe_get_bus_info_generic,
	.set_lan_id		= &ixgbe_set_lan_id_multi_port_pcie_82598,
	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
	.setup_link		= &ixgbe_setup_mac_link_82598,
	.set_rxpba		= &ixgbe_set_rxpba_82598,
	.check_link		= &ixgbe_check_mac_link_82598,
	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
	.led_on			= &ixgbe_led_on_generic,
	.led_off		= &ixgbe_led_off_generic,
	.blink_led_start	= &ixgbe_blink_led_start_generic,
	.blink_led_stop		= &ixgbe_blink_led_stop_generic,
	.set_rar		= &ixgbe_set_rar_generic,
	.clear_rar		= &ixgbe_clear_rar_generic,
	.set_vmdq		= &ixgbe_set_vmdq_82598,
	.clear_vmdq		= &ixgbe_clear_vmdq_82598,
	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
	.enable_mc		= &ixgbe_enable_mc_generic,
	.disable_mc		= &ixgbe_disable_mc_generic,
	.clear_vfta		= &ixgbe_clear_vfta_82598,
	.set_vfta		= &ixgbe_set_vfta_82598,
	.fc_enable		= &ixgbe_fc_enable_82598,
	/* Features not present on 82598 hardware are left NULL. */
	.set_fw_drv_ver		= NULL,
	.acquire_swfw_sync	= &ixgbe_acquire_swfw_sync,
	.release_swfw_sync	= &ixgbe_release_swfw_sync,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
};

/* EEPROM operations vtable - 82598 uses the generic EERD/bit-bang helpers. */
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params		= &ixgbe_init_eeprom_params_generic,
	.read			= &ixgbe_read_eerd_generic,
	.write			= &ixgbe_write_eeprom_generic,
	.write_buffer		= &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer		= &ixgbe_read_eerd_buffer_generic,
	.calc_checksum		= &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
};

/* PHY operations vtable for the 82598. */
static struct ixgbe_phy_operations phy_ops_82598 = {
	.identify		= &ixgbe_identify_phy_generic,
	.identify_sfp		= &ixgbe_identify_sfp_module_generic,
	.init			= &ixgbe_init_phy_ops_82598,
	.reset			= &ixgbe_reset_phy_generic,
	.read_reg		= &ixgbe_read_phy_reg_generic,
	.write_reg		= &ixgbe_write_phy_reg_generic,
	.setup_link		= &ixgbe_setup_phy_link_generic,
	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp		= &ixgbe_tn_check_overtemp,
};

/* Top-level device descriptor exported to the probe code. */
struct ixgbe_info ixgbe_82598_info = {
	.mac			= ixgbe_mac_82598EB,
	.get_invariants		= &ixgbe_get_invariants_82598,
	.mac_ops		= &mac_ops_82598,
	.eeprom_ops		= &eeprom_ops_82598,
	.phy_ops		= &phy_ops_82598,
};