/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES   16
#define IXGBE_82598_MC_TBL_SIZE  128
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE   512

static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 250ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
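 *  In all cases the completion timeout resend bit in GCR is cleared afterwards.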
 **/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pci_read_config_word(adapter->pdev,
			     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	pci_write_config_word(adapter->pdev,
			      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the phy type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during get_invariants because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 list_offset, data_offset;

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
			&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != 0)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;

	/* set the completion timeout for interface */
	if (ret_val == 0)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = 0;
	u32 autoc = 0;

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	/*
	 * Validate the water mark configuration for packet buffer 0.  Zero
	 * water marks indicate that the packet buffer was not configured
	 * and the watermarks for packet buffer 0 should always be configured.
	 */
	if (!hw->fc.low_water ||
	    !hw->fc.high_water[0] ||
	    !hw->fc.pause_time) {
		hw_dbg(hw, "Invalid water mark configuration\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msleep(50);

	return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return 0;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);

		if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
		    (an_reg & MDIO_STAT1_LSTATUS))
			break;

		msleep(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		hw_dbg(hw, "Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return 0;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from register 0xC79F.
	 * Bit 0 set indicates link is up/ready; clear indicates link down.
	 * 0xC00C is read to check that the XAUI lanes are active.  Bit 0
	 * clear indicates active; set indicates inactive.
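	 *
	 * The link register below is read twice back to back and only the
	 * result of the second read is used.  If the SERDES PHY reports
	 * link down, the MAC LINKS register is not consulted at all.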
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     MDIO_MMD_PMAPMD,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     MDIO_MMD_PMAPMD,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		if (!*link_up)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

out:
	return 0;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Sets the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	s32 status = 0;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == 0) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on
		 * the new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}

/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 *  Sets the link speed in the PHY according to the requested speed and
 *  restarts the MAC link.
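 *  The PHY is configured first via phy.ops.setup_link_speed, the MAC link
 *  is then restarted, and the status of the PHY setup is returned.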
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status)
		status = phy_status;

	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return 0;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to disassociate from the VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return 0;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
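 *  On the 82598 each VLAN id has both a valid bit in the VFTA and a 4-bit
 *  VMD queue index stored as a nibble in the VFTAVIND byte arrays; this
 *  function updates both.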
 **/
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return 0;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

/**
 *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @dev_addr: address to read from
 *  @byte_offset: byte offset to read from dev_addr
 *  @eeprom_data: value read
 *
 *  Performs an 8 bit read of the SFP module's data over the I2C interface.
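 *  The combined device address and byte offset is written to the NL PHY's
 *  SDA/SCL address register, the status register is polled until the read
 *  completes, and the result is returned from the data register.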
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  MDIO_MMD_PMAPMD,
					  sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						 MDIO_MMD_PMAPMD,
						 &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 MDIO_MMD_PMAPMD, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs an 8 bit read of the SFP module's EEPROM over the I2C interface.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at address 0xA2
 *  @sff8472_data: value read
 *
 *  Performs an 8 bit read of the SFP module's SFF-8472 data over I2C.
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
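 *  The copper PHY extended abilities are checked first; otherwise the AUTOC
 *  link mode, the SFP+ module type (NL PHY only) and the device ID determine
 *  the reported physical layer.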
 **/
static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
				     MDIO_MMD_PMAPMD, &ext_ability);
		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
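 *  The LAN0 enable state is read from the EEPROM PCIe control word; if LAN0
 *  is completely disabled, the PCI function number is forced to 0.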
 **/
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_set_rxpba_82598 - Configure packet buffers
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to configure
 *  @headroom: reserve n KB of headroom (not used on the 82598)
 *  @strategy: packet buffer allocation strategy
 *
 *  Configure packet buffers.
 */
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
				  int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}

static struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw = &ixgbe_init_hw_generic,
	.reset_hw = &ixgbe_reset_hw_82598,
	.start_hw = &ixgbe_start_hw_82598,
	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type = &ixgbe_get_media_type_82598,
	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
	.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
	.get_mac_addr = &ixgbe_get_mac_addr_generic,
	.stop_adapter = &ixgbe_stop_adapter_generic,
	.get_bus_info = &ixgbe_get_bus_info_generic,
	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
	.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
	.setup_link = &ixgbe_setup_mac_link_82598,
	.set_rxpba = &ixgbe_set_rxpba_82598,
	.check_link = &ixgbe_check_mac_link_82598,
	.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
	.led_on = &ixgbe_led_on_generic,
	.led_off = &ixgbe_led_off_generic,
	.blink_led_start = &ixgbe_blink_led_start_generic,
	.blink_led_stop = &ixgbe_blink_led_stop_generic,
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_82598,
	.clear_vmdq = &ixgbe_clear_vmdq_82598,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
	.enable_mc = &ixgbe_enable_mc_generic,
	.disable_mc = &ixgbe_disable_mc_generic,
	.clear_vfta = &ixgbe_clear_vfta_82598,
	.set_vfta = &ixgbe_set_vfta_82598,
	.fc_enable = &ixgbe_fc_enable_82598,
	.set_fw_drv_ver = NULL,
	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync = &ixgbe_release_swfw_sync,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.mng_fw_enabled = NULL,
};

static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params = &ixgbe_init_eeprom_params_generic,
	.read = &ixgbe_read_eerd_generic,
	.write = &ixgbe_write_eeprom_generic,
	.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer = &ixgbe_read_eerd_buffer_generic,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};

static struct ixgbe_phy_operations phy_ops_82598 = {
	.identify = &ixgbe_identify_phy_generic,
	.identify_sfp = &ixgbe_identify_module_generic,
	.init = &ixgbe_init_phy_ops_82598,
	.reset = &ixgbe_reset_phy_generic,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.read_reg_mdi = &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};

struct ixgbe_info ixgbe_82598_info = {
	.mac = ixgbe_mac_82598EB,
	.get_invariants = &ixgbe_get_invariants_82598,
	.mac_ops = &mac_ops_82598,
	.eeprom_ops = &eeprom_ops_82598,
	.phy_ops = &phy_ops_82598,
};