/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES   16
#define IXGBE_82598_MC_TBL_SIZE   128
#define IXGBE_82598_VFT_TBL_SIZE  128
#define IXGBE_82598_RX_PB_SIZE    512

static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data);

/**
 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82598 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 250ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	if (ixgbe_removed(hw->hw_addr))
		return;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the phy type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during get_invariants because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 **/
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 list_offset, data_offset;

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
			&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != 0)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == 0)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = 0;
	u32 autoc = 0;

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 * ixgbe_get_media_type_82598 - Determines media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598, having Rx FC on causes resets while doing 1G,
	 * so if it's on, turn it off once we know link_speed. For
	 * more details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
404 */ 405 break; 406 case ixgbe_fc_rx_pause: 407 /* 408 * Rx Flow control is enabled and Tx Flow control is 409 * disabled by software override. Since there really 410 * isn't a way to advertise that we are capable of RX 411 * Pause ONLY, we will advertise that we support both 412 * symmetric and asymmetric Rx PAUSE. Later, we will 413 * disable the adapter's ability to send PAUSE frames. 414 */ 415 fctrl_reg |= IXGBE_FCTRL_RFCE; 416 break; 417 case ixgbe_fc_tx_pause: 418 /* 419 * Tx Flow control is enabled, and Rx Flow control is 420 * disabled by software override. 421 */ 422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 423 break; 424 case ixgbe_fc_full: 425 /* Flow control (both Rx and Tx) is enabled by SW override. */ 426 fctrl_reg |= IXGBE_FCTRL_RFCE; 427 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 428 break; 429 default: 430 hw_dbg(hw, "Flow control param set incorrectly\n"); 431 ret_val = IXGBE_ERR_CONFIG; 432 goto out; 433 break; 434 } 435 436 /* Set 802.3x based flow control settings. */ 437 fctrl_reg |= IXGBE_FCTRL_DPF; 438 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 439 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 440 441 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 442 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 443 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 444 hw->fc.high_water[i]) { 445 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 446 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 447 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); 448 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); 449 } else { 450 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); 451 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); 452 } 453 454 } 455 456 /* Configure pause time (2 TCs per register) */ 457 reg = hw->fc.pause_time * 0x00010001; 458 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 459 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 460 461 /* Configure flow control refresh threshold value */ 462 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 463 464 out: 465 return ret_val; 466 } 467 468 /** 469 * ixgbe_start_mac_link_82598 - Configures MAC link settings 470 * @hw: pointer to hardware structure 471 * 472 * Configures link settings based on values in the ixgbe_hw struct. 473 * Restarts the link. Performs autonegotiation if needed. 
474 **/ 475 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, 476 bool autoneg_wait_to_complete) 477 { 478 u32 autoc_reg; 479 u32 links_reg; 480 u32 i; 481 s32 status = 0; 482 483 /* Restart link */ 484 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 485 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 486 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 487 488 /* Only poll for autoneg to complete if specified to do so */ 489 if (autoneg_wait_to_complete) { 490 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 491 IXGBE_AUTOC_LMS_KX4_AN || 492 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 493 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 494 links_reg = 0; /* Just in case Autoneg time = 0 */ 495 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 496 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 497 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 498 break; 499 msleep(100); 500 } 501 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 502 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 503 hw_dbg(hw, "Autonegotiation did not complete.\n"); 504 } 505 } 506 } 507 508 /* Add delay to filter out noises during initial link setup */ 509 msleep(50); 510 511 return status; 512 } 513 514 /** 515 * ixgbe_validate_link_ready - Function looks for phy link 516 * @hw: pointer to hardware structure 517 * 518 * Function indicates success when phy link is available. If phy is not ready 519 * within 5 seconds of MAC indicating link, the function returns error. 520 **/ 521 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) 522 { 523 u32 timeout; 524 u16 an_reg; 525 526 if (hw->device_id != IXGBE_DEV_ID_82598AT2) 527 return 0; 528 529 for (timeout = 0; 530 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { 531 hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); 532 533 if ((an_reg & MDIO_AN_STAT1_COMPLETE) && 534 (an_reg & MDIO_STAT1_LSTATUS)) 535 break; 536 537 msleep(100); 538 } 539 540 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { 541 hw_dbg(hw, "Link was indicated but link is down\n"); 542 return IXGBE_ERR_LINK_SETUP; 543 } 544 545 return 0; 546 } 547 548 /** 549 * ixgbe_check_mac_link_82598 - Get link/speed status 550 * @hw: pointer to hardware structure 551 * @speed: pointer to link speed 552 * @link_up: true is link is up, false otherwise 553 * @link_up_wait_to_complete: bool used to wait for link up or not 554 * 555 * Reads the links register to determine if link is up and the current speed 556 **/ 557 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, 558 ixgbe_link_speed *speed, bool *link_up, 559 bool link_up_wait_to_complete) 560 { 561 u32 links_reg; 562 u32 i; 563 u16 link_reg, adapt_comp_reg; 564 565 /* 566 * SERDES PHY requires us to read link status from register 0xC79F. 567 * Bit 0 set indicates link is up/ready; clear indicates link down. 568 * 0xC00C is read to check that the XAUI lanes are active. Bit 0 569 * clear indicates active; set indicates inactive. 
570 */ 571 if (hw->phy.type == ixgbe_phy_nl) { 572 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 573 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 574 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, 575 &adapt_comp_reg); 576 if (link_up_wait_to_complete) { 577 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 578 if ((link_reg & 1) && 579 ((adapt_comp_reg & 1) == 0)) { 580 *link_up = true; 581 break; 582 } else { 583 *link_up = false; 584 } 585 msleep(100); 586 hw->phy.ops.read_reg(hw, 0xC79F, 587 MDIO_MMD_PMAPMD, 588 &link_reg); 589 hw->phy.ops.read_reg(hw, 0xC00C, 590 MDIO_MMD_PMAPMD, 591 &adapt_comp_reg); 592 } 593 } else { 594 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) 595 *link_up = true; 596 else 597 *link_up = false; 598 } 599 600 if (!*link_up) 601 goto out; 602 } 603 604 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 605 if (link_up_wait_to_complete) { 606 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 607 if (links_reg & IXGBE_LINKS_UP) { 608 *link_up = true; 609 break; 610 } else { 611 *link_up = false; 612 } 613 msleep(100); 614 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 615 } 616 } else { 617 if (links_reg & IXGBE_LINKS_UP) 618 *link_up = true; 619 else 620 *link_up = false; 621 } 622 623 if (links_reg & IXGBE_LINKS_SPEED) 624 *speed = IXGBE_LINK_SPEED_10GB_FULL; 625 else 626 *speed = IXGBE_LINK_SPEED_1GB_FULL; 627 628 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && 629 (ixgbe_validate_link_ready(hw) != 0)) 630 *link_up = false; 631 632 out: 633 return 0; 634 } 635 636 /** 637 * ixgbe_setup_mac_link_82598 - Set MAC link speed 638 * @hw: pointer to hardware structure 639 * @speed: new link speed 640 * @autoneg_wait_to_complete: true when waiting for completion is needed 641 * 642 * Set the link speed in the AUTOC register and restarts link. 643 **/ 644 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, 645 ixgbe_link_speed speed, 646 bool autoneg_wait_to_complete) 647 { 648 bool autoneg = false; 649 s32 status = 0; 650 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 651 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 652 u32 autoc = curr_autoc; 653 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 654 655 /* Check to see if speed passed in is supported. */ 656 ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); 657 speed &= link_capabilities; 658 659 if (speed == IXGBE_LINK_SPEED_UNKNOWN) 660 status = IXGBE_ERR_LINK_SETUP; 661 662 /* Set KX4/KX support according to speed requested */ 663 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || 664 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 665 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; 666 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 667 autoc |= IXGBE_AUTOC_KX4_SUPP; 668 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 669 autoc |= IXGBE_AUTOC_KX_SUPP; 670 if (autoc != curr_autoc) 671 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 672 } 673 674 if (status == 0) { 675 /* 676 * Setup and restart the link based on the new values in 677 * ixgbe_hw This will write the AUTOC register based on the new 678 * stored values 679 */ 680 status = ixgbe_start_mac_link_82598(hw, 681 autoneg_wait_to_complete); 682 } 683 684 return status; 685 } 686 687 688 /** 689 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field 690 * @hw: pointer to hardware structure 691 * @speed: new link speed 692 * @autoneg_wait_to_complete: true if waiting is needed to complete 693 * 694 * Sets the link speed in the AUTOC register in the MAC and restarts link. 
695 **/ 696 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 697 ixgbe_link_speed speed, 698 bool autoneg_wait_to_complete) 699 { 700 s32 status; 701 702 /* Setup the PHY according to input speed */ 703 status = hw->phy.ops.setup_link_speed(hw, speed, 704 autoneg_wait_to_complete); 705 /* Set up MAC */ 706 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 707 708 return status; 709 } 710 711 /** 712 * ixgbe_reset_hw_82598 - Performs hardware reset 713 * @hw: pointer to hardware structure 714 * 715 * Resets the hardware by resetting the transmit and receive units, masks and 716 * clears all interrupts, performing a PHY reset, and performing a link (MAC) 717 * reset. 718 **/ 719 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) 720 { 721 s32 status = 0; 722 s32 phy_status = 0; 723 u32 ctrl; 724 u32 gheccr; 725 u32 i; 726 u32 autoc; 727 u8 analog_val; 728 729 /* Call adapter stop to disable tx/rx and clear interrupts */ 730 status = hw->mac.ops.stop_adapter(hw); 731 if (status != 0) 732 goto reset_hw_out; 733 734 /* 735 * Power up the Atlas Tx lanes if they are currently powered down. 736 * Atlas Tx lanes are powered down for MAC loopback tests, but 737 * they are not automatically restored on reset. 738 */ 739 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 740 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 741 /* Enable Tx Atlas so packets can be transmitted again */ 742 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 743 &analog_val); 744 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 745 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 746 analog_val); 747 748 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 749 &analog_val); 750 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 751 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 752 analog_val); 753 754 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 755 &analog_val); 756 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 757 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 758 analog_val); 759 760 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 761 &analog_val); 762 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 763 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 764 analog_val); 765 } 766 767 /* Reset PHY */ 768 if (hw->phy.reset_disable == false) { 769 /* PHY ops must be identified and initialized prior to reset */ 770 771 /* Init PHY and function pointers, perform SFP setup */ 772 phy_status = hw->phy.ops.init(hw); 773 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) 774 goto reset_hw_out; 775 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 776 goto mac_reset_top; 777 778 hw->phy.ops.reset(hw); 779 } 780 781 mac_reset_top: 782 /* 783 * Issue global reset to the MAC. This needs to be a SW reset. 784 * If link reset is used, it might reset the MAC when mng is using it 785 */ 786 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; 787 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 788 IXGBE_WRITE_FLUSH(hw); 789 790 /* Poll for reset bit to self-clear indicating reset is complete */ 791 for (i = 0; i < 10; i++) { 792 udelay(1); 793 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 794 if (!(ctrl & IXGBE_CTRL_RST)) 795 break; 796 } 797 if (ctrl & IXGBE_CTRL_RST) { 798 status = IXGBE_ERR_RESET_FAILED; 799 hw_dbg(hw, "Reset polling failed to complete.\n"); 800 } 801 802 msleep(50); 803 804 /* 805 * Double resets are required for recovery from certain error 806 * conditions. Between resets, it is necessary to stall to allow time 807 * for any pending HW events to complete. 
808 */ 809 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 810 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 811 goto mac_reset_top; 812 } 813 814 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); 815 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); 816 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); 817 818 /* 819 * Store the original AUTOC value if it has not been 820 * stored off yet. Otherwise restore the stored original 821 * AUTOC value since the reset operation sets back to deaults. 822 */ 823 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 824 if (hw->mac.orig_link_settings_stored == false) { 825 hw->mac.orig_autoc = autoc; 826 hw->mac.orig_link_settings_stored = true; 827 } else if (autoc != hw->mac.orig_autoc) { 828 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 829 } 830 831 /* Store the permanent mac address */ 832 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 833 834 /* 835 * Store MAC address from RAR0, clear receive address registers, and 836 * clear the multicast table 837 */ 838 hw->mac.ops.init_rx_addrs(hw); 839 840 reset_hw_out: 841 if (phy_status) 842 status = phy_status; 843 844 return status; 845 } 846 847 /** 848 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address 849 * @hw: pointer to hardware struct 850 * @rar: receive address register index to associate with a VMDq index 851 * @vmdq: VMDq set index 852 **/ 853 static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 854 { 855 u32 rar_high; 856 u32 rar_entries = hw->mac.num_rar_entries; 857 858 /* Make sure we are using a valid rar index range */ 859 if (rar >= rar_entries) { 860 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 861 return IXGBE_ERR_INVALID_ARGUMENT; 862 } 863 864 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 865 rar_high &= ~IXGBE_RAH_VIND_MASK; 866 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); 867 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); 868 return 0; 869 } 870 871 /** 872 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address 873 * @hw: pointer to hardware struct 874 * @rar: receive address register index to associate with a VMDq index 875 * @vmdq: VMDq clear index (not used in 82598, but elsewhere) 876 **/ 877 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 878 { 879 u32 rar_high; 880 u32 rar_entries = hw->mac.num_rar_entries; 881 882 883 /* Make sure we are using a valid rar index range */ 884 if (rar >= rar_entries) { 885 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 886 return IXGBE_ERR_INVALID_ARGUMENT; 887 } 888 889 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 890 if (rar_high & IXGBE_RAH_VIND_MASK) { 891 rar_high &= ~IXGBE_RAH_VIND_MASK; 892 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); 893 } 894 895 return 0; 896 } 897 898 /** 899 * ixgbe_set_vfta_82598 - Set VLAN filter table 900 * @hw: pointer to hardware structure 901 * @vlan: VLAN id to write to VLAN filter 902 * @vind: VMDq output index that maps queue to VLAN id in VFTA 903 * @vlan_on: boolean flag to turn on/off VLAN in VFTA 904 * 905 * Turn on/off specified VLAN in the VLAN filter table. 
906 **/ 907 static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, 908 bool vlan_on) 909 { 910 u32 regindex; 911 u32 bitindex; 912 u32 bits; 913 u32 vftabyte; 914 915 if (vlan > 4095) 916 return IXGBE_ERR_PARAM; 917 918 /* Determine 32-bit word position in array */ 919 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ 920 921 /* Determine the location of the (VMD) queue index */ 922 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ 923 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ 924 925 /* Set the nibble for VMD queue index */ 926 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); 927 bits &= (~(0x0F << bitindex)); 928 bits |= (vind << bitindex); 929 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); 930 931 /* Determine the location of the bit for this VLAN id */ 932 bitindex = vlan & 0x1F; /* lower five bits */ 933 934 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 935 if (vlan_on) 936 /* Turn on this VLAN id */ 937 bits |= (1 << bitindex); 938 else 939 /* Turn off this VLAN id */ 940 bits &= ~(1 << bitindex); 941 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); 942 943 return 0; 944 } 945 946 /** 947 * ixgbe_clear_vfta_82598 - Clear VLAN filter table 948 * @hw: pointer to hardware structure 949 * 950 * Clears the VLAN filer table, and the VMDq index associated with the filter 951 **/ 952 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) 953 { 954 u32 offset; 955 u32 vlanbyte; 956 957 for (offset = 0; offset < hw->mac.vft_size; offset++) 958 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 959 960 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) 961 for (offset = 0; offset < hw->mac.vft_size; offset++) 962 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 963 0); 964 965 return 0; 966 } 967 968 /** 969 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register 970 * @hw: pointer to hardware structure 971 * @reg: analog register to read 972 * @val: read value 973 * 974 * Performs read operation to Atlas analog register specified. 975 **/ 976 static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) 977 { 978 u32 atlas_ctl; 979 980 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, 981 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 982 IXGBE_WRITE_FLUSH(hw); 983 udelay(10); 984 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 985 *val = (u8)atlas_ctl; 986 987 return 0; 988 } 989 990 /** 991 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register 992 * @hw: pointer to hardware structure 993 * @reg: atlas register to write 994 * @val: value to write 995 * 996 * Performs write operation to Atlas analog register specified. 997 **/ 998 static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) 999 { 1000 u32 atlas_ctl; 1001 1002 atlas_ctl = (reg << 8) | val; 1003 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); 1004 IXGBE_WRITE_FLUSH(hw); 1005 udelay(10); 1006 1007 return 0; 1008 } 1009 1010 /** 1011 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. 1012 * @hw: pointer to hardware structure 1013 * @dev_addr: address to read from 1014 * @byte_offset: byte offset to read from dev_addr 1015 * @eeprom_data: value read 1016 * 1017 * Performs 8 byte read operation to SFP module's data over I2C interface. 
1018 **/ 1019 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, 1020 u8 byte_offset, u8 *eeprom_data) 1021 { 1022 s32 status = 0; 1023 u16 sfp_addr = 0; 1024 u16 sfp_data = 0; 1025 u16 sfp_stat = 0; 1026 u16 gssr; 1027 u32 i; 1028 1029 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 1030 gssr = IXGBE_GSSR_PHY1_SM; 1031 else 1032 gssr = IXGBE_GSSR_PHY0_SM; 1033 1034 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) 1035 return IXGBE_ERR_SWFW_SYNC; 1036 1037 if (hw->phy.type == ixgbe_phy_nl) { 1038 /* 1039 * phy SDA/SCL registers are at addresses 0xC30A to 1040 * 0xC30D. These registers are used to talk to the SFP+ 1041 * module's EEPROM through the SDA/SCL (I2C) interface. 1042 */ 1043 sfp_addr = (dev_addr << 8) + byte_offset; 1044 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); 1045 hw->phy.ops.write_reg_mdi(hw, 1046 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, 1047 MDIO_MMD_PMAPMD, 1048 sfp_addr); 1049 1050 /* Poll status */ 1051 for (i = 0; i < 100; i++) { 1052 hw->phy.ops.read_reg_mdi(hw, 1053 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, 1054 MDIO_MMD_PMAPMD, 1055 &sfp_stat); 1056 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; 1057 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) 1058 break; 1059 usleep_range(10000, 20000); 1060 } 1061 1062 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { 1063 hw_dbg(hw, "EEPROM read did not pass.\n"); 1064 status = IXGBE_ERR_SFP_NOT_PRESENT; 1065 goto out; 1066 } 1067 1068 /* Read data */ 1069 hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, 1070 MDIO_MMD_PMAPMD, &sfp_data); 1071 1072 *eeprom_data = (u8)(sfp_data >> 8); 1073 } else { 1074 status = IXGBE_ERR_PHY; 1075 } 1076 1077 out: 1078 hw->mac.ops.release_swfw_sync(hw, gssr); 1079 return status; 1080 } 1081 1082 /** 1083 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. 1084 * @hw: pointer to hardware structure 1085 * @byte_offset: EEPROM byte offset to read 1086 * @eeprom_data: value read 1087 * 1088 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 1089 **/ 1090 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 1091 u8 *eeprom_data) 1092 { 1093 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, 1094 byte_offset, eeprom_data); 1095 } 1096 1097 /** 1098 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. 1099 * @hw: pointer to hardware structure 1100 * @byte_offset: byte offset at address 0xA2 1101 * @eeprom_data: value read 1102 * 1103 * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C 1104 **/ 1105 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, 1106 u8 *sff8472_data) 1107 { 1108 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, 1109 byte_offset, sff8472_data); 1110 } 1111 1112 /** 1113 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type 1114 * @hw: pointer to hardware structure 1115 * 1116 * Determines physical layer capabilities of the current configuration. 
1117 **/ 1118 static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) 1119 { 1120 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1121 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1122 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 1123 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 1124 u16 ext_ability = 0; 1125 1126 hw->phy.ops.identify(hw); 1127 1128 /* Copper PHY must be checked before AUTOC LMS to determine correct 1129 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ 1130 switch (hw->phy.type) { 1131 case ixgbe_phy_tn: 1132 case ixgbe_phy_cu_unknown: 1133 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, 1134 MDIO_MMD_PMAPMD, &ext_ability); 1135 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1136 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1137 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1138 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 1139 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1140 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1141 goto out; 1142 default: 1143 break; 1144 } 1145 1146 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1147 case IXGBE_AUTOC_LMS_1G_AN: 1148 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 1149 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) 1150 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; 1151 else 1152 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; 1153 break; 1154 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 1155 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) 1156 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 1157 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) 1158 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 1159 else /* XAUI */ 1160 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1161 break; 1162 case IXGBE_AUTOC_LMS_KX4_AN: 1163 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: 1164 if (autoc & IXGBE_AUTOC_KX_SUPP) 1165 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 1166 if (autoc & IXGBE_AUTOC_KX4_SUPP) 1167 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 1168 break; 1169 default: 1170 break; 1171 } 1172 1173 if (hw->phy.type == ixgbe_phy_nl) { 1174 hw->phy.ops.identify_sfp(hw); 1175 1176 switch (hw->phy.sfp_type) { 1177 case ixgbe_sfp_type_da_cu: 1178 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 1179 break; 1180 case ixgbe_sfp_type_sr: 1181 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 1182 break; 1183 case ixgbe_sfp_type_lr: 1184 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 1185 break; 1186 default: 1187 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1188 break; 1189 } 1190 } 1191 1192 switch (hw->device_id) { 1193 case IXGBE_DEV_ID_82598_DA_DUAL_PORT: 1194 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 1195 break; 1196 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 1197 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 1198 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: 1199 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 1200 break; 1201 case IXGBE_DEV_ID_82598EB_XF_LR: 1202 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 1203 break; 1204 default: 1205 break; 1206 } 1207 1208 out: 1209 return physical_layer; 1210 } 1211 1212 /** 1213 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple 1214 * port devices. 1215 * @hw: pointer to the HW structure 1216 * 1217 * Calls common function and corrects issue with some single port devices 1218 * that enable LAN1 but not LAN0. 
1219 **/ 1220 static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) 1221 { 1222 struct ixgbe_bus_info *bus = &hw->bus; 1223 u16 pci_gen = 0; 1224 u16 pci_ctrl2 = 0; 1225 1226 ixgbe_set_lan_id_multi_port_pcie(hw); 1227 1228 /* check if LAN0 is disabled */ 1229 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); 1230 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { 1231 1232 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); 1233 1234 /* if LAN0 is completely disabled force function to 0 */ 1235 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && 1236 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && 1237 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { 1238 1239 bus->func = 0; 1240 } 1241 } 1242 } 1243 1244 /** 1245 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer 1246 * @hw: pointer to hardware structure 1247 * @num_pb: number of packet buffers to allocate 1248 * @headroom: reserve n KB of headroom 1249 * @strategy: packet buffer allocation strategy 1250 **/ 1251 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, 1252 u32 headroom, int strategy) 1253 { 1254 u32 rxpktsize = IXGBE_RXPBSIZE_64KB; 1255 u8 i = 0; 1256 1257 if (!num_pb) 1258 return; 1259 1260 /* Setup Rx packet buffer sizes */ 1261 switch (strategy) { 1262 case PBA_STRATEGY_WEIGHTED: 1263 /* Setup the first four at 80KB */ 1264 rxpktsize = IXGBE_RXPBSIZE_80KB; 1265 for (; i < 4; i++) 1266 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 1267 /* Setup the last four at 48KB...don't re-init i */ 1268 rxpktsize = IXGBE_RXPBSIZE_48KB; 1269 /* Fall Through */ 1270 case PBA_STRATEGY_EQUAL: 1271 default: 1272 /* Divide the remaining Rx packet buffer evenly among the TCs */ 1273 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) 1274 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 1275 break; 1276 } 1277 1278 /* Setup Tx packet buffer sizes */ 1279 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) 1280 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); 1281 } 1282 1283 static struct ixgbe_mac_operations mac_ops_82598 = { 1284 .init_hw = &ixgbe_init_hw_generic, 1285 .reset_hw = &ixgbe_reset_hw_82598, 1286 .start_hw = &ixgbe_start_hw_82598, 1287 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 1288 .get_media_type = &ixgbe_get_media_type_82598, 1289 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, 1290 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 1291 .get_mac_addr = &ixgbe_get_mac_addr_generic, 1292 .stop_adapter = &ixgbe_stop_adapter_generic, 1293 .get_bus_info = &ixgbe_get_bus_info_generic, 1294 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, 1295 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, 1296 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, 1297 .setup_link = &ixgbe_setup_mac_link_82598, 1298 .set_rxpba = &ixgbe_set_rxpba_82598, 1299 .check_link = &ixgbe_check_mac_link_82598, 1300 .get_link_capabilities = &ixgbe_get_link_capabilities_82598, 1301 .led_on = &ixgbe_led_on_generic, 1302 .led_off = &ixgbe_led_off_generic, 1303 .blink_led_start = &ixgbe_blink_led_start_generic, 1304 .blink_led_stop = &ixgbe_blink_led_stop_generic, 1305 .set_rar = &ixgbe_set_rar_generic, 1306 .clear_rar = &ixgbe_clear_rar_generic, 1307 .set_vmdq = &ixgbe_set_vmdq_82598, 1308 .clear_vmdq = &ixgbe_clear_vmdq_82598, 1309 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 1310 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 1311 .enable_mc = &ixgbe_enable_mc_generic, 1312 .disable_mc = &ixgbe_disable_mc_generic, 1313 .clear_vfta = 
	.set_vfta = &ixgbe_set_vfta_82598,
	.fc_enable = &ixgbe_fc_enable_82598,
	.set_fw_drv_ver = NULL,
	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync = &ixgbe_release_swfw_sync,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.prot_autoc_read = &prot_autoc_read_generic,
	.prot_autoc_write = &prot_autoc_write_generic,
};

static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params = &ixgbe_init_eeprom_params_generic,
	.read = &ixgbe_read_eerd_generic,
	.write = &ixgbe_write_eeprom_generic,
	.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer = &ixgbe_read_eerd_buffer_generic,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};

static struct ixgbe_phy_operations phy_ops_82598 = {
	.identify = &ixgbe_identify_phy_generic,
	.identify_sfp = &ixgbe_identify_module_generic,
	.init = &ixgbe_init_phy_ops_82598,
	.reset = &ixgbe_reset_phy_generic,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.read_reg_mdi = &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};

struct ixgbe_info ixgbe_82598_info = {
	.mac = ixgbe_mac_82598EB,
	.get_invariants = &ixgbe_get_invariants_82598,
	.mac_ops = &mac_ops_82598,
	.eeprom_ops = &eeprom_ops_82598,
	.phy_ops = &phy_ops_82598,
};