/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/netdevice.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
                                        u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                                             u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
                                                 u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);

/* Base table for register values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
        IXGBE_MVALS_INIT(8259X)
};
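/* IXGBE_MVALS_INIT() expands to the per-family values for the registers
 * named by the IXGBE_MVALS_IDX_* indexes; families with a different layout
 * (X540, X550, ...) presumably supply their own table in the same order.
 */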
/**
 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
 * control
 * @hw: pointer to hardware structure
 *
 * There are several phys that do not support autoneg flow control. This
 * function checks the device id to see if the associated phy supports
 * autoneg flow control.
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
        bool supported = false;
        ixgbe_link_speed speed;
        bool link_up;

        switch (hw->phy.media_type) {
        case ixgbe_media_type_fiber:
                /* flow control autoneg black list */
                switch (hw->device_id) {
                case IXGBE_DEV_ID_X550EM_A_SFP:
                case IXGBE_DEV_ID_X550EM_A_SFP_N:
                        supported = false;
                        break;
                default:
                        hw->mac.ops.check_link(hw, &speed, &link_up, false);
                        /* if link is down, assume supported */
                        if (link_up)
                                supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
                                true : false;
                        else
                                supported = true;
                }

                break;
        case ixgbe_media_type_backplane:
                if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
                        supported = false;
                else
                        supported = true;
                break;
        case ixgbe_media_type_copper:
                /* only some copper devices support flow control autoneg */
                switch (hw->device_id) {
                case IXGBE_DEV_ID_82599_T3_LOM:
                case IXGBE_DEV_ID_X540T:
                case IXGBE_DEV_ID_X540T1:
                case IXGBE_DEV_ID_X550T:
                case IXGBE_DEV_ID_X550T1:
                case IXGBE_DEV_ID_X550EM_X_10G_T:
                case IXGBE_DEV_ID_X550EM_A_10G_T:
                case IXGBE_DEV_ID_X550EM_A_1G_T:
                case IXGBE_DEV_ID_X550EM_A_1G_T_L:
                        supported = true;
                        break;
                default:
                        break;
                }
        default:
                break;
        }

        if (!supported)
                hw_dbg(hw, "Device %x does not support flow control autoneg\n",
                       hw->device_id);

        return supported;
}

/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
        s32 ret_val = 0;
        u32 reg = 0, reg_bp = 0;
        u16 reg_cu = 0;
        bool locked = false;

        /*
         * Validate the requested mode.  Strict IEEE mode does not allow
         * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
         */
        if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
                hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
                return IXGBE_ERR_INVALID_LINK_SETTINGS;
        }

        /*
         * 10gig parts do not have a word in the EEPROM to determine the
         * default flow control setting, so we explicitly set it to full.
         */
        if (hw->fc.requested_mode == ixgbe_fc_default)
                hw->fc.requested_mode = ixgbe_fc_full;

        /*
         * Set up the 1G and 10G flow control advertisement registers so the
         * HW will be able to do fc autoneg once the cable is plugged in.  If
         * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
        switch (hw->phy.media_type) {
        case ixgbe_media_type_backplane:
                /* some MAC's need RMW protection on AUTOC */
                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
                if (ret_val)
                        return ret_val;

                /* fall through - only backplane uses autoc */
        case ixgbe_media_type_fiber:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

                break;
        case ixgbe_media_type_copper:
                hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
                                     MDIO_MMD_AN, &reg_cu);
                break;
        default:
                break;
        }

        /*
         * The possible values of fc.requested_mode are:
         * 0: Flow control is completely disabled
         * 1: Rx flow control is enabled (we can receive pause frames,
         *    but not send pause frames).
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
         * other: Invalid.
         */
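        /*
         * In the advertisement words programmed below, these modes map onto
         * the IEEE 802.3 pause bits as (SYM_PAUSE, ASM_PAUSE): none = (0, 0),
         * tx_pause = (0, 1), rx_pause/full = (1, 1).  An Rx-only capability
         * cannot be advertised by itself, which is why rx_pause falls through
         * to the full case below.
         */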
        switch (hw->fc.requested_mode) {
        case ixgbe_fc_none:
                /* Flow control completely disabled by software override. */
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
                if (hw->phy.media_type == ixgbe_media_type_backplane)
                        reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
                                    IXGBE_AUTOC_ASM_PAUSE);
                else if (hw->phy.media_type == ixgbe_media_type_copper)
                        reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_tx_pause:
                /*
                 * Tx Flow control is enabled, and Rx Flow control is
                 * disabled by software override.
                 */
                reg |= IXGBE_PCS1GANA_ASM_PAUSE;
                reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
                if (hw->phy.media_type == ixgbe_media_type_backplane) {
                        reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
                        reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
                } else if (hw->phy.media_type == ixgbe_media_type_copper) {
                        reg_cu |= IXGBE_TAF_ASM_PAUSE;
                        reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
                }
                break;
        case ixgbe_fc_rx_pause:
                /*
                 * Rx Flow control is enabled and Tx Flow control is
                 * disabled by software override. Since there really
                 * isn't a way to advertise that we are capable of RX
                 * Pause ONLY, we will advertise that we support both
                 * symmetric and asymmetric Rx PAUSE, as such we fall
                 * through to the fc_full statement. Later, we will
                 * disable the adapter's ability to send PAUSE frames.
                 */
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
                if (hw->phy.media_type == ixgbe_media_type_backplane)
                        reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
                                  IXGBE_AUTOC_ASM_PAUSE;
                else if (hw->phy.media_type == ixgbe_media_type_copper)
                        reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
                break;
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
                return IXGBE_ERR_CONFIG;
        }

        if (hw->mac.type != ixgbe_mac_X540) {
                /*
                 * Enable auto-negotiation between the MAC & PHY;
                 * the MAC will advertise clause 37 flow control.
                 */
                IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

                /* Disable AN timeout */
                if (hw->fc.strict_ieee)
                        reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

                IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
                hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
        }

        /*
         * AUTOC restart handles negotiation of 1G and 10G on backplane
         * and copper. There is no need to set the PCS1GCTL register.
         */
        if (hw->phy.media_type == ixgbe_media_type_backplane) {
                /* Need the SW/FW semaphore around AUTOC writes if 82599 and
                 * LESM is on, likewise reset_pipeline requires the lock as
                 * it also writes AUTOC.
                 */
                ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
                if (ret_val)
                        return ret_val;

        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
                   ixgbe_device_supports_autoneg_fc(hw)) {
                hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
                                      MDIO_MMD_AN, reg_cu);
        }

        hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
        return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type,
 * clears all on chip counters, initializes receive address registers,
 * multicast table, VLAN filter table, calls routine to set up link and
 * flow control settings, and leaves transmit and receive units disabled
 * and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
        s32 ret_val;
        u32 ctrl_ext;
        u16 device_caps;

        /* Set the media type */
        hw->phy.media_type = hw->mac.ops.get_media_type(hw);

        /* Identify the PHY */
        hw->phy.ops.identify(hw);

        /* Clear the VLAN filter table */
        hw->mac.ops.clear_vfta(hw);

        /* Clear statistics registers */
        hw->mac.ops.clear_hw_cntrs(hw);

        /* Set No Snoop Disable */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        /* Set up flow control if the MAC provides a method for doing so */
        if (hw->mac.ops.setup_fc) {
                ret_val = hw->mac.ops.setup_fc(hw);
                if (ret_val)
                        return ret_val;
        }

        /* Cache bit indicating need for crosstalk fix */
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
                hw->mac.ops.get_device_caps(hw, &device_caps);
                if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
                        hw->need_crosstalk_fix = false;
                else
                        hw->need_crosstalk_fix = true;
                break;
        default:
                hw->need_crosstalk_fix = false;
                break;
        }

        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;

        return 0;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
        u32 i;

        /* Clear the rate limiters */
        for (i = 0; i < hw->mac.max_tx_queues; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
        }
        IXGBE_WRITE_FLUSH(hw);

#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
        /* Disable relaxed ordering */
        for (i = 0; i < hw->mac.max_tx_queues; i++) {
                u32 regval;

                regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
                regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
        }

        for (i = 0; i < hw->mac.max_rx_queues; i++) {
                u32 regval;

                regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
                            IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
#endif
        return 0;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
        s32 status;

        /* Reset the hardware */
        status = hw->mac.ops.reset_hw(hw);

        if (status == 0) {
                /* Start the HW */
                status = hw->mac.ops.start_hw(hw);
        }

        /* Initialize the LED link active for LED blink support */
        if (hw->mac.ops.init_led_link_act)
                hw->mac.ops.init_led_link_act(hw);

        return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware.
 * Statistics counters are clear on read.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
        u16 i = 0;

        IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        IXGBE_READ_REG(hw, IXGBE_ILLERRC);
        IXGBE_READ_REG(hw, IXGBE_ERRBC);
        IXGBE_READ_REG(hw, IXGBE_MSPDC);
        for (i = 0; i < 8; i++)
                IXGBE_READ_REG(hw, IXGBE_MPC(i));

        IXGBE_READ_REG(hw, IXGBE_MLFC);
        IXGBE_READ_REG(hw, IXGBE_MRFC);
        IXGBE_READ_REG(hw, IXGBE_RLEC);
        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
        if (hw->mac.type >= ixgbe_mac_82599EB) {
                IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        } else {
                IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        }

        for (i = 0; i < 8; i++) {
                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
                if (hw->mac.type >= ixgbe_mac_82599EB) {
                        IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
                        IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                } else {
                        IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                        IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                }
        }
        if (hw->mac.type >= ixgbe_mac_82599EB)
                for (i = 0; i < 8; i++)
                        IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        IXGBE_READ_REG(hw, IXGBE_PRC64);
        IXGBE_READ_REG(hw, IXGBE_PRC127);
        IXGBE_READ_REG(hw, IXGBE_PRC255);
        IXGBE_READ_REG(hw, IXGBE_PRC511);
        IXGBE_READ_REG(hw, IXGBE_PRC1023);
        IXGBE_READ_REG(hw, IXGBE_PRC1522);
        IXGBE_READ_REG(hw, IXGBE_GPRC);
        IXGBE_READ_REG(hw, IXGBE_BPRC);
        IXGBE_READ_REG(hw, IXGBE_MPRC);
        IXGBE_READ_REG(hw, IXGBE_GPTC);
        IXGBE_READ_REG(hw, IXGBE_GORCL);
        IXGBE_READ_REG(hw, IXGBE_GORCH);
        IXGBE_READ_REG(hw, IXGBE_GOTCL);
        IXGBE_READ_REG(hw, IXGBE_GOTCH);
        if (hw->mac.type == ixgbe_mac_82598EB)
                for (i = 0; i < 8; i++)
                        IXGBE_READ_REG(hw, IXGBE_RNBC(i));
        IXGBE_READ_REG(hw, IXGBE_RUC);
        IXGBE_READ_REG(hw, IXGBE_RFC);
        IXGBE_READ_REG(hw, IXGBE_ROC);
        IXGBE_READ_REG(hw, IXGBE_RJC);
        IXGBE_READ_REG(hw, IXGBE_MNGPRC);
        IXGBE_READ_REG(hw, IXGBE_MNGPDC);
        IXGBE_READ_REG(hw, IXGBE_MNGPTC);
        IXGBE_READ_REG(hw, IXGBE_TORL);
        IXGBE_READ_REG(hw, IXGBE_TORH);
        IXGBE_READ_REG(hw, IXGBE_TPR);
        IXGBE_READ_REG(hw, IXGBE_TPT);
        IXGBE_READ_REG(hw, IXGBE_PTC64);
        IXGBE_READ_REG(hw, IXGBE_PTC127);
        IXGBE_READ_REG(hw, IXGBE_PTC255);
        IXGBE_READ_REG(hw, IXGBE_PTC511);
        IXGBE_READ_REG(hw, IXGBE_PTC1023);
        IXGBE_READ_REG(hw, IXGBE_PTC1522);
        IXGBE_READ_REG(hw, IXGBE_MPTC);
        IXGBE_READ_REG(hw, IXGBE_BPTC);
        for (i = 0; i < 16; i++) {
                IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                IXGBE_READ_REG(hw, IXGBE_QPTC(i));
                if (hw->mac.type >= ixgbe_mac_82599EB) {
                        IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
                        IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
                        IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                        IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
                        IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                } else {
                        IXGBE_READ_REG(hw, IXGBE_QBRC(i));
                        IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                }
        }

        if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
                if (hw->phy.id == 0)
                        hw->phy.ops.identify(hw);
                hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
                hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
                hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
                hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
        }

        return 0;
}
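/*
 * The PBA (board part number) lives in the EEPROM in one of two layouts:
 * the legacy one packs eight hex digits into the two words at
 * IXGBE_PBANUM0_PTR/IXGBE_PBANUM1_PTR, decoded below into the ten character
 * form "XXXXXX-0XX", while newer images store a guard word, a pointer to a
 * length-prefixed section, and the string itself as raw ASCII words.
 */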
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
                                  u32 pba_num_size)
{
        s32 ret_val;
        u16 data;
        u16 pba_ptr;
        u16 offset;
        u16 length;

        if (pba_num == NULL) {
                hw_dbg(hw, "PBA string buffer was null\n");
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
        if (ret_val) {
                hw_dbg(hw, "NVM Read Error\n");
                return ret_val;
        }

        ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
        if (ret_val) {
                hw_dbg(hw, "NVM Read Error\n");
                return ret_val;
        }

        /*
         * if data is not ptr guard the PBA must be in legacy format which
         * means pba_ptr is actually our second data word for the PBA number
         * and we can decode it into an ascii string
         */
        if (data != IXGBE_PBANUM_PTR_GUARD) {
                hw_dbg(hw, "NVM PBA number is not stored as string\n");

                /* we will need 11 characters to store the PBA */
                if (pba_num_size < 11) {
                        hw_dbg(hw, "PBA string buffer too small\n");
                        return IXGBE_ERR_NO_SPACE;
                }

                /* extract hex string from data and pba_ptr */
                pba_num[0] = (data >> 12) & 0xF;
                pba_num[1] = (data >> 8) & 0xF;
                pba_num[2] = (data >> 4) & 0xF;
                pba_num[3] = data & 0xF;
                pba_num[4] = (pba_ptr >> 12) & 0xF;
                pba_num[5] = (pba_ptr >> 8) & 0xF;
                pba_num[6] = '-';
                pba_num[7] = 0;
                pba_num[8] = (pba_ptr >> 4) & 0xF;
                pba_num[9] = pba_ptr & 0xF;

                /* put a null character on the end of our string */
                pba_num[10] = '\0';

                /* switch all the data but the '-' to hex char */
                for (offset = 0; offset < 10; offset++) {
                        if (pba_num[offset] < 0xA)
                                pba_num[offset] += '0';
                        else if (pba_num[offset] < 0x10)
                                pba_num[offset] += 'A' - 0xA;
                }

                return 0;
        }

        ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
        if (ret_val) {
                hw_dbg(hw, "NVM Read Error\n");
                return ret_val;
        }

        if (length == 0xFFFF || length == 0) {
                hw_dbg(hw, "NVM PBA number section invalid length\n");
                return IXGBE_ERR_PBA_SECTION;
        }

        /* check if pba_num buffer is big enough */
        if (pba_num_size < (((u32)length * 2) - 1)) {
                hw_dbg(hw, "PBA string buffer too small\n");
                return IXGBE_ERR_NO_SPACE;
        }

        /* trim pba length from start of string */
        pba_ptr++;
        length--;

        for (offset = 0; offset < length; offset++) {
                ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
                if (ret_val) {
                        hw_dbg(hw, "NVM Read Error\n");
                        return ret_val;
                }
                pba_num[offset * 2] = (u8)(data >> 8);
                pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
        }
        pba_num[offset * 2] = '\0';

        return 0;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
        u32 rar_high;
        u32 rar_low;
        u16 i;

        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
        rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
        for (i = 0; i < 4; i++)
                mac_addr[i] = (u8)(rar_low >> (i*8));

        for (i = 0; i < 2; i++)
                mac_addr[i+4] = (u8)(rar_high >> (i*8));

        return 0;
}

enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
{
        switch (link_status & IXGBE_PCI_LINK_WIDTH) {
        case IXGBE_PCI_LINK_WIDTH_1:
                return ixgbe_bus_width_pcie_x1;
        case IXGBE_PCI_LINK_WIDTH_2:
                return ixgbe_bus_width_pcie_x2;
        case IXGBE_PCI_LINK_WIDTH_4:
                return ixgbe_bus_width_pcie_x4;
        case IXGBE_PCI_LINK_WIDTH_8:
                return ixgbe_bus_width_pcie_x8;
        default:
                return ixgbe_bus_width_unknown;
        }
}

enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
{
        switch (link_status & IXGBE_PCI_LINK_SPEED) {
        case IXGBE_PCI_LINK_SPEED_2500:
                return ixgbe_bus_speed_2500;
        case IXGBE_PCI_LINK_SPEED_5000:
                return ixgbe_bus_speed_5000;
        case IXGBE_PCI_LINK_SPEED_8000:
                return ixgbe_bus_speed_8000;
        default:
                return ixgbe_bus_speed_unknown;
        }
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
        u16 link_status;

        hw->bus.type = ixgbe_bus_type_pci_express;

        /* Get the negotiated link width and speed from PCI config space */
        link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);

        hw->bus.width = ixgbe_convert_bus_width(link_status);
        hw->bus.speed = ixgbe_convert_bus_speed(link_status);

        hw->mac.ops.set_lan_id(hw);

        return 0;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
        struct ixgbe_bus_info *bus = &hw->bus;
        u16 ee_ctrl_4;
        u32 reg;

        reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
        bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
        bus->lan_id = bus->func;

        /* check for a port swap */
        reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
        if (reg & IXGBE_FACTPS_LFS)
                bus->func ^= 0x1;

        /* Get MAC instance from EEPROM for configuring CS4227 */
        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
                hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
                bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
                                   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
        }
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
        u32 reg_val;
        u16 i;

        /*
         * Set the adapter_stopped flag so other driver functions stop touching
         * the hardware
         */
        hw->adapter_stopped = true;

        /* Disable the receive unit */
        hw->mac.ops.disable_rx(hw);

        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

        /* Clear any pending interrupts, flush previous writes */
        IXGBE_READ_REG(hw, IXGBE_EICR);

        /* Disable the transmit unit.  Each queue must be disabled. */
        for (i = 0; i < hw->mac.max_tx_queues; i++)
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

        /* Disable the receive unit by stopping each queue */
        for (i = 0; i < hw->mac.max_rx_queues; i++) {
                reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                reg_val &= ~IXGBE_RXDCTL_ENABLE;
                reg_val |= IXGBE_RXDCTL_SWFLSH;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
        }

        /* flush all queue disables */
        IXGBE_WRITE_FLUSH(hw);
        usleep_range(1000, 2000);

        /*
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests
         */
        return ixgbe_disable_pcie_master(hw);
}
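/*
 * LEDCTL drives the four software-controllable LEDs: each LED index owns
 * its own mode field in the register, selected via IXGBE_LED_MODE_SHIFT()
 * and IXGBE_LED_MODE_MASK(), with modes such as IXGBE_LED_ON, IXGBE_LED_OFF
 * and IXGBE_LED_LINK_ACTIVE that the routines below match against.
 */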
/**
 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
 * @hw: pointer to hardware structure
 *
 * Store the index for the link active LED. This will be used to support
 * blinking the LED.
 **/
s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
        struct ixgbe_mac_info *mac = &hw->mac;
        u32 led_reg, led_mode;
        u16 i;

        led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

        /* Get LED link active from the LEDCTL register */
        for (i = 0; i < 4; i++) {
                led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

                if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
                    IXGBE_LED_LINK_ACTIVE) {
                        mac->led_link_act = i;
                        return 0;
                }
        }

        /* If LEDCTL register does not have the LED link active set, then use
         * known MAC defaults.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_x550em_a:
                mac->led_link_act = 0;
                break;
        case ixgbe_mac_X550EM_x:
                mac->led_link_act = 1;
                break;
        default:
                mac->led_link_act = 2;
        }

        return 0;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

        if (index > 3)
                return IXGBE_ERR_PARAM;

        /* To turn on the LED, set mode to ON. */
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

        if (index > 3)
                return IXGBE_ERR_PARAM;

        /* To turn off the LED, set mode to OFF. */
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
        u16 eeprom_size;

        if (eeprom->type == ixgbe_eeprom_uninitialized) {
                eeprom->type = ixgbe_eeprom_none;
                /* Set default semaphore delay to 10ms which is a well
                 * tested value */
                eeprom->semaphore_delay = 10;
                /* Clear EEPROM page size, it will be initialized as needed */
                eeprom->word_page_size = 0;

                /*
                 * Check for EEPROM present first.
                 * If not present leave as none
                 */
                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
                if (eec & IXGBE_EEC_PRES) {
                        eeprom->type = ixgbe_eeprom_spi;

                        /*
                         * SPI EEPROM is assumed here.  This code would need to
                         * change if a future EEPROM is not SPI.
                         */
                        eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                            IXGBE_EEC_SIZE_SHIFT);
                        eeprom->word_size = BIT(eeprom_size +
                                                IXGBE_EEPROM_WORD_SIZE_SHIFT);
                }

                if (eec & IXGBE_EEC_ADDR_SIZE)
                        eeprom->address_bits = 16;
                else
                        eeprom->address_bits = 8;
                hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
                       eeprom->type, eeprom->word_size, eeprom->address_bits);
        }

        return 0;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of words
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                               u16 words, u16 *data)
{
        s32 status;
        u16 i, count;

        hw->eeprom.ops.init_params(hw);

        if (words == 0)
                return IXGBE_ERR_INVALID_ARGUMENT;

        if (offset + words > hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        /*
         * The EEPROM page size cannot be queried from the chip. We do lazy
         * initialization. It is worth doing when we write a large buffer.
         */
        if ((hw->eeprom.word_page_size == 0) &&
            (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
                ixgbe_detect_eeprom_page_size_generic(hw, offset);

        /*
         * We cannot hold synchronization semaphores for too long
         * to avoid other entity starvation. However it is more efficient
         * to read in bursts than synchronizing access for each word.
         */
        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
                        IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
                status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
                                                            count, &data[i]);

                if (status != 0)
                        break;
        }

        return status;
}
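/*
 * The bit-bang helpers below speak the standard SPI serial EEPROM command
 * set: WREN before every write, a read of the status register to wait for
 * the ready bit, and an address phase sized by eeprom->address_bits.
 * Parts with 8-bit addressing expose the ninth address bit (A8) inside the
 * opcode, which is why the opcode is patched for offsets >= 128 words
 * (byte address 256 and up).
 */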
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data)
{
        s32 status;
        u16 word;
        u16 page_size;
        u16 i;
        u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

        /* Prepare the EEPROM for writing */
        status = ixgbe_acquire_eeprom(hw);
        if (status)
                return status;

        if (ixgbe_ready_eeprom(hw) != 0) {
                ixgbe_release_eeprom(hw);
                return IXGBE_ERR_EEPROM;
        }

        for (i = 0; i < words; i++) {
                ixgbe_standby_eeprom(hw);

                /* Send the WRITE ENABLE command (8 bit opcode) */
                ixgbe_shift_out_eeprom_bits(hw,
                                            IXGBE_EEPROM_WREN_OPCODE_SPI,
                                            IXGBE_EEPROM_OPCODE_BITS);

                ixgbe_standby_eeprom(hw);

                /* Some SPI eeproms use the 8th address bit embedded
                 * in the opcode
                 */
                if ((hw->eeprom.address_bits == 8) &&
                    ((offset + i) >= 128))
                        write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

                /* Send the Write command (8-bit opcode + addr) */
                ixgbe_shift_out_eeprom_bits(hw, write_opcode,
                                            IXGBE_EEPROM_OPCODE_BITS);
                ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
                                            hw->eeprom.address_bits);

                page_size = hw->eeprom.word_page_size;

                /* Send the data in burst via SPI */
                do {
                        word = data[i];
                        word = (word >> 8) | (word << 8);
                        ixgbe_shift_out_eeprom_bits(hw, word, 16);

                        if (page_size == 0)
                                break;

                        /* do not wrap around page */
                        if (((offset + i) & (page_size - 1)) ==
                            (page_size - 1))
                                break;
                } while (++i < words);

                ixgbe_standby_eeprom(hw);
                usleep_range(10000, 20000);
        }
        /* Done with writing - release the EEPROM */
        ixgbe_release_eeprom(hw);

        return 0;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
        hw->eeprom.ops.init_params(hw);

        if (offset >= hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: 16 bit word(s) read from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data)
{
        s32 status;
        u16 i, count;

        hw->eeprom.ops.init_params(hw);

        if (words == 0)
                return IXGBE_ERR_INVALID_ARGUMENT;

        if (offset + words > hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        /*
         * We cannot hold synchronization semaphores for too long
         * to avoid other entity starvation. However it is more efficient
         * to read in bursts than synchronizing access for each word.
         */
        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
                        IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

                status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
                                                           count, &data[i]);

                if (status)
                        return status;
        }

        return 0;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: 16 bit word(s) read from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                                             u16 words, u16 *data)
{
        s32 status;
        u16 word_in;
        u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
        u16 i;

        /* Prepare the EEPROM for reading */
        status = ixgbe_acquire_eeprom(hw);
        if (status)
                return status;

        if (ixgbe_ready_eeprom(hw) != 0) {
                ixgbe_release_eeprom(hw);
                return IXGBE_ERR_EEPROM;
        }

        for (i = 0; i < words; i++) {
                ixgbe_standby_eeprom(hw);
                /* Some SPI eeproms use the 8th address bit embedded
                 * in the opcode
                 */
                if ((hw->eeprom.address_bits == 8) &&
                    ((offset + i) >= 128))
                        read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

                /* Send the READ command (opcode + addr) */
                ixgbe_shift_out_eeprom_bits(hw, read_opcode,
                                            IXGBE_EEPROM_OPCODE_BITS);
                ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
                                            hw->eeprom.address_bits);

                /* Read the data. */
                word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
                data[i] = (word_in >> 8) | (word_in << 8);
        }

        /* End this read operation */
        ixgbe_release_eeprom(hw);

        return 0;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                       u16 *data)
{
        hw->eeprom.ops.init_params(hw);

        if (offset >= hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
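/*
 * EERD/EEWR are the hardware-assisted alternative to bit-banging: software
 * writes the word offset into the address field together with the START
 * bit, polls for DONE via ixgbe_poll_eerd_eewr_done(), and the data then
 * appears in (or is taken from) the upper half of the register.
 */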
/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                   u16 words, u16 *data)
{
        u32 eerd;
        s32 status;
        u32 i;

        hw->eeprom.ops.init_params(hw);

        if (words == 0)
                return IXGBE_ERR_INVALID_ARGUMENT;

        if (offset >= hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        for (i = 0; i < words; i++) {
                eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
                       IXGBE_EEPROM_RW_REG_START;

                IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

                if (status == 0) {
                        data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
                                   IXGBE_EEPROM_RW_REG_DATA);
                } else {
                        hw_dbg(hw, "Eeprom read timed out\n");
                        return status;
                }
        }

        return 0;
}
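/*
 * Worked example for the page-size probe below, assuming
 * IXGBE_EEPROM_PAGE_SIZE_MAX is 128 words: writing the values 0..127 to a
 * part with a 32-word page wraps within that page, so the scratch offset
 * is left holding 128 - 32 = 96 and the detected page size comes out as
 * 128 - data[0] = 32 words.
 */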
/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
                                                 u16 offset)
{
        u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
        s32 status;
        u16 i;

        for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
                data[i] = i;

        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
        status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
                                                    IXGBE_EEPROM_PAGE_SIZE_MAX,
                                                    data);
        hw->eeprom.word_page_size = 0;
        if (status)
                return status;

        status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
        if (status)
                return status;

        /*
         * When writing in burst more than the actual page size
         * EEPROM address wraps around current page.
         */
        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

        hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
               hw->eeprom.word_page_size);
        return 0;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
        return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) to write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                    u16 words, u16 *data)
{
        u32 eewr;
        s32 status;
        u16 i;

        hw->eeprom.ops.init_params(hw);

        if (words == 0)
                return IXGBE_ERR_INVALID_ARGUMENT;

        if (offset >= hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        for (i = 0; i < words; i++) {
                eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
                       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
                       IXGBE_EEPROM_RW_REG_START;

                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
                        return status;
                }

                IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
                        return status;
                }
        }

        return 0;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
        return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
        u32 i;
        u32 reg;

        for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
                if (ee_reg == IXGBE_NVM_POLL_READ)
                        reg = IXGBE_READ_REG(hw, IXGBE_EERD);
                else
                        reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

                if (reg & IXGBE_EEPROM_RW_REG_DONE) {
                        return 0;
                }
                udelay(5);
        }
        return IXGBE_ERR_EEPROM;
}
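/*
 * Bit-bang access is arbitrated at two levels: the SW/FW synchronization
 * semaphore for the EEPROM resource (IXGBE_GSSR_EEP_SM, taken through
 * acquire_swfw_sync), and the EEC.REQ/GNT handshake that asks the hardware
 * itself for the pins.  Both must be held for the duration of a bit-bang
 * transaction and are released in reverse order by ixgbe_release_eeprom().
 */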
/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
        u32 eec;
        u32 i;

        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
                return IXGBE_ERR_SWFW_SYNC;

        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

        /* Request EEPROM Access */
        eec |= IXGBE_EEC_REQ;
        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

        for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
                if (eec & IXGBE_EEC_GNT)
                        break;
                udelay(5);
        }

        /* Release if grant not acquired */
        if (!(eec & IXGBE_EEC_GNT)) {
                eec &= ~IXGBE_EEC_REQ;
                IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
                hw_dbg(hw, "Could not acquire EEPROM grant\n");

                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
                return IXGBE_ERR_EEPROM;
        }

        /* Setup EEPROM for Read/Write */
        /* Clear CS and SK */
        eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
        IXGBE_WRITE_FLUSH(hw);
        udelay(1);
        return 0;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
        u32 timeout = 2000;
        u32 i;
        u32 swsm;

        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
                 * If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
                if (!(swsm & IXGBE_SWSM_SMBI))
                        break;
                usleep_range(50, 100);
        }

        if (i == timeout) {
                hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
                /* this release is particularly important because our attempts
                 * above to get the semaphore may have succeeded, and if there
                 * was a timeout, we should unconditionally clear the semaphore
                 * bits to free the driver to make progress
                 */
                ixgbe_release_eeprom_semaphore(hw);

                usleep_range(50, 100);
                /* one last try
                 * If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
                if (swsm & IXGBE_SWSM_SMBI) {
                        hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
                        return IXGBE_ERR_EEPROM;
                }
        }

        /* Now get the semaphore between SW/FW through the SWESMBI bit */
        for (i = 0; i < timeout; i++) {
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

                /* Set the SW EEPROM semaphore bit to request access */
                swsm |= IXGBE_SWSM_SWESMBI;
                IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

                /* If we set the bit successfully then we got the
                 * semaphore.
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
                if (swsm & IXGBE_SWSM_SWESMBI)
                        break;

                usleep_range(50, 100);
        }

        /* Release semaphores and return error if SW EEPROM semaphore
         * was not granted because we don't have access to the EEPROM
         */
        if (i >= timeout) {
                hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
                ixgbe_release_eeprom_semaphore(hw);
                return IXGBE_ERR_EEPROM;
        }

        return 0;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
        u32 swsm;

        swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

        /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
        swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
        IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
        IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
        u16 i;
        u8 spi_stat_reg;

        /*
         * Read "Status Register" repeatedly until the LSB is cleared.  The
         * EEPROM will signal that the command has been completed by clearing
         * bit 0 of the internal status register.  If it's not cleared within
         * 5 milliseconds, then error out.
         */
        for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
                ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
                                            IXGBE_EEPROM_OPCODE_BITS);
                spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
                if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
                        break;

                udelay(5);
                ixgbe_standby_eeprom(hw);
        }

        /*
         * On some parts, SPI write time could vary from 0-20mSec on 3.3V
         * devices (and only 0-5mSec on 5V devices)
         */
        if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
                hw_dbg(hw, "SPI EEPROM Status error\n");
                return IXGBE_ERR_EEPROM;
        }

        return 0;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
        u32 eec;

        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

        /* Toggle CS to flush commands */
        eec |= IXGBE_EEC_CS;
        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
        IXGBE_WRITE_FLUSH(hw);
        udelay(1);
        eec &= ~IXGBE_EEC_CS;
        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
        IXGBE_WRITE_FLUSH(hw);
        udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
                                        u16 count)
{
        u32 eec;
        u32 mask;
        u32 i;

        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

        /*
         * Mask is used to shift "count" bits of "data" out to the EEPROM
         * one bit at a time.  Determine the starting bit based on count
         */
        mask = BIT(count - 1);

        for (i = 0; i < count; i++) {
                /*
                 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
                 * "1", and then raising and then lowering the clock (the SK
A "0" is 1603 * shifted out to the EEPROM by setting "DI" to "0" and then 1604 * raising and then lowering the clock. 1605 */ 1606 if (data & mask) 1607 eec |= IXGBE_EEC_DI; 1608 else 1609 eec &= ~IXGBE_EEC_DI; 1610 1611 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1612 IXGBE_WRITE_FLUSH(hw); 1613 1614 udelay(1); 1615 1616 ixgbe_raise_eeprom_clk(hw, &eec); 1617 ixgbe_lower_eeprom_clk(hw, &eec); 1618 1619 /* 1620 * Shift mask to signify next bit of data to shift in to the 1621 * EEPROM 1622 */ 1623 mask = mask >> 1; 1624 } 1625 1626 /* We leave the "DI" bit set to "0" when we leave this routine. */ 1627 eec &= ~IXGBE_EEC_DI; 1628 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1629 IXGBE_WRITE_FLUSH(hw); 1630 } 1631 1632 /** 1633 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 1634 * @hw: pointer to hardware structure 1635 **/ 1636 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 1637 { 1638 u32 eec; 1639 u32 i; 1640 u16 data = 0; 1641 1642 /* 1643 * In order to read a register from the EEPROM, we need to shift 1644 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 1645 * the clock input to the EEPROM (setting the SK bit), and then reading 1646 * the value of the "DO" bit. During this "shifting in" process the 1647 * "DI" bit should always be clear. 1648 */ 1649 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1650 1651 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 1652 1653 for (i = 0; i < count; i++) { 1654 data = data << 1; 1655 ixgbe_raise_eeprom_clk(hw, &eec); 1656 1657 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1658 1659 eec &= ~(IXGBE_EEC_DI); 1660 if (eec & IXGBE_EEC_DO) 1661 data |= 1; 1662 1663 ixgbe_lower_eeprom_clk(hw, &eec); 1664 } 1665 1666 return data; 1667 } 1668 1669 /** 1670 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 1671 * @hw: pointer to hardware structure 1672 * @eec: EEC register's current value 1673 **/ 1674 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1675 { 1676 /* 1677 * Raise the clock input to the EEPROM 1678 * (setting the SK bit), then delay 1679 */ 1680 *eec = *eec | IXGBE_EEC_SK; 1681 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1682 IXGBE_WRITE_FLUSH(hw); 1683 udelay(1); 1684 } 1685 1686 /** 1687 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 1688 * @hw: pointer to hardware structure 1689 * @eecd: EECD's current value 1690 **/ 1691 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1692 { 1693 /* 1694 * Lower the clock input to the EEPROM (clearing the SK bit), then 1695 * delay 1696 */ 1697 *eec = *eec & ~IXGBE_EEC_SK; 1698 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1699 IXGBE_WRITE_FLUSH(hw); 1700 udelay(1); 1701 } 1702 1703 /** 1704 * ixgbe_release_eeprom - Release EEPROM, release semaphores 1705 * @hw: pointer to hardware structure 1706 **/ 1707 static void ixgbe_release_eeprom(struct ixgbe_hw *hw) 1708 { 1709 u32 eec; 1710 1711 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1712 1713 eec |= IXGBE_EEC_CS; /* Pull CS high */ 1714 eec &= ~IXGBE_EEC_SK; /* Lower SCK */ 1715 1716 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1717 IXGBE_WRITE_FLUSH(hw); 1718 1719 udelay(1); 1720 1721 /* Stop requesting EEPROM access */ 1722 eec &= ~IXGBE_EEC_REQ; 1723 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1724 1725 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1726 1727 /* 1728 * Delay before attempt to obtain semaphore again to allow FW 1729 * access. 
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
        u16 i;
        u16 j;
        u16 checksum = 0;
        u16 length = 0;
        u16 pointer = 0;
        u16 word = 0;

        /* Include 0x0-0x3F in the checksum */
        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
                if (hw->eeprom.ops.read(hw, i, &word)) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
                checksum += word;
        }

        /* Include all data from pointers except for the fw pointer */
        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
                if (hw->eeprom.ops.read(hw, i, &pointer)) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        return IXGBE_ERR_EEPROM;
                }

                /* If the pointer seems invalid */
                if (pointer == 0xFFFF || pointer == 0)
                        continue;

                if (hw->eeprom.ops.read(hw, pointer, &length)) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        return IXGBE_ERR_EEPROM;
                }

                if (length == 0xFFFF || length == 0)
                        continue;

                for (j = pointer + 1; j <= pointer + length; j++) {
                        if (hw->eeprom.ops.read(hw, j, &word)) {
                                hw_dbg(hw, "EEPROM read failed\n");
                                return IXGBE_ERR_EEPROM;
                        }
                        checksum += word;
                }
        }

        checksum = (u16)IXGBE_EEPROM_SUM - checksum;

        return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                           u16 *checksum_val)
{
        s32 status;
        u16 checksum;
        u16 read_checksum = 0;

        /*
         * Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
        if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
                return status;
        }

        status = hw->eeprom.ops.calc_checksum(hw);
        if (status < 0)
                return status;

        checksum = (u16)(status & 0xffff);

        status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
        if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
                return status;
        }

        /* Verify read checksum from EEPROM is the same as
         * calculated checksum
         */
        if (read_checksum != checksum)
                status = IXGBE_ERR_EEPROM_CHECKSUM;

        /* If the user cares, return the calculated checksum */
        if (checksum_val)
                *checksum_val = checksum;

        return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
        s32 status;
        u16 checksum;

        /*
         * Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
        if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
                return status;
        }

        status = hw->eeprom.ops.calc_checksum(hw);
        if (status < 0)
                return status;

        checksum = (u16)(status & 0xffff);

        status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

        return status;
}
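/*
 * RAR layout example: for the address aa:bb:cc:dd:ee:ff, set_rar below
 * writes RAL = 0xddccbbaa and places 0xffee in the low 16 bits of RAH,
 * i.e. the address is stored little-endian with the remaining RAH bits
 * left for the valid flag (IXGBE_RAH_AV) and per-part VMDq fields.
 */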
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                          u32 enable_addr)
{
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /* setup VMDq pool selection before this RAR gets enabled */
        hw->mac.ops.set_vmdq(hw, index, vmdq);

        /*
         * HW expects these in little endian so we reverse the byte
         * order from network order (big endian) to little endian
         */
        rar_low = ((u32)addr[0] |
                   ((u32)addr[1] << 8) |
                   ((u32)addr[2] << 16) |
                   ((u32)addr[3] << 24));
        /*
         * Some parts put the VMDq setting in the extra RAH bits,
         * so save everything except the lower 16 bits that hold part
         * of the address and the address valid bit.
         */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
        rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

        if (enable_addr != 0)
                rar_high |= IXGBE_RAH_AV;

        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

        return 0;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /*
         * Some parts put the VMDq setting in the extra RAH bits,
         * so save everything except the lower 16 bits that hold part
         * of the address and the address valid bit.
         */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

        /* clear VMDq pool/queue selection for this RAR */
        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

        return 0;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
1960 * @hw: pointer to hardware structure
1961 *
1962 * Places the MAC address in receive address register 0 and clears the rest
1963 * of the receive address registers. Clears the multicast table. Assumes
1964 * the receiver is in reset when the routine is called.
1965 **/
1966 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1967 {
1968 u32 i;
1969 u32 rar_entries = hw->mac.num_rar_entries;
1970 
1971 /*
1972 * If the current mac address is valid, assume it is a software override
1973 * to the permanent address.
1974 * Otherwise, use the permanent address from the eeprom.
1975 */
1976 if (!is_valid_ether_addr(hw->mac.addr)) {
1977 /* Get the MAC address from the RAR0 for later reference */
1978 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1979 
1980 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1981 } else {
1982 /* Setup the receive address. */
1983 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1984 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1985 
1986 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1987 }
1988 
1989 /* clear VMDq pool/queue selection for RAR 0 */
1990 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1991 
1992 hw->addr_ctrl.overflow_promisc = 0;
1993 
1994 hw->addr_ctrl.rar_used_count = 1;
1995 
1996 /* Zero out the other receive addresses. */
1997 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1998 for (i = 1; i < rar_entries; i++) {
1999 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2000 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2001 }
2002 
2003 /* Clear the MTA */
2004 hw->addr_ctrl.mta_in_use = 0;
2005 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2006 
2007 hw_dbg(hw, " Clearing MTA\n");
2008 for (i = 0; i < hw->mac.mcft_size; i++)
2009 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2010 
2011 if (hw->mac.ops.init_uta_tables)
2012 hw->mac.ops.init_uta_tables(hw);
2013 
2014 return 0;
2015 }
2016 
2017 /**
2018 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2019 * @hw: pointer to hardware structure
2020 * @mc_addr: the multicast address
2021 *
2022 * Extracts the 12 bits from a multicast address to determine which
2023 * bit-vector to set in the multicast table. The hardware uses 12 bits from
2024 * incoming rx multicast addresses to determine the bit-vector to check in
2025 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
2026 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2027 * to mc_filter_type.
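*
* Worked example (editor's illustration): with mc_filter_type == 0 and
* the multicast address 01:00:5e:00:00:01, the case-0 arm below yields
* vector = (0x00 >> 4) | (0x01 << 4) = 0x010, which ixgbe_set_mta()
* maps to MTA register 0, bit 16.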
2028 **/
2029 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2030 {
2031 u32 vector = 0;
2032 
2033 switch (hw->mac.mc_filter_type) {
2034 case 0: /* use bits [47:36] of the address */
2035 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2036 break;
2037 case 1: /* use bits [46:35] of the address */
2038 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2039 break;
2040 case 2: /* use bits [45:34] of the address */
2041 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2042 break;
2043 case 3: /* use bits [43:32] of the address */
2044 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2045 break;
2046 default: /* Invalid mc_filter_type */
2047 hw_dbg(hw, "MC filter type param set incorrectly\n");
2048 break;
2049 }
2050 
2051 /* vector can only be 12-bits or boundary will be exceeded */
2052 vector &= 0xFFF;
2053 return vector;
2054 }
2055 
2056 /**
2057 * ixgbe_set_mta - Set bit-vector in multicast table
2058 * @hw: pointer to hardware structure
2059 * @mc_addr: multicast address whose hash determines the bit to set
2060 *
2061 * Sets the bit-vector in the multicast table.
2062 **/
2063 static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2064 {
2065 u32 vector;
2066 u32 vector_bit;
2067 u32 vector_reg;
2068 
2069 hw->addr_ctrl.mta_in_use++;
2070 
2071 vector = ixgbe_mta_vector(hw, mc_addr);
2072 hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
2073 
2074 /*
2075 * The MTA is a register array of 128 32-bit registers. It is treated
2076 * like an array of 4096 bits. We want to set bit
2077 * BitArray[vector_value]. So we figure out what register the bit is
2078 * in, read it, OR in the new bit, then write back the new value. The
2079 * register is determined by the upper 7 bits of the vector value and
2080 * the bit within that register is determined by the lower 5 bits of
2081 * the value.
2082 */
2083 vector_reg = (vector >> 5) & 0x7F;
2084 vector_bit = vector & 0x1F;
2085 hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
2086 }
2087 
2088 /**
2089 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2090 * @hw: pointer to hardware structure
2091 * @netdev: pointer to net device structure
2092 *
2093 * The given list replaces any existing list. Clears the MC addrs from receive
2094 * address registers and the multicast table. Uses unused receive address
2095 * registers for the first multicast addresses, and hashes the rest into the
2096 * multicast table.
2097 **/
2098 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
2099 struct net_device *netdev)
2100 {
2101 struct netdev_hw_addr *ha;
2102 u32 i;
2103 
2104 /*
2105 * Set the new number of MC addresses that we are being requested to
2106 * use.
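* For example (editor's note), a netdev carrying two multicast
* addresses yields num_mc_addrs == 2; each address sets a single bit
* in mta_shadow, so addresses hashing to the same vector simply share
* a bit and mta_in_use counts addresses, not bits.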
2107 */ 2108 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 2109 hw->addr_ctrl.mta_in_use = 0; 2110 2111 /* Clear mta_shadow */ 2112 hw_dbg(hw, " Clearing MTA\n"); 2113 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 2114 2115 /* Update mta shadow */ 2116 netdev_for_each_mc_addr(ha, netdev) { 2117 hw_dbg(hw, " Adding the multicast addresses:\n"); 2118 ixgbe_set_mta(hw, ha->addr); 2119 } 2120 2121 /* Enable mta */ 2122 for (i = 0; i < hw->mac.mcft_size; i++) 2123 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, 2124 hw->mac.mta_shadow[i]); 2125 2126 if (hw->addr_ctrl.mta_in_use > 0) 2127 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 2128 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2129 2130 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 2131 return 0; 2132 } 2133 2134 /** 2135 * ixgbe_enable_mc_generic - Enable multicast address in RAR 2136 * @hw: pointer to hardware structure 2137 * 2138 * Enables multicast address in RAR and the use of the multicast hash table. 2139 **/ 2140 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 2141 { 2142 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2143 2144 if (a->mta_in_use > 0) 2145 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2146 hw->mac.mc_filter_type); 2147 2148 return 0; 2149 } 2150 2151 /** 2152 * ixgbe_disable_mc_generic - Disable multicast address in RAR 2153 * @hw: pointer to hardware structure 2154 * 2155 * Disables multicast address in RAR and the use of the multicast hash table. 2156 **/ 2157 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2158 { 2159 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2160 2161 if (a->mta_in_use > 0) 2162 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2163 2164 return 0; 2165 } 2166 2167 /** 2168 * ixgbe_fc_enable_generic - Enable flow control 2169 * @hw: pointer to hardware structure 2170 * 2171 * Enable flow control according to the current settings. 2172 **/ 2173 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2174 { 2175 u32 mflcn_reg, fccfg_reg; 2176 u32 reg; 2177 u32 fcrtl, fcrth; 2178 int i; 2179 2180 /* Validate the water mark configuration. */ 2181 if (!hw->fc.pause_time) 2182 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2183 2184 /* Low water mark of zero causes XOFF floods */ 2185 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2186 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2187 hw->fc.high_water[i]) { 2188 if (!hw->fc.low_water[i] || 2189 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2190 hw_dbg(hw, "Invalid water mark configuration\n"); 2191 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2192 } 2193 } 2194 } 2195 2196 /* Negotiate the fc mode to use */ 2197 hw->mac.ops.fc_autoneg(hw); 2198 2199 /* Disable any previous flow control settings */ 2200 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2201 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2202 2203 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2204 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2205 2206 /* 2207 * The possible values of fc.current_mode are: 2208 * 0: Flow control is completely disabled 2209 * 1: Rx flow control is enabled (we can receive pause frames, 2210 * but not send pause frames). 2211 * 2: Tx flow control is enabled (we can send pause frames but 2212 * we do not support receiving pause frames). 2213 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2214 * other: Invalid. 2215 */ 2216 switch (hw->fc.current_mode) { 2217 case ixgbe_fc_none: 2218 /* 2219 * Flow control is disabled by software override or autoneg. 
2220 * The code below will actually disable it in the HW. 2221 */ 2222 break; 2223 case ixgbe_fc_rx_pause: 2224 /* 2225 * Rx Flow control is enabled and Tx Flow control is 2226 * disabled by software override. Since there really 2227 * isn't a way to advertise that we are capable of RX 2228 * Pause ONLY, we will advertise that we support both 2229 * symmetric and asymmetric Rx PAUSE. Later, we will 2230 * disable the adapter's ability to send PAUSE frames. 2231 */ 2232 mflcn_reg |= IXGBE_MFLCN_RFCE; 2233 break; 2234 case ixgbe_fc_tx_pause: 2235 /* 2236 * Tx Flow control is enabled, and Rx Flow control is 2237 * disabled by software override. 2238 */ 2239 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2240 break; 2241 case ixgbe_fc_full: 2242 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2243 mflcn_reg |= IXGBE_MFLCN_RFCE; 2244 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2245 break; 2246 default: 2247 hw_dbg(hw, "Flow control param set incorrectly\n"); 2248 return IXGBE_ERR_CONFIG; 2249 } 2250 2251 /* Set 802.3x based flow control settings. */ 2252 mflcn_reg |= IXGBE_MFLCN_DPF; 2253 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2254 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2255 2256 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2257 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2258 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2259 hw->fc.high_water[i]) { 2260 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 2261 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2262 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2263 } else { 2264 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2265 /* 2266 * In order to prevent Tx hangs when the internal Tx 2267 * switch is enabled we must set the high water mark 2268 * to the Rx packet buffer size - 24KB. This allows 2269 * the Tx switch to function even under heavy Rx 2270 * workloads. 2271 */ 2272 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; 2273 } 2274 2275 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2276 } 2277 2278 /* Configure pause time (2 TCs per register) */ 2279 reg = hw->fc.pause_time * 0x00010001; 2280 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 2281 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2282 2283 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2284 2285 return 0; 2286 } 2287 2288 /** 2289 * ixgbe_negotiate_fc - Negotiate flow control 2290 * @hw: pointer to hardware structure 2291 * @adv_reg: flow control advertised settings 2292 * @lp_reg: link partner's flow control settings 2293 * @adv_sym: symmetric pause bit in advertisement 2294 * @adv_asm: asymmetric pause bit in advertisement 2295 * @lp_sym: symmetric pause bit in link partner advertisement 2296 * @lp_asm: asymmetric pause bit in link partner advertisement 2297 * 2298 * Find the intersection between advertised settings and link partner's 2299 * advertised settings 2300 **/ 2301 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2302 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2303 { 2304 if ((!(adv_reg)) || (!(lp_reg))) 2305 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2306 2307 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2308 /* 2309 * Now we need to check if the user selected Rx ONLY 2310 * of pause frames. In this case, we had to advertise 2311 * FULL flow control because we could not advertise RX 2312 * ONLY. Hence, we must now check to see if we need to 2313 * turn OFF the TRANSMISSION of PAUSE frames. 
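*
* Concretely (editor's note): if both sides advertised symmetric
* PAUSE but the user only requested rx_pause, we fall into the else
* branch below and run with rx_pause rather than full.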
*/
2315 if (hw->fc.requested_mode == ixgbe_fc_full) {
2316 hw->fc.current_mode = ixgbe_fc_full;
2317 hw_dbg(hw, "Flow Control = FULL.\n");
2318 } else {
2319 hw->fc.current_mode = ixgbe_fc_rx_pause;
2320 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2321 }
2322 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2323 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2324 hw->fc.current_mode = ixgbe_fc_tx_pause;
2325 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2326 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2327 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2328 hw->fc.current_mode = ixgbe_fc_rx_pause;
2329 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2330 } else {
2331 hw->fc.current_mode = ixgbe_fc_none;
2332 hw_dbg(hw, "Flow Control = NONE.\n");
2333 }
2334 return 0;
2335 }
2336 
2337 /**
2338 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2339 * @hw: pointer to hardware structure
2340 *
2341 * Enable flow control according to IEEE clause 37 on 1 gig fiber.
2342 **/
2343 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2344 {
2345 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2346 s32 ret_val;
2347 
2348 /*
2349 * On multispeed fiber at 1g, bail out if
2350 * - link is up but AN did not complete, or if
2351 * - link is up and AN completed but timed out
2352 */
2353 
2354 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2355 if (!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) ||
2356 (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
2357 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2358 
2359 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2360 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2361 
2362 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2363 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2364 IXGBE_PCS1GANA_ASM_PAUSE,
2365 IXGBE_PCS1GANA_SYM_PAUSE,
2366 IXGBE_PCS1GANA_ASM_PAUSE);
2367 
2368 return ret_val;
2369 }
2370 
2371 /**
2372 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2373 * @hw: pointer to hardware structure
2374 *
2375 * Enable flow control according to IEEE clause 37.
2376 **/
2377 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2378 {
2379 u32 links2, anlp1_reg, autoc_reg, links;
2380 s32 ret_val;
2381 
2382 /*
2383 * On backplane, bail out if
2384 * - backplane autoneg was not completed, or if
2385 * - we are 82599 and link partner is not AN enabled
2386 */
2387 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2388 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2389 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2390 
2391 if (hw->mac.type == ixgbe_mac_82599EB) {
2392 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2393 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2394 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2395 }
2396 /*
2397 * Read the 10g AN autoc and LP ability registers and resolve
2398 * local flow control settings accordingly
2399 */
2400 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2401 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2402 
2403 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2404 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2405 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2406 
2407 return ret_val;
2408 }
2409 
2410 /**
2411 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2412 * @hw: pointer to hardware structure
2413 *
2414 * Enable flow control according to IEEE clause 37.
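*
* The local and link partner abilities come from the clause 45 AN
* advertisement and LPA registers read over MDIO; both are handed to
* ixgbe_negotiate_fc() with the same TAF symmetric/asymmetric pause
* masks for each side.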
2415 **/ 2416 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2417 { 2418 u16 technology_ability_reg = 0; 2419 u16 lp_technology_ability_reg = 0; 2420 2421 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2422 MDIO_MMD_AN, 2423 &technology_ability_reg); 2424 hw->phy.ops.read_reg(hw, MDIO_AN_LPA, 2425 MDIO_MMD_AN, 2426 &lp_technology_ability_reg); 2427 2428 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2429 (u32)lp_technology_ability_reg, 2430 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2431 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2432 } 2433 2434 /** 2435 * ixgbe_fc_autoneg - Configure flow control 2436 * @hw: pointer to hardware structure 2437 * 2438 * Compares our advertised flow control capabilities to those advertised by 2439 * our link partner, and determines the proper flow control mode to use. 2440 **/ 2441 void ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2442 { 2443 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2444 ixgbe_link_speed speed; 2445 bool link_up; 2446 2447 /* 2448 * AN should have completed when the cable was plugged in. 2449 * Look for reasons to bail out. Bail out if: 2450 * - FC autoneg is disabled, or if 2451 * - link is not up. 2452 * 2453 * Since we're being called from an LSC, link is already known to be up. 2454 * So use link_up_wait_to_complete=false. 2455 */ 2456 if (hw->fc.disable_fc_autoneg) 2457 goto out; 2458 2459 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2460 if (!link_up) 2461 goto out; 2462 2463 switch (hw->phy.media_type) { 2464 /* Autoneg flow control on fiber adapters */ 2465 case ixgbe_media_type_fiber: 2466 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2467 ret_val = ixgbe_fc_autoneg_fiber(hw); 2468 break; 2469 2470 /* Autoneg flow control on backplane adapters */ 2471 case ixgbe_media_type_backplane: 2472 ret_val = ixgbe_fc_autoneg_backplane(hw); 2473 break; 2474 2475 /* Autoneg flow control on copper adapters */ 2476 case ixgbe_media_type_copper: 2477 if (ixgbe_device_supports_autoneg_fc(hw)) 2478 ret_val = ixgbe_fc_autoneg_copper(hw); 2479 break; 2480 2481 default: 2482 break; 2483 } 2484 2485 out: 2486 if (ret_val == 0) { 2487 hw->fc.fc_was_autonegged = true; 2488 } else { 2489 hw->fc.fc_was_autonegged = false; 2490 hw->fc.current_mode = hw->fc.requested_mode; 2491 } 2492 } 2493 2494 /** 2495 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion 2496 * @hw: pointer to hardware structure 2497 * 2498 * System-wide timeout range is encoded in PCIe Device Control2 register. 2499 * 2500 * Add 10% to specified maximum and return the number of times to poll for 2501 * completion timeout, in units of 100 microsec. Never return less than 2502 * 800 = 80 millisec. 
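*
* Worked example: a Device Control2 completion timeout range of
* 260-520 ms maps to 5200 polls of 100 microsec each; with the 10%
* margin the function returns (5200 * 11) / 10 = 5720.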
**/
2504 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2505 {
2506 u16 devctl2;
2507 u32 pollcnt;
2508 
2509 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2510 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2511 
2512 switch (devctl2) {
2513 case IXGBE_PCIDEVCTRL2_65_130ms:
2514 pollcnt = 1300; /* 130 millisec */
2515 break;
2516 case IXGBE_PCIDEVCTRL2_260_520ms:
2517 pollcnt = 5200; /* 520 millisec */
2518 break;
2519 case IXGBE_PCIDEVCTRL2_1_2s:
2520 pollcnt = 20000; /* 2 sec */
2521 break;
2522 case IXGBE_PCIDEVCTRL2_4_8s:
2523 pollcnt = 80000; /* 8 sec */
2524 break;
2525 case IXGBE_PCIDEVCTRL2_17_34s:
2526 pollcnt = 340000; /* 34 sec */
2527 break;
2528 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2529 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2530 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2531 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2532 default:
2533 pollcnt = 800; /* 80 millisec minimum */
2534 break;
2535 }
2536 
2537 /* add 10% to spec maximum */
2538 return (pollcnt * 11) / 10;
2539 }
2540 
2541 /**
2542 * ixgbe_disable_pcie_master - Disable PCI-express master access
2543 * @hw: pointer to hardware structure
2544 *
2545 * Disables PCI-Express master access and verifies there are no pending
2546 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2547 * bit hasn't caused the master requests to be disabled, else 0
2548 * is returned signifying master requests disabled.
2549 **/
2550 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2551 {
2552 u32 i, poll;
2553 u16 value;
2554 
2555 /* Always set this bit to ensure any future transactions are blocked */
2556 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2557 
2558 /* Poll for bit to read as set */
2559 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2560 if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
2561 break;
2562 usleep_range(100, 120);
2563 }
2564 if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
2565 hw_dbg(hw, "GIO disable did not set - requesting resets\n");
2566 goto gio_disable_fail;
2567 }
2568 
2569 /* Exit if master requests are blocked */
2570 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2571 ixgbe_removed(hw->hw_addr))
2572 return 0;
2573 
2574 /* Poll for master request bit to clear */
2575 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2576 udelay(100);
2577 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2578 return 0;
2579 }
2580 
2581 /*
2582 * Two consecutive resets are required via CTRL.RST per datasheet
2583 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2584 * of this need. The first reset prevents new master requests from
2585 * being issued by our device. We then must wait 1usec or more for any
2586 * remaining completions from the PCIe bus to trickle in, and then reset
2587 * again to clear out any effects they may have had on our device.
2588 */
2589 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2590 gio_disable_fail:
2591 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2592 
2593 if (hw->mac.type >= ixgbe_mac_X550)
2594 return 0;
2595 
2596 /*
2597 * Before proceeding, make sure that the PCIe block does not have
2598 * transactions pending.
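* Each iteration below delays 100 usec and re-reads the PCIe Device
* Status word, so the total wait is bounded by the poll budget
* returned by ixgbe_pcie_timeout_poll().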
*/
2600 poll = ixgbe_pcie_timeout_poll(hw);
2601 for (i = 0; i < poll; i++) {
2602 udelay(100);
2603 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2604 if (ixgbe_removed(hw->hw_addr))
2605 return 0;
2606 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2607 return 0;
2608 }
2609 
2610 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2611 return IXGBE_ERR_MASTER_REQUESTS_PENDING;
2612 }
2613 
2614 /**
2615 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2616 * @hw: pointer to hardware structure
2617 * @mask: Mask to specify which semaphore to acquire
2618 *
2619 * Acquires the SWFW semaphore through the GSSR register for the specified
2620 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2621 **/
2622 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2623 {
2624 u32 gssr = 0;
2625 u32 swmask = mask;
2626 u32 fwmask = mask << 5;
2627 u32 timeout = 200;
2628 u32 i;
2629 
2630 for (i = 0; i < timeout; i++) {
2631 /*
2632 * SW NVM semaphore bit is used for access to all
2633 * SW_FW_SYNC bits (not just NVM)
2634 */
2635 if (ixgbe_get_eeprom_semaphore(hw))
2636 return IXGBE_ERR_SWFW_SYNC;
2637 
2638 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2639 if (!(gssr & (fwmask | swmask))) {
2640 gssr |= swmask;
2641 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2642 ixgbe_release_eeprom_semaphore(hw);
2643 return 0;
2644 } else {
2645 /* Resource is currently in use by FW or SW */
2646 ixgbe_release_eeprom_semaphore(hw);
2647 usleep_range(5000, 10000);
2648 }
2649 }
2650 
2651 /* If time expired clear the bits holding the lock and retry */
2652 if (gssr & (fwmask | swmask))
2653 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2654 
2655 usleep_range(5000, 10000);
2656 return IXGBE_ERR_SWFW_SYNC;
2657 }
2658 
2659 /**
2660 * ixgbe_release_swfw_sync - Release SWFW semaphore
2661 * @hw: pointer to hardware structure
2662 * @mask: Mask to specify which semaphore to release
2663 *
2664 * Releases the SWFW semaphore through the GSSR register for the specified
2665 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2666 **/
2667 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2668 {
2669 u32 gssr;
2670 u32 swmask = mask;
2671 
2672 ixgbe_get_eeprom_semaphore(hw);
2673 
2674 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2675 gssr &= ~swmask;
2676 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2677 
2678 ixgbe_release_eeprom_semaphore(hw);
2679 }
2680 
2681 /**
2682 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2683 * @hw: pointer to hardware structure
2684 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
2685 * true in the generic case.
2686 * @reg_val: value read from AUTOC
2687 *
2688 * The default case requires no protection, so just do the register read.
2689 **/
2690 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2691 {
2692 *locked = false;
2693 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2694 return 0;
2695 }
2696 
2697 /**
2698 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2699 * @hw: pointer to hardware structure
2700 * @reg_val: value to write to AUTOC
2701 * @locked: bool to indicate whether the SW/FW lock was already taken by
2702 * previous read.
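*
* The generic MACs need no read-modify-write protection, so @locked is
* accepted only for symmetry with implementations whose
* prot_autoc_read does take the SW/FW lock; it is ignored here.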
2703 **/ 2704 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 2705 { 2706 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 2707 return 0; 2708 } 2709 2710 /** 2711 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2712 * @hw: pointer to hardware structure 2713 * 2714 * Stops the receive data path and waits for the HW to internally 2715 * empty the Rx security block. 2716 **/ 2717 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2718 { 2719 #define IXGBE_MAX_SECRX_POLL 40 2720 int i; 2721 int secrxreg; 2722 2723 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2724 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2725 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2726 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2727 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2728 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2729 break; 2730 else 2731 /* Use interrupt-safe sleep just in case */ 2732 udelay(1000); 2733 } 2734 2735 /* For informational purposes only */ 2736 if (i >= IXGBE_MAX_SECRX_POLL) 2737 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2738 2739 return 0; 2740 2741 } 2742 2743 /** 2744 * ixgbe_enable_rx_buff - Enables the receive data path 2745 * @hw: pointer to hardware structure 2746 * 2747 * Enables the receive data path 2748 **/ 2749 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2750 { 2751 u32 secrxreg; 2752 2753 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2754 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2755 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2756 IXGBE_WRITE_FLUSH(hw); 2757 2758 return 0; 2759 } 2760 2761 /** 2762 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2763 * @hw: pointer to hardware structure 2764 * @regval: register value to write to RXCTRL 2765 * 2766 * Enables the Rx DMA unit 2767 **/ 2768 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2769 { 2770 if (regval & IXGBE_RXCTRL_RXEN) 2771 hw->mac.ops.enable_rx(hw); 2772 else 2773 hw->mac.ops.disable_rx(hw); 2774 2775 return 0; 2776 } 2777 2778 /** 2779 * ixgbe_blink_led_start_generic - Blink LED based on index. 2780 * @hw: pointer to hardware structure 2781 * @index: led number to blink 2782 **/ 2783 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2784 { 2785 ixgbe_link_speed speed = 0; 2786 bool link_up = false; 2787 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2788 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2789 bool locked = false; 2790 s32 ret_val; 2791 2792 if (index > 3) 2793 return IXGBE_ERR_PARAM; 2794 2795 /* 2796 * Link must be up to auto-blink the LEDs; 2797 * Force it if link is down. 2798 */ 2799 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2800 2801 if (!link_up) { 2802 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2803 if (ret_val) 2804 return ret_val; 2805 2806 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2807 autoc_reg |= IXGBE_AUTOC_FLU; 2808 2809 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2810 if (ret_val) 2811 return ret_val; 2812 2813 IXGBE_WRITE_FLUSH(hw); 2814 2815 usleep_range(10000, 20000); 2816 } 2817 2818 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2819 led_reg |= IXGBE_LED_BLINK(index); 2820 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2821 IXGBE_WRITE_FLUSH(hw); 2822 2823 return 0; 2824 } 2825 2826 /** 2827 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
2828 * @hw: pointer to hardware structure 2829 * @index: led number to stop blinking 2830 **/ 2831 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2832 { 2833 u32 autoc_reg = 0; 2834 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2835 bool locked = false; 2836 s32 ret_val; 2837 2838 if (index > 3) 2839 return IXGBE_ERR_PARAM; 2840 2841 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2842 if (ret_val) 2843 return ret_val; 2844 2845 autoc_reg &= ~IXGBE_AUTOC_FLU; 2846 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2847 2848 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2849 if (ret_val) 2850 return ret_val; 2851 2852 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2853 led_reg &= ~IXGBE_LED_BLINK(index); 2854 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2855 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2856 IXGBE_WRITE_FLUSH(hw); 2857 2858 return 0; 2859 } 2860 2861 /** 2862 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2863 * @hw: pointer to hardware structure 2864 * @san_mac_offset: SAN MAC address offset 2865 * 2866 * This function will read the EEPROM location for the SAN MAC address 2867 * pointer, and returns the value at that location. This is used in both 2868 * get and set mac_addr routines. 2869 **/ 2870 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2871 u16 *san_mac_offset) 2872 { 2873 s32 ret_val; 2874 2875 /* 2876 * First read the EEPROM pointer to see if the MAC addresses are 2877 * available. 2878 */ 2879 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 2880 san_mac_offset); 2881 if (ret_val) 2882 hw_err(hw, "eeprom read at offset %d failed\n", 2883 IXGBE_SAN_MAC_ADDR_PTR); 2884 2885 return ret_val; 2886 } 2887 2888 /** 2889 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2890 * @hw: pointer to hardware structure 2891 * @san_mac_addr: SAN MAC address 2892 * 2893 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2894 * per-port, so set_lan_id() must be called before reading the addresses. 2895 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2896 * upon for non-SFP connections, so we must call it here. 2897 **/ 2898 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2899 { 2900 u16 san_mac_data, san_mac_offset; 2901 u8 i; 2902 s32 ret_val; 2903 2904 /* 2905 * First read the EEPROM pointer to see if the MAC addresses are 2906 * available. If they're not, no point in calling set_lan_id() here. 2907 */ 2908 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2909 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 2910 2911 goto san_mac_addr_clr; 2912 2913 /* make sure we know which port we need to program */ 2914 hw->mac.ops.set_lan_id(hw); 2915 /* apply the port offset to the address offset */ 2916 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2917 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2918 for (i = 0; i < 3; i++) { 2919 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2920 &san_mac_data); 2921 if (ret_val) { 2922 hw_err(hw, "eeprom read at offset %d failed\n", 2923 san_mac_offset); 2924 goto san_mac_addr_clr; 2925 } 2926 san_mac_addr[i * 2] = (u8)(san_mac_data); 2927 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2928 san_mac_offset++; 2929 } 2930 return 0; 2931 2932 san_mac_addr_clr: 2933 /* No addresses available in this EEPROM. 
It's not necessarily an 2934 * error though, so just wipe the local address and return. 2935 */ 2936 for (i = 0; i < 6; i++) 2937 san_mac_addr[i] = 0xFF; 2938 return ret_val; 2939 } 2940 2941 /** 2942 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2943 * @hw: pointer to hardware structure 2944 * 2945 * Read PCIe configuration space, and get the MSI-X vector count from 2946 * the capabilities table. 2947 **/ 2948 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2949 { 2950 u16 msix_count; 2951 u16 max_msix_count; 2952 u16 pcie_offset; 2953 2954 switch (hw->mac.type) { 2955 case ixgbe_mac_82598EB: 2956 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 2957 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 2958 break; 2959 case ixgbe_mac_82599EB: 2960 case ixgbe_mac_X540: 2961 case ixgbe_mac_X550: 2962 case ixgbe_mac_X550EM_x: 2963 case ixgbe_mac_x550em_a: 2964 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2965 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2966 break; 2967 default: 2968 return 1; 2969 } 2970 2971 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); 2972 if (ixgbe_removed(hw->hw_addr)) 2973 msix_count = 0; 2974 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2975 2976 /* MSI-X count is zero-based in HW */ 2977 msix_count++; 2978 2979 if (msix_count > max_msix_count) 2980 msix_count = max_msix_count; 2981 2982 return msix_count; 2983 } 2984 2985 /** 2986 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2987 * @hw: pointer to hardware struct 2988 * @rar: receive address register index to disassociate 2989 * @vmdq: VMDq pool index to remove from the rar 2990 **/ 2991 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2992 { 2993 u32 mpsar_lo, mpsar_hi; 2994 u32 rar_entries = hw->mac.num_rar_entries; 2995 2996 /* Make sure we are using a valid rar index range */ 2997 if (rar >= rar_entries) { 2998 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2999 return IXGBE_ERR_INVALID_ARGUMENT; 3000 } 3001 3002 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3003 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3004 3005 if (ixgbe_removed(hw->hw_addr)) 3006 return 0; 3007 3008 if (!mpsar_lo && !mpsar_hi) 3009 return 0; 3010 3011 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 3012 if (mpsar_lo) { 3013 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3014 mpsar_lo = 0; 3015 } 3016 if (mpsar_hi) { 3017 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3018 mpsar_hi = 0; 3019 } 3020 } else if (vmdq < 32) { 3021 mpsar_lo &= ~BIT(vmdq); 3022 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 3023 } else { 3024 mpsar_hi &= ~BIT(vmdq - 32); 3025 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 3026 } 3027 3028 /* was that the last pool using this rar? 
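* If so, and it is neither RAR 0 nor the SAN MAC RAR, the address
* entry itself is cleared as well.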
*/
3029 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3030 rar != 0 && rar != hw->mac.san_mac_rar_index)
3031 hw->mac.ops.clear_rar(hw, rar);
3032 
3033 return 0;
3034 }
3035 
3036 /**
3037 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3038 * @hw: pointer to hardware struct
3039 * @rar: receive address register index to associate with a VMDq index
3040 * @vmdq: VMDq pool index
3041 **/
3042 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3043 {
3044 u32 mpsar;
3045 u32 rar_entries = hw->mac.num_rar_entries;
3046 
3047 /* Make sure we are using a valid rar index range */
3048 if (rar >= rar_entries) {
3049 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
3050 return IXGBE_ERR_INVALID_ARGUMENT;
3051 }
3052 
3053 if (vmdq < 32) {
3054 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3055 mpsar |= BIT(vmdq);
3056 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3057 } else {
3058 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3059 mpsar |= BIT(vmdq - 32);
3060 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3061 }
3062 return 0;
3063 }
3064 
3065 /**
3066 * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with
3067 * a rx address
3068 * @hw: pointer to hardware struct
3069 * @vmdq: VMDq pool index
3070 *
3071 * This function should only be invoked in IOV mode. In IOV mode the default
3072 * pool is the next pool after the number of VFs advertised, not pool 0; the
3073 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index].
3074 **/
3075 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3076 {
3077 u32 rar = hw->mac.san_mac_rar_index;
3078 
3079 if (vmdq < 32) {
3080 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
3081 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3082 } else {
3083 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3084 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
3085 }
3086 
3087 return 0;
3088 }
3089 
3090 /**
3091 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3092 * @hw: pointer to hardware structure
3093 **/
3094 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3095 {
3096 int i;
3097 
3098 for (i = 0; i < 128; i++)
3099 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3100 
3101 return 0;
3102 }
3103 
3104 /**
3105 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3106 * @hw: pointer to hardware structure
3107 * @vlan: VLAN id to write to VLAN filter
3108 * @vlvf_bypass: true to find the vlanid only; false to also find the first empty slot
3109 *
3110 * return the VLVF index where this VLAN id should be placed
3111 **/
3112 static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3113 {
3114 s32 regindex, first_empty_slot;
3115 u32 bits;
3116 
3117 /* short cut the special case */
3118 if (vlan == 0)
3119 return 0;
3120 
3121 /* if vlvf_bypass is set we don't want to use an empty slot, we
3122 * will simply bypass the VLVF if there are no entries present in the
3123 * VLVF that contain our VLAN
3124 */
3125 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3126 
3127 /* add VLAN enable bit for comparison */
3128 vlan |= IXGBE_VLVF_VIEN;
3129 
3130 /* Search for the vlan id in the VLVF entries. Save off the first empty
3131 * slot found along the way.
3132 *
3133 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3134 */
3135 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3136 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3137 if (bits == vlan)
3138 return regindex;
3139 if (!first_empty_slot && !bits)
3140 first_empty_slot = regindex;
3141 }
3142 
3143 /* If we are here then we didn't find the VLAN. Return first empty
3144 * slot we found during our search, else error.
3145 */
3146 if (!first_empty_slot)
3147 hw_dbg(hw, "No space in VLVF.\n");
3148 
3149 return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
3150 }
3151 
3152 /**
3153 * ixgbe_set_vfta_generic - Set VLAN filter table
3154 * @hw: pointer to hardware structure
3155 * @vlan: VLAN id to write to VLAN filter
3156 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3157 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3158 * @vlvf_bypass: boolean flag indicating that updating the default pool is okay
3159 *
3160 * Turn on/off specified VLAN in the VLAN filter table.
3161 **/
3162 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3163 bool vlan_on, bool vlvf_bypass)
3164 {
3165 u32 regidx, vfta_delta, vfta, bits;
3166 s32 vlvf_index;
3167 
3168 if ((vlan > 4095) || (vind > 63))
3169 return IXGBE_ERR_PARAM;
3170 
3171 /*
3172 * this is a 2 part operation - first the VFTA, then the
3173 * VLVF and VLVFB if VT Mode is set
3174 * We don't write the VFTA until we know the VLVF part succeeded.
3175 */
3176 
3177 /* Part 1
3178 * The VFTA is a bitstring made up of 128 32-bit registers
3179 * that enable the particular VLAN id, much like the MTA:
3180 * bits[11-5]: which register
3181 * bits[4-0]: which bit in the register
3182 */
3183 regidx = vlan / 32;
3184 vfta_delta = BIT(vlan % 32);
3185 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3186 
3187 /* vfta_delta represents the difference between the current value
3188 * of vfta and the value we want in the register. Since the diff
3189 * is an XOR mask we can just update vfta using an XOR.
3190 */
3191 vfta_delta &= vlan_on ? ~vfta : vfta;
3192 vfta ^= vfta_delta;
3193 
3194 /* Part 2
3195 * If VT Mode is set
3196 * Either vlan_on
3197 * make sure the vlan is in VLVF
3198 * set the vind bit in the matching VLVFB
3199 * Or !vlan_on
3200 * clear the pool bit and possibly the vind
3201 */
3202 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3203 goto vfta_update;
3204 
3205 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3206 if (vlvf_index < 0) {
3207 if (vlvf_bypass)
3208 goto vfta_update;
3209 return vlvf_index;
3210 }
3211 
3212 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3213 
3214 /* set the pool bit */
3215 bits |= BIT(vind % 32);
3216 if (vlan_on)
3217 goto vlvf_update;
3218 
3219 /* clear the pool bit */
3220 bits ^= BIT(vind % 32);
3221 
3222 if (!bits &&
3223 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3224 /* Clear VFTA first, then disable VLVF. Otherwise
3225 * we run the risk of stray packets leaking into
3226 * the PF via the default pool
3227 */
3228 if (vfta_delta)
3229 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3230 
3231 /* disable VLVF and clear remaining bit from pool */
3232 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3233 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3234 
3235 return 0;
3236 }
3237 
3238 /* If there are still bits set in the VLVFB registers
3239 * for the VLAN ID indicated we need to see if the
3240 * caller is requesting that we clear the VFTA entry bit.
3241 * If the caller has requested that we clear the VFTA 3242 * entry bit but there are still pools/VFs using this VLAN 3243 * ID entry then ignore the request. We're not worried 3244 * about the case where we're turning the VFTA VLAN ID 3245 * entry bit on, only when requested to turn it off as 3246 * there may be multiple pools and/or VFs using the 3247 * VLAN ID entry. In that case we cannot clear the 3248 * VFTA bit until all pools/VFs using that VLAN ID have also 3249 * been cleared. This will be indicated by "bits" being 3250 * zero. 3251 */ 3252 vfta_delta = 0; 3253 3254 vlvf_update: 3255 /* record pool change and enable VLAN ID if not already enabled */ 3256 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 3257 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 3258 3259 vfta_update: 3260 /* Update VFTA now that we are ready for traffic */ 3261 if (vfta_delta) 3262 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3263 3264 return 0; 3265 } 3266 3267 /** 3268 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3269 * @hw: pointer to hardware structure 3270 * 3271 * Clears the VLAN filer table, and the VMDq index associated with the filter 3272 **/ 3273 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3274 { 3275 u32 offset; 3276 3277 for (offset = 0; offset < hw->mac.vft_size; offset++) 3278 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3279 3280 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3281 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3282 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 3283 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); 3284 } 3285 3286 return 0; 3287 } 3288 3289 /** 3290 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix 3291 * @hw: pointer to hardware structure 3292 * 3293 * Contains the logic to identify if we need to verify link for the 3294 * crosstalk fix 3295 **/ 3296 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) 3297 { 3298 /* Does FW say we need the fix */ 3299 if (!hw->need_crosstalk_fix) 3300 return false; 3301 3302 /* Only consider SFP+ PHYs i.e. media type fiber */ 3303 switch (hw->mac.ops.get_media_type(hw)) { 3304 case ixgbe_media_type_fiber: 3305 case ixgbe_media_type_fiber_qsfp: 3306 break; 3307 default: 3308 return false; 3309 } 3310 3311 return true; 3312 } 3313 3314 /** 3315 * ixgbe_check_mac_link_generic - Determine link and speed status 3316 * @hw: pointer to hardware structure 3317 * @speed: pointer to link speed 3318 * @link_up: true when link is up 3319 * @link_up_wait_to_complete: bool used to wait for link up or not 3320 * 3321 * Reads the links register to determine if link is up and the current speed 3322 **/ 3323 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3324 bool *link_up, bool link_up_wait_to_complete) 3325 { 3326 u32 links_reg, links_orig; 3327 u32 i; 3328 3329 /* If Crosstalk fix enabled do the sanity check of making sure 3330 * the SFP+ cage is full. 
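* An empty cage is reported as link down with unknown speed, and we
* return early without consulting the LINKS register.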
3331 */ 3332 if (ixgbe_need_crosstalk_fix(hw)) { 3333 u32 sfp_cage_full; 3334 3335 switch (hw->mac.type) { 3336 case ixgbe_mac_82599EB: 3337 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3338 IXGBE_ESDP_SDP2; 3339 break; 3340 case ixgbe_mac_X550EM_x: 3341 case ixgbe_mac_x550em_a: 3342 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3343 IXGBE_ESDP_SDP0; 3344 break; 3345 default: 3346 /* sanity check - No SFP+ devices here */ 3347 sfp_cage_full = false; 3348 break; 3349 } 3350 3351 if (!sfp_cage_full) { 3352 *link_up = false; 3353 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3354 return 0; 3355 } 3356 } 3357 3358 /* clear the old state */ 3359 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3360 3361 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3362 3363 if (links_orig != links_reg) { 3364 hw_dbg(hw, "LINKS changed from %08X to %08X\n", 3365 links_orig, links_reg); 3366 } 3367 3368 if (link_up_wait_to_complete) { 3369 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3370 if (links_reg & IXGBE_LINKS_UP) { 3371 *link_up = true; 3372 break; 3373 } else { 3374 *link_up = false; 3375 } 3376 msleep(100); 3377 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3378 } 3379 } else { 3380 if (links_reg & IXGBE_LINKS_UP) 3381 *link_up = true; 3382 else 3383 *link_up = false; 3384 } 3385 3386 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 3387 case IXGBE_LINKS_SPEED_10G_82599: 3388 if ((hw->mac.type >= ixgbe_mac_X550) && 3389 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3390 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 3391 else 3392 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3393 break; 3394 case IXGBE_LINKS_SPEED_1G_82599: 3395 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3396 break; 3397 case IXGBE_LINKS_SPEED_100_82599: 3398 if ((hw->mac.type >= ixgbe_mac_X550) && 3399 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3400 *speed = IXGBE_LINK_SPEED_5GB_FULL; 3401 else 3402 *speed = IXGBE_LINK_SPEED_100_FULL; 3403 break; 3404 case IXGBE_LINKS_SPEED_10_X550EM_A: 3405 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3406 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3407 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { 3408 *speed = IXGBE_LINK_SPEED_10_FULL; 3409 } 3410 break; 3411 default: 3412 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3413 } 3414 3415 return 0; 3416 } 3417 3418 /** 3419 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3420 * the EEPROM 3421 * @hw: pointer to hardware structure 3422 * @wwnn_prefix: the alternative WWNN prefix 3423 * @wwpn_prefix: the alternative WWPN prefix 3424 * 3425 * This function will read the EEPROM from the alternative SAN MAC address 3426 * block to check the support for the alternative WWNN/WWPN prefix support. 
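*
* Both prefixes are pre-set to 0xFFFF, so on a read failure or an
* absent alternative SAN MAC block the caller sees 0xFFFF rather than
* stale data.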
**/
3428 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3429 u16 *wwpn_prefix)
3430 {
3431 u16 offset, caps;
3432 u16 alt_san_mac_blk_offset;
3433 
3434 /* clear output first */
3435 *wwnn_prefix = 0xFFFF;
3436 *wwpn_prefix = 0xFFFF;
3437 
3438 /* check if alternative SAN MAC is supported */
3439 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3440 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3441 goto wwn_prefix_err;
3442 
3443 if ((alt_san_mac_blk_offset == 0) ||
3444 (alt_san_mac_blk_offset == 0xFFFF))
3445 return 0;
3446 
3447 /* check capability in alternative san mac address block */
3448 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3449 if (hw->eeprom.ops.read(hw, offset, &caps))
3450 goto wwn_prefix_err;
3451 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3452 return 0;
3453 
3454 /* get the corresponding prefix for WWNN/WWPN */
3455 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3456 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3457 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3458 
3459 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3460 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3461 goto wwn_prefix_err;
3462 
3463 return 0;
3464 
3465 wwn_prefix_err:
3466 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3467 return 0;
3468 }
3469 
3470 /**
3471 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3472 * @hw: pointer to hardware structure
3473 * @enable: enable or disable switch for MAC anti-spoofing
3474 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
3475 *
3476 **/
3477 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3478 {
3479 int vf_target_reg = vf >> 3;
3480 int vf_target_shift = vf % 8;
3481 u32 pfvfspoof;
3482 
3483 if (hw->mac.type == ixgbe_mac_82598EB)
3484 return;
3485 
3486 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3487 if (enable)
3488 pfvfspoof |= BIT(vf_target_shift);
3489 else
3490 pfvfspoof &= ~BIT(vf_target_shift);
3491 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3492 }
3493 
3494 /**
3495 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3496 * @hw: pointer to hardware structure
3497 * @enable: enable or disable switch for VLAN anti-spoofing
3498 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3499 *
3500 **/
3501 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3502 {
3503 int vf_target_reg = vf >> 3;
3504 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3505 u32 pfvfspoof;
3506 
3507 if (hw->mac.type == ixgbe_mac_82598EB)
3508 return;
3509 
3510 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3511 if (enable)
3512 pfvfspoof |= BIT(vf_target_shift);
3513 else
3514 pfvfspoof &= ~BIT(vf_target_shift);
3515 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3516 }
3517 
3518 /**
3519 * ixgbe_get_device_caps_generic - Get additional device capabilities
3520 * @hw: pointer to hardware structure
3521 * @device_caps: the EEPROM word with the extra device capabilities
3522 *
3523 * This function will read the EEPROM location for the device capabilities,
3524 * and return the word through device_caps.
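*
* Note (editor's observation): the EEPROM read status is not checked,
* so the function always returns 0 and a failed read may leave
* *device_caps unmodified.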
3525 **/ 3526 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3527 { 3528 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3529 3530 return 0; 3531 } 3532 3533 /** 3534 * ixgbe_set_rxpba_generic - Initialize RX packet buffer 3535 * @hw: pointer to hardware structure 3536 * @num_pb: number of packet buffers to allocate 3537 * @headroom: reserve n KB of headroom 3538 * @strategy: packet buffer allocation strategy 3539 **/ 3540 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, 3541 int num_pb, 3542 u32 headroom, 3543 int strategy) 3544 { 3545 u32 pbsize = hw->mac.rx_pb_size; 3546 int i = 0; 3547 u32 rxpktsize, txpktsize, txpbthresh; 3548 3549 /* Reserve headroom */ 3550 pbsize -= headroom; 3551 3552 if (!num_pb) 3553 num_pb = 1; 3554 3555 /* Divide remaining packet buffer space amongst the number 3556 * of packet buffers requested using supplied strategy. 3557 */ 3558 switch (strategy) { 3559 case (PBA_STRATEGY_WEIGHTED): 3560 /* pba_80_48 strategy weight first half of packet buffer with 3561 * 5/8 of the packet buffer space. 3562 */ 3563 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); 3564 pbsize -= rxpktsize * (num_pb / 2); 3565 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 3566 for (; i < (num_pb / 2); i++) 3567 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3568 /* fall through - configure remaining packet buffers */ 3569 case (PBA_STRATEGY_EQUAL): 3570 /* Divide the remaining Rx packet buffer evenly among the TCs */ 3571 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 3572 for (; i < num_pb; i++) 3573 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3574 break; 3575 default: 3576 break; 3577 } 3578 3579 /* 3580 * Setup Tx packet buffer and threshold equally for all TCs 3581 * TXPBTHRESH register is set in K so divide by 1024 and subtract 3582 * 10 since the largest packet we support is just over 9K. 3583 */ 3584 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 3585 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 3586 for (i = 0; i < num_pb; i++) { 3587 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 3588 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 3589 } 3590 3591 /* Clear unused TCs, if any, to zero buffer size*/ 3592 for (; i < IXGBE_MAX_PB; i++) { 3593 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 3594 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 3595 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 3596 } 3597 } 3598 3599 /** 3600 * ixgbe_calculate_checksum - Calculate checksum for buffer 3601 * @buffer: pointer to EEPROM 3602 * @length: size of EEPROM to calculate a checksum for 3603 * 3604 * Calculates the checksum for some buffer on a specified length. The 3605 * checksum calculated is returned. 3606 **/ 3607 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 3608 { 3609 u32 i; 3610 u8 sum = 0; 3611 3612 if (!buffer) 3613 return 0; 3614 3615 for (i = 0; i < length; i++) 3616 sum += buffer[i]; 3617 3618 return (u8) (0 - sum); 3619 } 3620 3621 /** 3622 * ixgbe_hic_unlocked - Issue command to manageability block unlocked 3623 * @hw: pointer to the HW structure 3624 * @buffer: command to write and where the return status will be placed 3625 * @length: length of buffer, must be multiple of 4 bytes 3626 * @timeout: time in ms to wait for command completion 3627 * 3628 * Communicates with the manageability block. On success return 0 3629 * else returns semaphore error when encountering an error acquiring 3630 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
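* For example, a 10-byte command fails the DWORD alignment check and
* returns IXGBE_ERR_INVALID_ARGUMENT, while a 12-byte command is
* written to the FLEX_MNG area as three DWORDs.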
3631 * 3632 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held 3633 * by the caller. 3634 **/ 3635 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, 3636 u32 timeout) 3637 { 3638 u32 hicr, i, fwsts; 3639 u16 dword_len; 3640 3641 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3642 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3643 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3644 } 3645 3646 /* Set bit 9 of FWSTS clearing FW reset indication */ 3647 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); 3648 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); 3649 3650 /* Check that the host interface is enabled. */ 3651 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3652 if (!(hicr & IXGBE_HICR_EN)) { 3653 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3654 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3655 } 3656 3657 /* Calculate length in DWORDs. We must be DWORD aligned */ 3658 if (length % sizeof(u32)) { 3659 hw_dbg(hw, "Buffer length failure, not aligned to dword"); 3660 return IXGBE_ERR_INVALID_ARGUMENT; 3661 } 3662 3663 dword_len = length >> 2; 3664 3665 /* The device driver writes the relevant command block 3666 * into the ram area. 3667 */ 3668 for (i = 0; i < dword_len; i++) 3669 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, 3670 i, cpu_to_le32(buffer[i])); 3671 3672 /* Setting this bit tells the ARC that a new command is pending. */ 3673 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); 3674 3675 for (i = 0; i < timeout; i++) { 3676 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3677 if (!(hicr & IXGBE_HICR_C)) 3678 break; 3679 usleep_range(1000, 2000); 3680 } 3681 3682 /* Check command successful completion. */ 3683 if ((timeout && i == timeout) || 3684 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) 3685 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3686 3687 return 0; 3688 } 3689 3690 /** 3691 * ixgbe_host_interface_command - Issue command to manageability block 3692 * @hw: pointer to the HW structure 3693 * @buffer: contains the command to write and where the return status will 3694 * be placed 3695 * @length: length of buffer, must be multiple of 4 bytes 3696 * @timeout: time in ms to wait for command completion 3697 * @return_data: read and return data from the buffer (true) or not (false) 3698 * Needed because FW structures are big endian and decoding of 3699 * these fields can be 8 bit or 16 bit based on command. Decoding 3700 * is not easily understood without making a table of commands. 3701 * So we will leave this up to the caller to read back the data 3702 * in these cases. 3703 * 3704 * Communicates with the manageability block. On success return 0 3705 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
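*
* Worked example (editor's sketch): a reply header with buf_len == 6
* is rounded up as (6 + 3) >> 2 = 2 DWORDs, so the trailing odd bytes
* are still pulled out of the FLEX_MNG area.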
3706 **/ 3707 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, 3708 u32 length, u32 timeout, 3709 bool return_data) 3710 { 3711 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3712 union { 3713 struct ixgbe_hic_hdr hdr; 3714 u32 u32arr[1]; 3715 } *bp = buffer; 3716 u16 buf_len, dword_len; 3717 s32 status; 3718 u32 bi; 3719 3720 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3721 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3722 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3723 } 3724 /* Take management host interface semaphore */ 3725 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3726 if (status) 3727 return status; 3728 3729 status = ixgbe_hic_unlocked(hw, buffer, length, timeout); 3730 if (status) 3731 goto rel_out; 3732 3733 if (!return_data) 3734 goto rel_out; 3735 3736 /* Calculate length in DWORDs */ 3737 dword_len = hdr_size >> 2; 3738 3739 /* first pull in the header so we know the buffer length */ 3740 for (bi = 0; bi < dword_len; bi++) { 3741 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3742 le32_to_cpus(&bp->u32arr[bi]); 3743 } 3744 3745 /* If there is any thing in data position pull it in */ 3746 buf_len = bp->hdr.buf_len; 3747 if (!buf_len) 3748 goto rel_out; 3749 3750 if (length < round_up(buf_len, 4) + hdr_size) { 3751 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3752 status = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3753 goto rel_out; 3754 } 3755 3756 /* Calculate length in DWORDs, add 3 for odd lengths */ 3757 dword_len = (buf_len + 3) >> 2; 3758 3759 /* Pull in the rest of the buffer (bi is where we left off) */ 3760 for (; bi <= dword_len; bi++) { 3761 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3762 le32_to_cpus(&bp->u32arr[bi]); 3763 } 3764 3765 rel_out: 3766 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3767 3768 return status; 3769 } 3770 3771 /** 3772 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware 3773 * @hw: pointer to the HW structure 3774 * @maj: driver version major number 3775 * @min: driver version minor number 3776 * @build: driver version build number 3777 * @sub: driver version sub build number 3778 * @len: length of driver_ver string 3779 * @driver_ver: driver string 3780 * 3781 * Sends driver version number to firmware through the manageability 3782 * block. On success return 0 3783 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 3784 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
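*
* The command is retried up to FW_CEM_MAX_RETRIES times; a response
* status other than FW_CEM_RESP_STATUS_SUCCESS is reported as
* IXGBE_ERR_HOST_INTERFACE_COMMAND.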
3785 **/ 3786 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 3787 u8 build, u8 sub, __always_unused u16 len, 3788 __always_unused const char *driver_ver) 3789 { 3790 struct ixgbe_hic_drv_info fw_cmd; 3791 int i; 3792 s32 ret_val; 3793 3794 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 3795 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 3796 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 3797 fw_cmd.port_num = hw->bus.func; 3798 fw_cmd.ver_maj = maj; 3799 fw_cmd.ver_min = min; 3800 fw_cmd.ver_build = build; 3801 fw_cmd.ver_sub = sub; 3802 fw_cmd.hdr.checksum = 0; 3803 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 3804 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 3805 fw_cmd.pad = 0; 3806 fw_cmd.pad2 = 0; 3807 3808 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 3809 ret_val = ixgbe_host_interface_command(hw, &fw_cmd, 3810 sizeof(fw_cmd), 3811 IXGBE_HI_COMMAND_TIMEOUT, 3812 true); 3813 if (ret_val != 0) 3814 continue; 3815 3816 if (fw_cmd.hdr.cmd_or_resp.ret_status == 3817 FW_CEM_RESP_STATUS_SUCCESS) 3818 ret_val = 0; 3819 else 3820 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3821 3822 break; 3823 } 3824 3825 return ret_val; 3826 } 3827 3828 /** 3829 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo 3830 * @hw: pointer to the hardware structure 3831 * 3832 * The 82599 and x540 MACs can experience issues if TX work is still pending 3833 * when a reset occurs. This function prevents this by flushing the PCIe 3834 * buffers on the system. 3835 **/ 3836 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) 3837 { 3838 u32 gcr_ext, hlreg0, i, poll; 3839 u16 value; 3840 3841 /* 3842 * If double reset is not requested then all transactions should 3843 * already be clear and as such there is no work to do 3844 */ 3845 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) 3846 return; 3847 3848 /* 3849 * Set loopback enable to prevent any transmits from being sent 3850 * should the link come up. This assumes that the RXCTRL.RXEN bit 3851 * has already been cleared. 3852 */ 3853 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3854 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); 3855 3856 /* wait for a last completion before clearing buffers */ 3857 IXGBE_WRITE_FLUSH(hw); 3858 usleep_range(3000, 6000); 3859 3860 /* Before proceeding, make sure that the PCIe block does not have 3861 * transactions pending. 
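 * The Transactions Pending bit of the PCIe Device Status register,
 * polled below via ixgbe_read_pci_cfg_word(), stays set until all
 * non-posted requests issued by the device have completed, so the
 * loop simply waits for the bit to clear (or for surprise removal
 * of the adapter).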
3862 */ 3863 poll = ixgbe_pcie_timeout_poll(hw); 3864 for (i = 0; i < poll; i++) { 3865 usleep_range(100, 200); 3866 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); 3867 if (ixgbe_removed(hw->hw_addr)) 3868 break; 3869 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 3870 break; 3871 } 3872 3873 /* initiate cleaning flow for buffers in the PCIe transaction layer */ 3874 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 3875 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 3876 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); 3877 3878 /* Flush all writes and allow 20usec for all transactions to clear */ 3879 IXGBE_WRITE_FLUSH(hw); 3880 udelay(20); 3881 3882 /* restore previous register values */ 3883 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3884 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3885 } 3886 3887 static const u8 ixgbe_emc_temp_data[4] = { 3888 IXGBE_EMC_INTERNAL_DATA, 3889 IXGBE_EMC_DIODE1_DATA, 3890 IXGBE_EMC_DIODE2_DATA, 3891 IXGBE_EMC_DIODE3_DATA 3892 }; 3893 static const u8 ixgbe_emc_therm_limit[4] = { 3894 IXGBE_EMC_INTERNAL_THERM_LIMIT, 3895 IXGBE_EMC_DIODE1_THERM_LIMIT, 3896 IXGBE_EMC_DIODE2_THERM_LIMIT, 3897 IXGBE_EMC_DIODE3_THERM_LIMIT 3898 }; 3899 3900 /** 3901 * ixgbe_get_ets_data - Extracts the ETS bit data 3902 * @hw: pointer to hardware structure 3903 * @ets_cfg: extracted ETS data 3904 * @ets_offset: offset of ETS data 3905 * 3906 * Returns error code. 3907 **/ 3908 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, 3909 u16 *ets_offset) 3910 { 3911 s32 status; 3912 3913 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); 3914 if (status) 3915 return status; 3916 3917 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) 3918 return IXGBE_NOT_IMPLEMENTED; 3919 3920 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); 3921 if (status) 3922 return status; 3923 3924 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) 3925 return IXGBE_NOT_IMPLEMENTED; 3926 3927 return 0; 3928 } 3929 3930 /** 3931 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data 3932 * @hw: pointer to hardware structure 3933 * 3934 * Gathers sensor readings into hw->mac.thermal_sensor_data and returns a status code. 3935 **/ 3936 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) 3937 { 3938 s32 status; 3939 u16 ets_offset; 3940 u16 ets_cfg; 3941 u16 ets_sensor; 3942 u8 num_sensors; 3943 u8 i; 3944 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3945 3946 /* Only support thermal sensors attached to physical port 0 */ 3947 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) 3948 return IXGBE_NOT_IMPLEMENTED; 3949 3950 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3951 if (status) 3952 return status; 3953 3954 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3955 if (num_sensors > IXGBE_MAX_SENSORS) 3956 num_sensors = IXGBE_MAX_SENSORS; 3957 3958 for (i = 0; i < num_sensors; i++) { 3959 u8 sensor_index; 3960 u8 sensor_location; 3961 3962 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), 3963 &ets_sensor); 3964 if (status) 3965 return status; 3966 3967 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3968 IXGBE_ETS_DATA_INDEX_SHIFT); 3969 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3970 IXGBE_ETS_DATA_LOC_SHIFT); 3971 3972 if (sensor_location != 0) { 3973 status = hw->phy.ops.read_i2c_byte(hw, 3974 ixgbe_emc_temp_data[sensor_index], 3975 IXGBE_I2C_THERMAL_SENSOR_ADDR, 3976 &data->sensor[i].temp); 3977 if (status) 3978 return status; 3979 } 3980 } 3981 3982 return 0; 3983 } 3984 3985 /** 3986 *
ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds 3987 * @hw: pointer to hardware structure 3988 * 3989 * Inits the thermal sensor thresholds according to the NVM map 3990 * and save off the threshold and location values into mac.thermal_sensor_data 3991 **/ 3992 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) 3993 { 3994 s32 status; 3995 u16 ets_offset; 3996 u16 ets_cfg; 3997 u16 ets_sensor; 3998 u8 low_thresh_delta; 3999 u8 num_sensors; 4000 u8 therm_limit; 4001 u8 i; 4002 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 4003 4004 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); 4005 4006 /* Only support thermal sensors attached to physical port 0 */ 4007 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) 4008 return IXGBE_NOT_IMPLEMENTED; 4009 4010 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 4011 if (status) 4012 return status; 4013 4014 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> 4015 IXGBE_ETS_LTHRES_DELTA_SHIFT); 4016 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 4017 if (num_sensors > IXGBE_MAX_SENSORS) 4018 num_sensors = IXGBE_MAX_SENSORS; 4019 4020 for (i = 0; i < num_sensors; i++) { 4021 u8 sensor_index; 4022 u8 sensor_location; 4023 4024 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { 4025 hw_err(hw, "eeprom read at offset %d failed\n", 4026 ets_offset + 1 + i); 4027 continue; 4028 } 4029 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 4030 IXGBE_ETS_DATA_INDEX_SHIFT); 4031 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 4032 IXGBE_ETS_DATA_LOC_SHIFT); 4033 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; 4034 4035 hw->phy.ops.write_i2c_byte(hw, 4036 ixgbe_emc_therm_limit[sensor_index], 4037 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); 4038 4039 if (sensor_location == 0) 4040 continue; 4041 4042 data->sensor[i].location = sensor_location; 4043 data->sensor[i].caution_thresh = therm_limit; 4044 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; 4045 } 4046 4047 return 0; 4048 } 4049 4050 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) 4051 { 4052 u32 rxctrl; 4053 4054 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4055 if (rxctrl & IXGBE_RXCTRL_RXEN) { 4056 if (hw->mac.type != ixgbe_mac_82598EB) { 4057 u32 pfdtxgswc; 4058 4059 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4060 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { 4061 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; 4062 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4063 hw->mac.set_lben = true; 4064 } else { 4065 hw->mac.set_lben = false; 4066 } 4067 } 4068 rxctrl &= ~IXGBE_RXCTRL_RXEN; 4069 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); 4070 } 4071 } 4072 4073 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) 4074 { 4075 u32 rxctrl; 4076 4077 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4078 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); 4079 4080 if (hw->mac.type != ixgbe_mac_82598EB) { 4081 if (hw->mac.set_lben) { 4082 u32 pfdtxgswc; 4083 4084 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4085 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; 4086 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4087 hw->mac.set_lben = false; 4088 } 4089 } 4090 } 4091 4092 /** ixgbe_mng_present - returns true when management capability is present 4093 * @hw: pointer to hardware structure 4094 **/ 4095 bool ixgbe_mng_present(struct ixgbe_hw *hw) 4096 { 4097 u32 fwsm; 4098 4099 if (hw->mac.type < ixgbe_mac_82599EB) 4100 return false; 4101 4102 fwsm = 
IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 4103 fwsm &= IXGBE_FWSM_MODE_MASK; 4104 return fwsm == IXGBE_FWSM_FW_MODE_PT; 4105 } 4106 4107 /** 4108 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 4109 * @hw: pointer to hardware structure 4110 * @speed: new link speed 4111 * @autoneg_wait_to_complete: true when waiting for completion is needed 4112 * 4113 * Set the link speed in the MAC and/or PHY register and restarts link. 4114 */ 4115 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 4116 ixgbe_link_speed speed, 4117 bool autoneg_wait_to_complete) 4118 { 4119 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4120 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4121 s32 status = 0; 4122 u32 speedcnt = 0; 4123 u32 i = 0; 4124 bool autoneg, link_up = false; 4125 4126 /* Mask off requested but non-supported speeds */ 4127 status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); 4128 if (status) 4129 return status; 4130 4131 speed &= link_speed; 4132 4133 /* Try each speed one by one, highest priority first. We do this in 4134 * software because 10Gb fiber doesn't support speed autonegotiation. 4135 */ 4136 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 4137 speedcnt++; 4138 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 4139 4140 /* Set the module link speed */ 4141 switch (hw->phy.media_type) { 4142 case ixgbe_media_type_fiber: 4143 hw->mac.ops.set_rate_select_speed(hw, 4144 IXGBE_LINK_SPEED_10GB_FULL); 4145 break; 4146 case ixgbe_media_type_fiber_qsfp: 4147 /* QSFP module automatically detects MAC link speed */ 4148 break; 4149 default: 4150 hw_dbg(hw, "Unexpected media type\n"); 4151 break; 4152 } 4153 4154 /* Allow module to change analog characteristics (1G->10G) */ 4155 msleep(40); 4156 4157 status = hw->mac.ops.setup_mac_link(hw, 4158 IXGBE_LINK_SPEED_10GB_FULL, 4159 autoneg_wait_to_complete); 4160 if (status) 4161 return status; 4162 4163 /* Flap the Tx laser if it has not already been done */ 4164 if (hw->mac.ops.flap_tx_laser) 4165 hw->mac.ops.flap_tx_laser(hw); 4166 4167 /* Wait for the controller to acquire link. Per IEEE 802.3ap, 4168 * Section 73.10.2, we may have to wait up to 500ms if KR is 4169 * attempted. 82599 uses the same timing for 10g SFI. 
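	 * The loop below therefore polls in 100 msec steps, five times, to
	 * cover that worst-case 500 msec window before giving up and falling
	 * back to the next lower speed.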
4170 */ 4171 for (i = 0; i < 5; i++) { 4172 /* Wait for the link partner to also set speed */ 4173 msleep(100); 4174 4175 /* If we have link, just jump out */ 4176 status = hw->mac.ops.check_link(hw, &link_speed, 4177 &link_up, false); 4178 if (status) 4179 return status; 4180 4181 if (link_up) 4182 goto out; 4183 } 4184 } 4185 4186 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 4187 speedcnt++; 4188 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 4189 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 4190 4191 /* Set the module link speed */ 4192 switch (hw->phy.media_type) { 4193 case ixgbe_media_type_fiber: 4194 hw->mac.ops.set_rate_select_speed(hw, 4195 IXGBE_LINK_SPEED_1GB_FULL); 4196 break; 4197 case ixgbe_media_type_fiber_qsfp: 4198 /* QSFP module automatically detects link speed */ 4199 break; 4200 default: 4201 hw_dbg(hw, "Unexpected media type\n"); 4202 break; 4203 } 4204 4205 /* Allow module to change analog characteristics (10G->1G) */ 4206 msleep(40); 4207 4208 status = hw->mac.ops.setup_mac_link(hw, 4209 IXGBE_LINK_SPEED_1GB_FULL, 4210 autoneg_wait_to_complete); 4211 if (status) 4212 return status; 4213 4214 /* Flap the Tx laser if it has not already been done */ 4215 if (hw->mac.ops.flap_tx_laser) 4216 hw->mac.ops.flap_tx_laser(hw); 4217 4218 /* Wait for the link partner to also set speed */ 4219 msleep(100); 4220 4221 /* If we have link, just jump out */ 4222 status = hw->mac.ops.check_link(hw, &link_speed, &link_up, 4223 false); 4224 if (status) 4225 return status; 4226 4227 if (link_up) 4228 goto out; 4229 } 4230 4231 /* We didn't get link. Configure back to the highest speed we tried, 4232 * (if there was more than one). We call ourselves back with just the 4233 * single highest speed that the user requested. 4234 */ 4235 if (speedcnt > 1) 4236 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 4237 highest_link_speed, 4238 autoneg_wait_to_complete); 4239 4240 out: 4241 /* Set autoneg_advertised value based on input link speed */ 4242 hw->phy.autoneg_advertised = 0; 4243 4244 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 4245 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 4246 4247 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 4248 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 4249 4250 return status; 4251 } 4252 4253 /** 4254 * ixgbe_set_soft_rate_select_speed - Set module link speed 4255 * @hw: pointer to hardware structure 4256 * @speed: link speed to set 4257 * 4258 * Set module link speed via the soft rate select. 
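 *
 * Per SFF-8472, soft rate select is a read-modify-write of the RS0 and
 * RS1 bits in the module's A2h status/control bytes; both bits are
 * programmed to the same value below so the receive and transmit paths
 * select the same rate.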
4259 */ 4260 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, 4261 ixgbe_link_speed speed) 4262 { 4263 s32 status; 4264 u8 rs, eeprom_data; 4265 4266 switch (speed) { 4267 case IXGBE_LINK_SPEED_10GB_FULL: 4268 /* rate select is a one-bit mask, so the mask value also sets the bit */ 4269 rs = IXGBE_SFF_SOFT_RS_SELECT_10G; 4270 break; 4271 case IXGBE_LINK_SPEED_1GB_FULL: 4272 rs = IXGBE_SFF_SOFT_RS_SELECT_1G; 4273 break; 4274 default: 4275 hw_dbg(hw, "Invalid fixed module speed\n"); 4276 return; 4277 } 4278 4279 /* Set RS0 */ 4280 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4281 IXGBE_I2C_EEPROM_DEV_ADDR2, 4282 &eeprom_data); 4283 if (status) { 4284 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); 4285 return; 4286 } 4287 4288 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4289 4290 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4291 IXGBE_I2C_EEPROM_DEV_ADDR2, 4292 eeprom_data); 4293 if (status) { 4294 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); 4295 return; 4296 } 4297 4298 /* Set RS1 */ 4299 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4300 IXGBE_I2C_EEPROM_DEV_ADDR2, 4301 &eeprom_data); 4302 if (status) { 4303 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); 4304 return; 4305 } 4306 4307 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4308 4309 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4310 IXGBE_I2C_EEPROM_DEV_ADDR2, 4311 eeprom_data); 4312 if (status) { 4313 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); 4314 return; 4315 } 4316 } 4317
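/* Usage sketch (illustrative only): MAC-specific init code points the
 * rate-select op at the helper above, e.g.
 *
 *	hw->mac.ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed;
 *
 * after which ixgbe_setup_mac_link_multispeed_fiber() invokes
 * hw->mac.ops.set_rate_select_speed() for each speed it attempts, as seen
 * earlier in this file.
 */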