// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/netdevice.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);

/* Base table for register values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(8259X)
};
/**
 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
 * control
 * @hw: pointer to hardware structure
 *
 * There are several phys that do not support autoneg flow control. This
 * function checks the device id to see if the associated phy supports
 * autoneg flow control.
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	bool supported = false;
	ixgbe_link_speed speed;
	bool link_up;

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber:
		/* flow control autoneg black list */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_X550EM_A_SFP:
		case IXGBE_DEV_ID_X550EM_A_SFP_N:
			supported = false;
			break;
		default:
			hw->mac.ops.check_link(hw, &speed, &link_up, false);
			/* if link is down, assume supported */
			if (link_up)
				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
				true : false;
			else
				supported = true;
		}

		break;
	case ixgbe_media_type_backplane:
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
			supported = false;
		else
			supported = true;
		break;
	case ixgbe_media_type_copper:
		/* only some copper devices support flow control autoneg */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM:
		case IXGBE_DEV_ID_X540T:
		case IXGBE_DEV_ID_X540T1:
		case IXGBE_DEV_ID_X550T:
		case IXGBE_DEV_ID_X550T1:
		case IXGBE_DEV_ID_X550EM_X_10G_T:
		case IXGBE_DEV_ID_X550EM_A_10G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
			supported = true;
			break;
		default:
			break;
		}
	default:
		break;
	}

	if (!supported)
		hw_dbg(hw, "Device %x does not support flow control autoneg\n",
		       hw->device_id);

	return supported;
}
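/*
 * Example (illustrative sketch, not part of the upstream driver): callers
 * typically gate pause-frame autonegotiation on this helper before touching
 * the advertisement registers; the fc.disable_fc_autoneg field is assumed
 * to be the consumer, as in the ethtool pause path.
 *
 *	hw->fc.disable_fc_autoneg = !ixgbe_device_supports_autoneg_fc(hw);
 */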
/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;
	bool locked = false;

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		return IXGBE_ERR_INVALID_LINK_SETTINGS;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MACs need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val)
			return ret_val;

		/* fall through - only backplane uses autoc */
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return IXGBE_ERR_CONFIG;
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on, likewise reset_pipeline requires the lock as
		 * it also writes AUTOC.
		 */
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			return ret_val;

	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   ixgbe_device_supports_autoneg_fc(hw)) {
		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN, reg_cu);
	}

	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
	return ret_val;
}
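/*
 * Example (hypothetical sketch): the requested_mode consumed above is
 * normally derived from ethtool pause parameters before flow control is
 * reconfigured; "pause" is assumed to be a struct ethtool_pauseparam. The
 * mapping mirrors the mode table documented inside ixgbe_setup_fc_generic().
 *
 *	if (pause->rx_pause && pause->tx_pause)
 *		hw->fc.requested_mode = ixgbe_fc_full;
 *	else if (pause->rx_pause)
 *		hw->fc.requested_mode = ixgbe_fc_rx_pause;
 *	else if (pause->tx_pause)
 *		hw->fc.requested_mode = ixgbe_fc_tx_pause;
 *	else
 *		hw->fc.requested_mode = ixgbe_fc_none;
 */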
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type,
 * clearing all on chip counters, initializing receive address registers,
 * the multicast table and the VLAN filter table, calling the routine to
 * set up link and flow control settings, and leaving the transmit and
 * receive units disabled and uninitialized.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the PHY */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Set up flow control if there is a method for doing so */
	if (hw->mac.ops.setup_fc) {
		ret_val = hw->mac.ops.setup_fc(hw);
		if (ret_val)
			return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = false;
		else
			hw->need_crosstalk_fix = true;
		break;
	default:
		hw->need_crosstalk_fix = false;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}
/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initializes the hardware by resetting it, filling the bus info structure
 * and media type, clearing all on chip counters, initializing receive
 * address registers, the multicast table and the VLAN filter table, calling
 * the routine to set up link and flow control settings, and leaving the
 * transmit and receive units disabled and uninitialized.
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == 0) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	/* Initialize the LED link active for LED blink support */
	if (hw->mac.ops.init_led_link_act)
		hw->mac.ops.init_led_link_act(hw);

	return status;
}
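/*
 * Example (sketch, assuming a probe-time context with a struct pci_dev
 * *pdev): drivers reach this path through the MAC ops table rather than
 * calling the generic routine directly, so MAC-specific reset_hw/start_hw
 * overrides are picked up automatically.
 *
 *	status = hw->mac.ops.init_hw(hw);
 *	if (status)
 *		dev_err(&pdev->dev, "HW init failed: %d\n", status);
 */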
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware.
 * Statistics counters are clear on read.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return 0;
}

enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		return ixgbe_bus_width_pcie_x1;
	case IXGBE_PCI_LINK_WIDTH_2:
		return ixgbe_bus_width_pcie_x2;
	case IXGBE_PCI_LINK_WIDTH_4:
		return ixgbe_bus_width_pcie_x4;
	case IXGBE_PCI_LINK_WIDTH_8:
		return ixgbe_bus_width_pcie_x8;
	default:
		return ixgbe_bus_width_unknown;
	}
}

enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		return ixgbe_bus_speed_2500;
	case IXGBE_PCI_LINK_SPEED_5000:
		return ixgbe_bus_speed_5000;
	case IXGBE_PCI_LINK_SPEED_8000:
		return ixgbe_bus_speed_8000;
	default:
		return ixgbe_bus_speed_unknown;
	}
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	hw->mac.ops.set_lan_id(hw);

	return 0;
}
/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 ee_ctrl_4;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
	}
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	hw->mac.ops.disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queue disables */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
/**
 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
 * @hw: pointer to hardware structure
 *
 * Store the index for the link active LED. This will be used to support
 * blinking the LED.
 **/
s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u16 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		    IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return 0;
		}
	}

	/* If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_x550em_a:
		mac->led_link_act = 0;
		break;
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}

	return 0;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* To turn on the LED, set mode to ON. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
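/*
 * Example (sketch): an ethtool "identify adapter" blink can be built on
 * these helpers using the link-active LED index cached by
 * ixgbe_init_led_link_act_generic().
 *
 *	ixgbe_led_on_generic(hw, hw->mac.led_link_act);
 *	msleep(500);
 *	ixgbe_led_off_generic(hw, hw->mac.led_link_act);
 */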
/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = BIT(eeprom_size +
						IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
		       eeprom->type, eeprom->word_size, eeprom->address_bits);
	}

	return 0;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of words
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset + words > hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worthwhile to do so when writing a large
	 * buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

	return status;
}
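/*
 * Example (sketch): EEPROM writes go through the eeprom ops table, and the
 * checksum word must be refreshed afterwards or later validation will fail.
 * The word offset 0x10 is purely illustrative.
 *
 *	u16 word = 0xABCD;
 *
 *	if (!hw->eeprom.ops.write(hw, 0x10, word))
 *		hw->eeprom.ops.update_checksum(hw);
 */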
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return IXGBE_ERR_EEPROM;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		ixgbe_shift_out_eeprom_bits(hw,
					    IXGBE_EEPROM_WREN_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		page_size = hw->eeprom.word_page_size;

		/* Send the data in burst via SPI */
		do {
			word = data[i];
			word = (word >> 8) | (word << 8);
			ixgbe_shift_out_eeprom_bits(hw, word, 16);

			if (page_size == 0)
				break;

			/* do not wrap around page */
			if (((offset + i) & (page_size - 1)) ==
			    (page_size - 1))
				break;
		} while (++i < words);

		ixgbe_standby_eeprom(hw);
		usleep_range(10000, 20000);
	}
	/* Done with writing - release the EEPROM */
	ixgbe_release_eeprom(hw);

	return 0;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}
/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit words(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset + words > hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return IXGBE_ERR_EEPROM;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);
		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		/* Read the data. */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	/* End this read operation */
	ixgbe_release_eeprom(hw);

	return 0;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	for (i = 0; i < words; i++) {
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status;
	u16 i;

	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status)
		return status;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status)
		return status;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
	return 0;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
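/*
 * Example (sketch): reading a single NVM word through the EERD interface;
 * the checksum word offset is used purely as an illustration.
 *
 *	u16 word;
 *
 *	if (!ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &word))
 *		hw_dbg(hw, "NVM checksum word = 0x%04x\n", word);
 */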
/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) to write to the EEPROM
 *
 * Writes 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Writes a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE)
			return 0;
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}
/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Request EEPROM Access */
	eec |= IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_GNT)
			break;
		udelay(5);
	}

	/* Release if grant not acquired */
	if (!(eec & IXGBE_EEC_GNT)) {
		eec &= ~IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		hw_dbg(hw, "Could not acquire EEPROM grant\n");

		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return IXGBE_ERR_EEPROM;
	}

	/* Setup EEPROM for Read/Write */
	/* Clear CS and SK */
	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	return 0;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/* this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usleep_range(50, 100);
		/* one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SMBI) {
			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
			return IXGBE_ERR_EEPROM;
		}
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

		/* Set the SW EEPROM semaphore bit to request access */
		swsm |= IXGBE_SWSM_SWESMBI;
		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

		/* If we set the bit successfully then we got the
		 * semaphore.
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW EEPROM semaphore
	 * was not granted because we don't have access to the EEPROM
	 */
	if (i >= timeout) {
		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
		ixgbe_release_eeprom_semaphore(hw);
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}
/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = BIT(count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
A "0" is 1585 * shifted out to the EEPROM by setting "DI" to "0" and then 1586 * raising and then lowering the clock. 1587 */ 1588 if (data & mask) 1589 eec |= IXGBE_EEC_DI; 1590 else 1591 eec &= ~IXGBE_EEC_DI; 1592 1593 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1594 IXGBE_WRITE_FLUSH(hw); 1595 1596 udelay(1); 1597 1598 ixgbe_raise_eeprom_clk(hw, &eec); 1599 ixgbe_lower_eeprom_clk(hw, &eec); 1600 1601 /* 1602 * Shift mask to signify next bit of data to shift in to the 1603 * EEPROM 1604 */ 1605 mask = mask >> 1; 1606 } 1607 1608 /* We leave the "DI" bit set to "0" when we leave this routine. */ 1609 eec &= ~IXGBE_EEC_DI; 1610 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1611 IXGBE_WRITE_FLUSH(hw); 1612 } 1613 1614 /** 1615 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 1616 * @hw: pointer to hardware structure 1617 * @count: number of bits to shift 1618 **/ 1619 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 1620 { 1621 u32 eec; 1622 u32 i; 1623 u16 data = 0; 1624 1625 /* 1626 * In order to read a register from the EEPROM, we need to shift 1627 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 1628 * the clock input to the EEPROM (setting the SK bit), and then reading 1629 * the value of the "DO" bit. During this "shifting in" process the 1630 * "DI" bit should always be clear. 1631 */ 1632 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1633 1634 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 1635 1636 for (i = 0; i < count; i++) { 1637 data = data << 1; 1638 ixgbe_raise_eeprom_clk(hw, &eec); 1639 1640 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1641 1642 eec &= ~(IXGBE_EEC_DI); 1643 if (eec & IXGBE_EEC_DO) 1644 data |= 1; 1645 1646 ixgbe_lower_eeprom_clk(hw, &eec); 1647 } 1648 1649 return data; 1650 } 1651 1652 /** 1653 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 1654 * @hw: pointer to hardware structure 1655 * @eec: EEC register's current value 1656 **/ 1657 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1658 { 1659 /* 1660 * Raise the clock input to the EEPROM 1661 * (setting the SK bit), then delay 1662 */ 1663 *eec = *eec | IXGBE_EEC_SK; 1664 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1665 IXGBE_WRITE_FLUSH(hw); 1666 udelay(1); 1667 } 1668 1669 /** 1670 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 1671 * @hw: pointer to hardware structure 1672 * @eec: EEC's current value 1673 **/ 1674 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1675 { 1676 /* 1677 * Lower the clock input to the EEPROM (clearing the SK bit), then 1678 * delay 1679 */ 1680 *eec = *eec & ~IXGBE_EEC_SK; 1681 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1682 IXGBE_WRITE_FLUSH(hw); 1683 udelay(1); 1684 } 1685 1686 /** 1687 * ixgbe_release_eeprom - Release EEPROM, release semaphores 1688 * @hw: pointer to hardware structure 1689 **/ 1690 static void ixgbe_release_eeprom(struct ixgbe_hw *hw) 1691 { 1692 u32 eec; 1693 1694 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1695 1696 eec |= IXGBE_EEC_CS; /* Pull CS high */ 1697 eec &= ~IXGBE_EEC_SK; /* Lower SCK */ 1698 1699 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1700 IXGBE_WRITE_FLUSH(hw); 1701 1702 udelay(1); 1703 1704 /* Stop requesting EEPROM access */ 1705 eec &= ~IXGBE_EEC_REQ; 1706 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1707 1708 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1709 1710 /* 1711 * Delay before attempt to obtain semaphore again to allow FW 1712 * access. 
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}
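/*
 * Example (sketch): the checksum covers words 0x0-0x3F plus every pointed-to
 * section, and the EEPROM is valid when the stored word brings the sum to
 * IXGBE_EEPROM_SUM. A caller usually only cares about the status:
 *
 *	u16 checksum;
 *
 *	if (ixgbe_validate_eeprom_checksum_generic(hw, &checksum))
 *		hw_dbg(hw, "EEPROM checksum invalid (calc 0x%04x)\n", checksum);
 */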
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
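/*
 * Example (sketch): programming a hypothetical secondary unicast address
 * into RAR 1, VMDq pool 0, and marking the entry valid.
 *
 *	u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	ixgbe_set_rar_generic(hw, 1, addr, 0, IXGBE_RAH_AV);
 */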
1943 * @hw: pointer to hardware structure
1944 *
1945 * Places the MAC address in receive address register 0 and clears the rest
1946 * of the receive address registers. Clears the multicast table. Assumes
1947 * the receiver is in reset when the routine is called.
1948 **/
1949 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1950 {
1951 u32 i;
1952 u32 rar_entries = hw->mac.num_rar_entries;
1953
1954 /*
1955 * If the current MAC address is valid, assume it is a software override
1956 * to the permanent address.
1957 * Otherwise, use the permanent address from the EEPROM.
1958 */
1959 if (!is_valid_ether_addr(hw->mac.addr)) {
1960 /* Get the MAC address from the RAR0 for later reference */
1961 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1962
1963 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1964 } else {
1965 /* Setup the receive address. */
1966 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1967 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1968
1969 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1970 }
1971
1972 /* clear VMDq pool/queue selection for RAR 0 */
1973 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1974
1975 hw->addr_ctrl.overflow_promisc = 0;
1976
1977 hw->addr_ctrl.rar_used_count = 1;
1978
1979 /* Zero out the other receive addresses. */
1980 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1981 for (i = 1; i < rar_entries; i++) {
1982 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1983 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1984 }
1985
1986 /* Clear the MTA */
1987 hw->addr_ctrl.mta_in_use = 0;
1988 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1989
1990 hw_dbg(hw, " Clearing MTA\n");
1991 for (i = 0; i < hw->mac.mcft_size; i++)
1992 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1993
1994 if (hw->mac.ops.init_uta_tables)
1995 hw->mac.ops.init_uta_tables(hw);
1996
1997 return 0;
1998 }
1999
2000 /**
2001 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2002 * @hw: pointer to hardware structure
2003 * @mc_addr: the multicast address
2004 *
2005 * Extracts 12 bits from a multicast address to determine which
2006 * bit-vector to set in the multicast table. The hardware uses 12 bits from
2007 * incoming rx multicast addresses to determine the bit-vector to check in
2008 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
2009 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2010 * to mc_filter_type.
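*
* As a worked example (derived only from the shift logic in the switch
* below): for the IPv4 multicast MAC 01:00:5e:00:00:01, mc_addr[4] = 0x00
* and mc_addr[5] = 0x01, so mc_filter_type 0 gives
* vector = (0x00 >> 4) | (0x01 << 4) = 0x010; ixgbe_set_mta() then maps
* this to MTA register 0 (0x010 >> 5) and bit 16 (0x010 & 0x1F).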
2011 **/ 2012 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 2013 { 2014 u32 vector = 0; 2015 2016 switch (hw->mac.mc_filter_type) { 2017 case 0: /* use bits [47:36] of the address */ 2018 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 2019 break; 2020 case 1: /* use bits [46:35] of the address */ 2021 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 2022 break; 2023 case 2: /* use bits [45:34] of the address */ 2024 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 2025 break; 2026 case 3: /* use bits [43:32] of the address */ 2027 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 2028 break; 2029 default: /* Invalid mc_filter_type */ 2030 hw_dbg(hw, "MC filter type param set incorrectly\n"); 2031 break; 2032 } 2033 2034 /* vector can only be 12-bits or boundary will be exceeded */ 2035 vector &= 0xFFF; 2036 return vector; 2037 } 2038 2039 /** 2040 * ixgbe_set_mta - Set bit-vector in multicast table 2041 * @hw: pointer to hardware structure 2042 * @mc_addr: Multicast address 2043 * 2044 * Sets the bit-vector in the multicast table. 2045 **/ 2046 static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) 2047 { 2048 u32 vector; 2049 u32 vector_bit; 2050 u32 vector_reg; 2051 2052 hw->addr_ctrl.mta_in_use++; 2053 2054 vector = ixgbe_mta_vector(hw, mc_addr); 2055 hw_dbg(hw, " bit-vector = 0x%03X\n", vector); 2056 2057 /* 2058 * The MTA is a register array of 128 32-bit registers. It is treated 2059 * like an array of 4096 bits. We want to set bit 2060 * BitArray[vector_value]. So we figure out what register the bit is 2061 * in, read it, OR in the new bit, then write back the new value. The 2062 * register is determined by the upper 7 bits of the vector value and 2063 * the bit within that register are determined by the lower 5 bits of 2064 * the value. 2065 */ 2066 vector_reg = (vector >> 5) & 0x7F; 2067 vector_bit = vector & 0x1F; 2068 hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); 2069 } 2070 2071 /** 2072 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 2073 * @hw: pointer to hardware structure 2074 * @netdev: pointer to net device structure 2075 * 2076 * The given list replaces any existing list. Clears the MC addrs from receive 2077 * address registers and the multicast table. Uses unused receive address 2078 * registers for the first multicast addresses, and hashes the rest into the 2079 * multicast table. 2080 **/ 2081 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 2082 struct net_device *netdev) 2083 { 2084 struct netdev_hw_addr *ha; 2085 u32 i; 2086 2087 /* 2088 * Set the new number of MC addresses that we are being requested to 2089 * use. 
2090 */ 2091 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 2092 hw->addr_ctrl.mta_in_use = 0; 2093 2094 /* Clear mta_shadow */ 2095 hw_dbg(hw, " Clearing MTA\n"); 2096 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 2097 2098 /* Update mta shadow */ 2099 netdev_for_each_mc_addr(ha, netdev) { 2100 hw_dbg(hw, " Adding the multicast addresses:\n"); 2101 ixgbe_set_mta(hw, ha->addr); 2102 } 2103 2104 /* Enable mta */ 2105 for (i = 0; i < hw->mac.mcft_size; i++) 2106 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, 2107 hw->mac.mta_shadow[i]); 2108 2109 if (hw->addr_ctrl.mta_in_use > 0) 2110 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 2111 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2112 2113 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 2114 return 0; 2115 } 2116 2117 /** 2118 * ixgbe_enable_mc_generic - Enable multicast address in RAR 2119 * @hw: pointer to hardware structure 2120 * 2121 * Enables multicast address in RAR and the use of the multicast hash table. 2122 **/ 2123 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 2124 { 2125 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2126 2127 if (a->mta_in_use > 0) 2128 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2129 hw->mac.mc_filter_type); 2130 2131 return 0; 2132 } 2133 2134 /** 2135 * ixgbe_disable_mc_generic - Disable multicast address in RAR 2136 * @hw: pointer to hardware structure 2137 * 2138 * Disables multicast address in RAR and the use of the multicast hash table. 2139 **/ 2140 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2141 { 2142 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2143 2144 if (a->mta_in_use > 0) 2145 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2146 2147 return 0; 2148 } 2149 2150 /** 2151 * ixgbe_fc_enable_generic - Enable flow control 2152 * @hw: pointer to hardware structure 2153 * 2154 * Enable flow control according to the current settings. 2155 **/ 2156 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2157 { 2158 u32 mflcn_reg, fccfg_reg; 2159 u32 reg; 2160 u32 fcrtl, fcrth; 2161 int i; 2162 2163 /* Validate the water mark configuration. */ 2164 if (!hw->fc.pause_time) 2165 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2166 2167 /* Low water mark of zero causes XOFF floods */ 2168 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2169 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2170 hw->fc.high_water[i]) { 2171 if (!hw->fc.low_water[i] || 2172 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2173 hw_dbg(hw, "Invalid water mark configuration\n"); 2174 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2175 } 2176 } 2177 } 2178 2179 /* Negotiate the fc mode to use */ 2180 hw->mac.ops.fc_autoneg(hw); 2181 2182 /* Disable any previous flow control settings */ 2183 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2184 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2185 2186 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2187 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2188 2189 /* 2190 * The possible values of fc.current_mode are: 2191 * 0: Flow control is completely disabled 2192 * 1: Rx flow control is enabled (we can receive pause frames, 2193 * but not send pause frames). 2194 * 2: Tx flow control is enabled (we can send pause frames but 2195 * we do not support receiving pause frames). 2196 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2197 * other: Invalid. 2198 */ 2199 switch (hw->fc.current_mode) { 2200 case ixgbe_fc_none: 2201 /* 2202 * Flow control is disabled by software override or autoneg. 
2203 * The code below will actually disable it in the HW. 2204 */ 2205 break; 2206 case ixgbe_fc_rx_pause: 2207 /* 2208 * Rx Flow control is enabled and Tx Flow control is 2209 * disabled by software override. Since there really 2210 * isn't a way to advertise that we are capable of RX 2211 * Pause ONLY, we will advertise that we support both 2212 * symmetric and asymmetric Rx PAUSE. Later, we will 2213 * disable the adapter's ability to send PAUSE frames. 2214 */ 2215 mflcn_reg |= IXGBE_MFLCN_RFCE; 2216 break; 2217 case ixgbe_fc_tx_pause: 2218 /* 2219 * Tx Flow control is enabled, and Rx Flow control is 2220 * disabled by software override. 2221 */ 2222 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2223 break; 2224 case ixgbe_fc_full: 2225 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2226 mflcn_reg |= IXGBE_MFLCN_RFCE; 2227 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2228 break; 2229 default: 2230 hw_dbg(hw, "Flow control param set incorrectly\n"); 2231 return IXGBE_ERR_CONFIG; 2232 } 2233 2234 /* Set 802.3x based flow control settings. */ 2235 mflcn_reg |= IXGBE_MFLCN_DPF; 2236 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2237 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2238 2239 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2240 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2241 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2242 hw->fc.high_water[i]) { 2243 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 2244 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2245 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2246 } else { 2247 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2248 /* 2249 * In order to prevent Tx hangs when the internal Tx 2250 * switch is enabled we must set the high water mark 2251 * to the Rx packet buffer size - 24KB. This allows 2252 * the Tx switch to function even under heavy Rx 2253 * workloads. 2254 */ 2255 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; 2256 } 2257 2258 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2259 } 2260 2261 /* Configure pause time (2 TCs per register) */ 2262 reg = hw->fc.pause_time * 0x00010001; 2263 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 2264 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2265 2266 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2267 2268 return 0; 2269 } 2270 2271 /** 2272 * ixgbe_negotiate_fc - Negotiate flow control 2273 * @hw: pointer to hardware structure 2274 * @adv_reg: flow control advertised settings 2275 * @lp_reg: link partner's flow control settings 2276 * @adv_sym: symmetric pause bit in advertisement 2277 * @adv_asm: asymmetric pause bit in advertisement 2278 * @lp_sym: symmetric pause bit in link partner advertisement 2279 * @lp_asm: asymmetric pause bit in link partner advertisement 2280 * 2281 * Find the intersection between advertised settings and link partner's 2282 * advertised settings 2283 **/ 2284 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2285 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2286 { 2287 if ((!(adv_reg)) || (!(lp_reg))) 2288 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2289 2290 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2291 /* 2292 * Now we need to check if the user selected Rx ONLY 2293 * of pause frames. In this case, we had to advertise 2294 * FULL flow control because we could not advertise RX 2295 * ONLY. Hence, we must now check to see if we need to 2296 * turn OFF the TRANSMISSION of PAUSE frames. 
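*
* For reference, the overall resolution implemented by this if/else
* chain follows IEEE 802.3 Annex 28B (Sym = symmetric PAUSE bit,
* Asm = asymmetric PAUSE bit):
*
*	Local Sym/Asm	LP Sym/Asm	Resolved fc.current_mode
*	  1 / x		  1 / x		full (or rx_pause if only Rx
*					was requested)
*	  0 / 1		  1 / 1		tx_pause
*	  1 / 1		  0 / 1		rx_pause
*	  otherwise			none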
2297 */ 2298 if (hw->fc.requested_mode == ixgbe_fc_full) { 2299 hw->fc.current_mode = ixgbe_fc_full; 2300 hw_dbg(hw, "Flow Control = FULL.\n"); 2301 } else { 2302 hw->fc.current_mode = ixgbe_fc_rx_pause; 2303 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); 2304 } 2305 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2306 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2307 hw->fc.current_mode = ixgbe_fc_tx_pause; 2308 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); 2309 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2310 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2311 hw->fc.current_mode = ixgbe_fc_rx_pause; 2312 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 2313 } else { 2314 hw->fc.current_mode = ixgbe_fc_none; 2315 hw_dbg(hw, "Flow Control = NONE.\n"); 2316 } 2317 return 0; 2318 } 2319 2320 /** 2321 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2322 * @hw: pointer to hardware structure 2323 * 2324 * Enable flow control according on 1 gig fiber. 2325 **/ 2326 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2327 { 2328 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2329 s32 ret_val; 2330 2331 /* 2332 * On multispeed fiber at 1g, bail out if 2333 * - link is up but AN did not complete, or if 2334 * - link is up and AN completed but timed out 2335 */ 2336 2337 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2338 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2339 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) 2340 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2341 2342 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2343 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2344 2345 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2346 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2347 IXGBE_PCS1GANA_ASM_PAUSE, 2348 IXGBE_PCS1GANA_SYM_PAUSE, 2349 IXGBE_PCS1GANA_ASM_PAUSE); 2350 2351 return ret_val; 2352 } 2353 2354 /** 2355 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2356 * @hw: pointer to hardware structure 2357 * 2358 * Enable flow control according to IEEE clause 37. 2359 **/ 2360 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2361 { 2362 u32 links2, anlp1_reg, autoc_reg, links; 2363 s32 ret_val; 2364 2365 /* 2366 * On backplane, bail out if 2367 * - backplane autoneg was not completed, or if 2368 * - we are 82599 and link partner is not AN enabled 2369 */ 2370 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2371 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) 2372 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2373 2374 if (hw->mac.type == ixgbe_mac_82599EB) { 2375 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2376 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) 2377 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2378 } 2379 /* 2380 * Read the 10g AN autoc and LP ability registers and resolve 2381 * local flow control settings accordingly 2382 */ 2383 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2384 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2385 2386 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2387 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2388 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2389 2390 return ret_val; 2391 } 2392 2393 /** 2394 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2395 * @hw: pointer to hardware structure 2396 * 2397 * Enable flow control according to IEEE clause 37. 
2398 **/ 2399 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2400 { 2401 u16 technology_ability_reg = 0; 2402 u16 lp_technology_ability_reg = 0; 2403 2404 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2405 MDIO_MMD_AN, 2406 &technology_ability_reg); 2407 hw->phy.ops.read_reg(hw, MDIO_AN_LPA, 2408 MDIO_MMD_AN, 2409 &lp_technology_ability_reg); 2410 2411 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2412 (u32)lp_technology_ability_reg, 2413 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2414 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2415 } 2416 2417 /** 2418 * ixgbe_fc_autoneg - Configure flow control 2419 * @hw: pointer to hardware structure 2420 * 2421 * Compares our advertised flow control capabilities to those advertised by 2422 * our link partner, and determines the proper flow control mode to use. 2423 **/ 2424 void ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2425 { 2426 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2427 ixgbe_link_speed speed; 2428 bool link_up; 2429 2430 /* 2431 * AN should have completed when the cable was plugged in. 2432 * Look for reasons to bail out. Bail out if: 2433 * - FC autoneg is disabled, or if 2434 * - link is not up. 2435 * 2436 * Since we're being called from an LSC, link is already known to be up. 2437 * So use link_up_wait_to_complete=false. 2438 */ 2439 if (hw->fc.disable_fc_autoneg) 2440 goto out; 2441 2442 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2443 if (!link_up) 2444 goto out; 2445 2446 switch (hw->phy.media_type) { 2447 /* Autoneg flow control on fiber adapters */ 2448 case ixgbe_media_type_fiber: 2449 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2450 ret_val = ixgbe_fc_autoneg_fiber(hw); 2451 break; 2452 2453 /* Autoneg flow control on backplane adapters */ 2454 case ixgbe_media_type_backplane: 2455 ret_val = ixgbe_fc_autoneg_backplane(hw); 2456 break; 2457 2458 /* Autoneg flow control on copper adapters */ 2459 case ixgbe_media_type_copper: 2460 if (ixgbe_device_supports_autoneg_fc(hw)) 2461 ret_val = ixgbe_fc_autoneg_copper(hw); 2462 break; 2463 2464 default: 2465 break; 2466 } 2467 2468 out: 2469 if (ret_val == 0) { 2470 hw->fc.fc_was_autonegged = true; 2471 } else { 2472 hw->fc.fc_was_autonegged = false; 2473 hw->fc.current_mode = hw->fc.requested_mode; 2474 } 2475 } 2476 2477 /** 2478 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion 2479 * @hw: pointer to hardware structure 2480 * 2481 * System-wide timeout range is encoded in PCIe Device Control2 register. 2482 * 2483 * Add 10% to specified maximum and return the number of times to poll for 2484 * completion timeout, in units of 100 microsec. Never return less than 2485 * 800 = 80 millisec. 
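*
* For example, a configured completion timeout range of 260-520 ms maps
* to pollcnt = 5200 below; with the 10% margin added this returns 5720,
* i.e. up to 5720 polls of roughly 100 microseconds each (~572 millisec).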
2486 **/ 2487 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 2488 { 2489 s16 devctl2; 2490 u32 pollcnt; 2491 2492 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); 2493 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 2494 2495 switch (devctl2) { 2496 case IXGBE_PCIDEVCTRL2_65_130ms: 2497 pollcnt = 1300; /* 130 millisec */ 2498 break; 2499 case IXGBE_PCIDEVCTRL2_260_520ms: 2500 pollcnt = 5200; /* 520 millisec */ 2501 break; 2502 case IXGBE_PCIDEVCTRL2_1_2s: 2503 pollcnt = 20000; /* 2 sec */ 2504 break; 2505 case IXGBE_PCIDEVCTRL2_4_8s: 2506 pollcnt = 80000; /* 8 sec */ 2507 break; 2508 case IXGBE_PCIDEVCTRL2_17_34s: 2509 pollcnt = 34000; /* 34 sec */ 2510 break; 2511 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 2512 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 2513 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 2514 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 2515 default: 2516 pollcnt = 800; /* 80 millisec minimum */ 2517 break; 2518 } 2519 2520 /* add 10% to spec maximum */ 2521 return (pollcnt * 11) / 10; 2522 } 2523 2524 /** 2525 * ixgbe_disable_pcie_master - Disable PCI-express master access 2526 * @hw: pointer to hardware structure 2527 * 2528 * Disables PCI-Express master access and verifies there are no pending 2529 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2530 * bit hasn't caused the master requests to be disabled, else 0 2531 * is returned signifying master requests disabled. 2532 **/ 2533 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2534 { 2535 u32 i, poll; 2536 u16 value; 2537 2538 /* Always set this bit to ensure any future transactions are blocked */ 2539 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 2540 2541 /* Poll for bit to read as set */ 2542 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2543 if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) 2544 break; 2545 usleep_range(100, 120); 2546 } 2547 if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { 2548 hw_dbg(hw, "GIO disable did not set - requesting resets\n"); 2549 goto gio_disable_fail; 2550 } 2551 2552 /* Exit if master requests are blocked */ 2553 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || 2554 ixgbe_removed(hw->hw_addr)) 2555 return 0; 2556 2557 /* Poll for master request bit to clear */ 2558 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2559 udelay(100); 2560 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2561 return 0; 2562 } 2563 2564 /* 2565 * Two consecutive resets are required via CTRL.RST per datasheet 2566 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2567 * of this need. The first reset prevents new master requests from 2568 * being issued by our device. We then must wait 1usec or more for any 2569 * remaining completions from the PCIe bus to trickle in, and then reset 2570 * again to clear out any effects they may have had on our device. 2571 */ 2572 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); 2573 gio_disable_fail: 2574 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2575 2576 if (hw->mac.type >= ixgbe_mac_X550) 2577 return 0; 2578 2579 /* 2580 * Before proceeding, make sure that the PCIe block does not have 2581 * transactions pending. 
2582 */
2583 poll = ixgbe_pcie_timeout_poll(hw);
2584 for (i = 0; i < poll; i++) {
2585 udelay(100);
2586 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2587 if (ixgbe_removed(hw->hw_addr))
2588 return 0;
2589 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2590 return 0;
2591 }
2592
2593 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2594 return IXGBE_ERR_MASTER_REQUESTS_PENDING;
2595 }
2596
2597 /**
2598 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2599 * @hw: pointer to hardware structure
2600 * @mask: Mask to specify which semaphore to acquire
2601 *
2602 * Acquires the SWFW semaphore through the GSSR register for the specified
2603 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2604 **/
2605 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2606 {
2607 u32 gssr = 0;
2608 u32 swmask = mask;
2609 u32 fwmask = mask << 5;
2610 u32 timeout = 200;
2611 u32 i;
2612
2613 for (i = 0; i < timeout; i++) {
2614 /*
2615 * SW NVM semaphore bit is used for access to all
2616 * SW_FW_SYNC bits (not just NVM)
2617 */
2618 if (ixgbe_get_eeprom_semaphore(hw))
2619 return IXGBE_ERR_SWFW_SYNC;
2620
2621 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2622 if (!(gssr & (fwmask | swmask))) {
2623 gssr |= swmask;
2624 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2625 ixgbe_release_eeprom_semaphore(hw);
2626 return 0;
2627 } else {
2628 /* Resource is currently in use by FW or SW */
2629 ixgbe_release_eeprom_semaphore(hw);
2630 usleep_range(5000, 10000);
2631 }
2632 }
2633
2634 /* If time expired clear the bits holding the lock and retry */
2635 if (gssr & (fwmask | swmask))
2636 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2637
2638 usleep_range(5000, 10000);
2639 return IXGBE_ERR_SWFW_SYNC;
2640 }
2641
2642 /**
2643 * ixgbe_release_swfw_sync - Release SWFW semaphore
2644 * @hw: pointer to hardware structure
2645 * @mask: Mask to specify which semaphore to release
2646 *
2647 * Releases the SWFW semaphore through the GSSR register for the specified
2648 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2649 **/
2650 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2651 {
2652 u32 gssr;
2653 u32 swmask = mask;
2654
2655 ixgbe_get_eeprom_semaphore(hw);
2656
2657 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2658 gssr &= ~swmask;
2659 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2660
2661 ixgbe_release_eeprom_semaphore(hw);
2662 }
2663
2664 /**
2665 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2666 * @hw: pointer to hardware structure
2667 * @reg_val: Value we read from AUTOC
2668 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
2669 * true in the generic case.
2670 *
2671 * The default case requires no protection so just do the register read.
2672 **/
2673 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2674 {
2675 *locked = false;
2676 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2677 return 0;
2678 }
2679
2680 /**
2681 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2682 * @hw: pointer to hardware structure
2683 * @reg_val: value to write to AUTOC
2684 * @locked: bool to indicate whether the SW/FW lock was already taken by
2685 * previous read.
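*
* A minimal sketch of the intended read-modify-write pairing (the same
* pattern the LED blink routines below use):
*
*	bool locked = false;
*	u32 autoc;
*
*	if (!hw->mac.ops.prot_autoc_read(hw, &locked, &autoc)) {
*		autoc |= IXGBE_AUTOC_AN_RESTART;
*		hw->mac.ops.prot_autoc_write(hw, autoc, locked);
*	}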
2686 **/ 2687 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 2688 { 2689 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 2690 return 0; 2691 } 2692 2693 /** 2694 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2695 * @hw: pointer to hardware structure 2696 * 2697 * Stops the receive data path and waits for the HW to internally 2698 * empty the Rx security block. 2699 **/ 2700 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2701 { 2702 #define IXGBE_MAX_SECRX_POLL 40 2703 int i; 2704 int secrxreg; 2705 2706 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2707 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2708 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2709 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2710 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2711 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2712 break; 2713 else 2714 /* Use interrupt-safe sleep just in case */ 2715 udelay(1000); 2716 } 2717 2718 /* For informational purposes only */ 2719 if (i >= IXGBE_MAX_SECRX_POLL) 2720 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2721 2722 return 0; 2723 2724 } 2725 2726 /** 2727 * ixgbe_enable_rx_buff - Enables the receive data path 2728 * @hw: pointer to hardware structure 2729 * 2730 * Enables the receive data path 2731 **/ 2732 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2733 { 2734 u32 secrxreg; 2735 2736 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2737 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2738 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2739 IXGBE_WRITE_FLUSH(hw); 2740 2741 return 0; 2742 } 2743 2744 /** 2745 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2746 * @hw: pointer to hardware structure 2747 * @regval: register value to write to RXCTRL 2748 * 2749 * Enables the Rx DMA unit 2750 **/ 2751 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2752 { 2753 if (regval & IXGBE_RXCTRL_RXEN) 2754 hw->mac.ops.enable_rx(hw); 2755 else 2756 hw->mac.ops.disable_rx(hw); 2757 2758 return 0; 2759 } 2760 2761 /** 2762 * ixgbe_blink_led_start_generic - Blink LED based on index. 2763 * @hw: pointer to hardware structure 2764 * @index: led number to blink 2765 **/ 2766 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2767 { 2768 ixgbe_link_speed speed = 0; 2769 bool link_up = false; 2770 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2771 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2772 bool locked = false; 2773 s32 ret_val; 2774 2775 if (index > 3) 2776 return IXGBE_ERR_PARAM; 2777 2778 /* 2779 * Link must be up to auto-blink the LEDs; 2780 * Force it if link is down. 2781 */ 2782 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2783 2784 if (!link_up) { 2785 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2786 if (ret_val) 2787 return ret_val; 2788 2789 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2790 autoc_reg |= IXGBE_AUTOC_FLU; 2791 2792 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2793 if (ret_val) 2794 return ret_val; 2795 2796 IXGBE_WRITE_FLUSH(hw); 2797 2798 usleep_range(10000, 20000); 2799 } 2800 2801 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2802 led_reg |= IXGBE_LED_BLINK(index); 2803 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2804 IXGBE_WRITE_FLUSH(hw); 2805 2806 return 0; 2807 } 2808 2809 /** 2810 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
2811 * @hw: pointer to hardware structure 2812 * @index: led number to stop blinking 2813 **/ 2814 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2815 { 2816 u32 autoc_reg = 0; 2817 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2818 bool locked = false; 2819 s32 ret_val; 2820 2821 if (index > 3) 2822 return IXGBE_ERR_PARAM; 2823 2824 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2825 if (ret_val) 2826 return ret_val; 2827 2828 autoc_reg &= ~IXGBE_AUTOC_FLU; 2829 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2830 2831 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2832 if (ret_val) 2833 return ret_val; 2834 2835 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2836 led_reg &= ~IXGBE_LED_BLINK(index); 2837 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2838 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2839 IXGBE_WRITE_FLUSH(hw); 2840 2841 return 0; 2842 } 2843 2844 /** 2845 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2846 * @hw: pointer to hardware structure 2847 * @san_mac_offset: SAN MAC address offset 2848 * 2849 * This function will read the EEPROM location for the SAN MAC address 2850 * pointer, and returns the value at that location. This is used in both 2851 * get and set mac_addr routines. 2852 **/ 2853 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2854 u16 *san_mac_offset) 2855 { 2856 s32 ret_val; 2857 2858 /* 2859 * First read the EEPROM pointer to see if the MAC addresses are 2860 * available. 2861 */ 2862 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 2863 san_mac_offset); 2864 if (ret_val) 2865 hw_err(hw, "eeprom read at offset %d failed\n", 2866 IXGBE_SAN_MAC_ADDR_PTR); 2867 2868 return ret_val; 2869 } 2870 2871 /** 2872 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2873 * @hw: pointer to hardware structure 2874 * @san_mac_addr: SAN MAC address 2875 * 2876 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2877 * per-port, so set_lan_id() must be called before reading the addresses. 2878 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2879 * upon for non-SFP connections, so we must call it here. 2880 **/ 2881 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2882 { 2883 u16 san_mac_data, san_mac_offset; 2884 u8 i; 2885 s32 ret_val; 2886 2887 /* 2888 * First read the EEPROM pointer to see if the MAC addresses are 2889 * available. If they're not, no point in calling set_lan_id() here. 2890 */ 2891 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2892 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 2893 2894 goto san_mac_addr_clr; 2895 2896 /* make sure we know which port we need to program */ 2897 hw->mac.ops.set_lan_id(hw); 2898 /* apply the port offset to the address offset */ 2899 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2900 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2901 for (i = 0; i < 3; i++) { 2902 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2903 &san_mac_data); 2904 if (ret_val) { 2905 hw_err(hw, "eeprom read at offset %d failed\n", 2906 san_mac_offset); 2907 goto san_mac_addr_clr; 2908 } 2909 san_mac_addr[i * 2] = (u8)(san_mac_data); 2910 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2911 san_mac_offset++; 2912 } 2913 return 0; 2914 2915 san_mac_addr_clr: 2916 /* No addresses available in this EEPROM. 
It's not necessarily an 2917 * error though, so just wipe the local address and return. 2918 */ 2919 for (i = 0; i < 6; i++) 2920 san_mac_addr[i] = 0xFF; 2921 return ret_val; 2922 } 2923 2924 /** 2925 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2926 * @hw: pointer to hardware structure 2927 * 2928 * Read PCIe configuration space, and get the MSI-X vector count from 2929 * the capabilities table. 2930 **/ 2931 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2932 { 2933 u16 msix_count; 2934 u16 max_msix_count; 2935 u16 pcie_offset; 2936 2937 switch (hw->mac.type) { 2938 case ixgbe_mac_82598EB: 2939 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 2940 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 2941 break; 2942 case ixgbe_mac_82599EB: 2943 case ixgbe_mac_X540: 2944 case ixgbe_mac_X550: 2945 case ixgbe_mac_X550EM_x: 2946 case ixgbe_mac_x550em_a: 2947 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2948 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2949 break; 2950 default: 2951 return 1; 2952 } 2953 2954 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); 2955 if (ixgbe_removed(hw->hw_addr)) 2956 msix_count = 0; 2957 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2958 2959 /* MSI-X count is zero-based in HW */ 2960 msix_count++; 2961 2962 if (msix_count > max_msix_count) 2963 msix_count = max_msix_count; 2964 2965 return msix_count; 2966 } 2967 2968 /** 2969 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2970 * @hw: pointer to hardware struct 2971 * @rar: receive address register index to disassociate 2972 * @vmdq: VMDq pool index to remove from the rar 2973 **/ 2974 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2975 { 2976 u32 mpsar_lo, mpsar_hi; 2977 u32 rar_entries = hw->mac.num_rar_entries; 2978 2979 /* Make sure we are using a valid rar index range */ 2980 if (rar >= rar_entries) { 2981 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2982 return IXGBE_ERR_INVALID_ARGUMENT; 2983 } 2984 2985 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2986 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2987 2988 if (ixgbe_removed(hw->hw_addr)) 2989 return 0; 2990 2991 if (!mpsar_lo && !mpsar_hi) 2992 return 0; 2993 2994 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2995 if (mpsar_lo) { 2996 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2997 mpsar_lo = 0; 2998 } 2999 if (mpsar_hi) { 3000 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3001 mpsar_hi = 0; 3002 } 3003 } else if (vmdq < 32) { 3004 mpsar_lo &= ~BIT(vmdq); 3005 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 3006 } else { 3007 mpsar_hi &= ~BIT(vmdq - 32); 3008 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 3009 } 3010 3011 /* was that the last pool using this rar? 
*/
3012 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3013 rar != 0 && rar != hw->mac.san_mac_rar_index)
3014 hw->mac.ops.clear_rar(hw, rar);
3015
3016 return 0;
3017 }
3018
3019 /**
3020 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3021 * @hw: pointer to hardware struct
3022 * @rar: receive address register index to associate with a VMDq index
3023 * @vmdq: VMDq pool index
3024 **/
3025 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3026 {
3027 u32 mpsar;
3028 u32 rar_entries = hw->mac.num_rar_entries;
3029
3030 /* Make sure we are using a valid rar index range */
3031 if (rar >= rar_entries) {
3032 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
3033 return IXGBE_ERR_INVALID_ARGUMENT;
3034 }
3035
3036 if (vmdq < 32) {
3037 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3038 mpsar |= BIT(vmdq);
3039 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3040 } else {
3041 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3042 mpsar |= BIT(vmdq - 32);
3043 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3044 }
3045 return 0;
3046 }
3047
3048 /**
3049 * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
3050 * @hw: pointer to hardware struct
3051 * @vmdq: VMDq pool index
3052 *
3053 * This function should only be used in IOV mode. In IOV mode, the default
3054 * pool is the next pool after the number of VFs advertised, not pool 0.
3055 * The MPSAR table needs to be updated for the SAN_MAC RAR
3056 * [hw->mac.san_mac_rar_index].
3057 **/
3058 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3059 {
3060 u32 rar = hw->mac.san_mac_rar_index;
3061
3062 if (vmdq < 32) {
3063 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
3064 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3065 } else {
3066 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3067 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
3068 }
3069
3070 return 0;
3071 }
3072
3073 /**
3074 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3075 * @hw: pointer to hardware structure
3076 **/
3077 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3078 {
3079 int i;
3080
3081 for (i = 0; i < 128; i++)
3082 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3083
3084 return 0;
3085 }
3086
3087 /**
3088 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3089 * @hw: pointer to hardware structure
3090 * @vlan: VLAN id to write to VLAN filter
3091 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
3092 * vlanid not found
3093 *
3094 * return the VLVF index where this VLAN id should be placed
3095 *
3096 **/
3097 static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3098 {
3099 s32 regindex, first_empty_slot;
3100 u32 bits;
3101
3102 /* short cut the special case */
3103 if (vlan == 0)
3104 return 0;
3105
3106 /* if vlvf_bypass is set we don't want to use an empty slot, we
3107 * will simply bypass the VLVF if there are no entries present in the
3108 * VLVF that contain our VLAN
3109 */
3110 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3111
3112 /* add VLAN enable bit for comparison */
3113 vlan |= IXGBE_VLVF_VIEN;
3114
3115 /* Search for the vlan id in the VLVF entries. Save off the first empty
3116 * slot found along the way.
3117 *
3118 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) ..
1 3119 */ 3120 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { 3121 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3122 if (bits == vlan) 3123 return regindex; 3124 if (!first_empty_slot && !bits) 3125 first_empty_slot = regindex; 3126 } 3127 3128 /* If we are here then we didn't find the VLAN. Return first empty 3129 * slot we found during our search, else error. 3130 */ 3131 if (!first_empty_slot) 3132 hw_dbg(hw, "No space in VLVF.\n"); 3133 3134 return first_empty_slot ? : IXGBE_ERR_NO_SPACE; 3135 } 3136 3137 /** 3138 * ixgbe_set_vfta_generic - Set VLAN filter table 3139 * @hw: pointer to hardware structure 3140 * @vlan: VLAN id to write to VLAN filter 3141 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3142 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3143 * @vlvf_bypass: boolean flag indicating updating default pool is okay 3144 * 3145 * Turn on/off specified VLAN in the VLAN filter table. 3146 **/ 3147 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3148 bool vlan_on, bool vlvf_bypass) 3149 { 3150 u32 regidx, vfta_delta, vfta, bits; 3151 s32 vlvf_index; 3152 3153 if ((vlan > 4095) || (vind > 63)) 3154 return IXGBE_ERR_PARAM; 3155 3156 /* 3157 * this is a 2 part operation - first the VFTA, then the 3158 * VLVF and VLVFB if VT Mode is set 3159 * We don't write the VFTA until we know the VLVF part succeeded. 3160 */ 3161 3162 /* Part 1 3163 * The VFTA is a bitstring made up of 128 32-bit registers 3164 * that enable the particular VLAN id, much like the MTA: 3165 * bits[11-5]: which register 3166 * bits[4-0]: which bit in the register 3167 */ 3168 regidx = vlan / 32; 3169 vfta_delta = BIT(vlan % 32); 3170 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); 3171 3172 /* vfta_delta represents the difference between the current value 3173 * of vfta and the value we want in the register. Since the diff 3174 * is an XOR mask we can just update vfta using an XOR. 3175 */ 3176 vfta_delta &= vlan_on ? ~vfta : vfta; 3177 vfta ^= vfta_delta; 3178 3179 /* Part 2 3180 * If VT Mode is set 3181 * Either vlan_on 3182 * make sure the vlan is in VLVF 3183 * set the vind bit in the matching VLVFB 3184 * Or !vlan_on 3185 * clear the pool bit and possibly the vind 3186 */ 3187 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) 3188 goto vfta_update; 3189 3190 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); 3191 if (vlvf_index < 0) { 3192 if (vlvf_bypass) 3193 goto vfta_update; 3194 return vlvf_index; 3195 } 3196 3197 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); 3198 3199 /* set the pool bit */ 3200 bits |= BIT(vind % 32); 3201 if (vlan_on) 3202 goto vlvf_update; 3203 3204 /* clear the pool bit */ 3205 bits ^= BIT(vind % 32); 3206 3207 if (!bits && 3208 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { 3209 /* Clear VFTA first, then disable VLVF. Otherwise 3210 * we run the risk of stray packets leaking into 3211 * the PF via the default pool 3212 */ 3213 if (vfta_delta) 3214 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3215 3216 /* disable VLVF and clear remaining bit from pool */ 3217 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3218 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); 3219 3220 return 0; 3221 } 3222 3223 /* If there are still bits set in the VLVFB registers 3224 * for the VLAN ID indicated we need to see if the 3225 * caller is requesting that we clear the VFTA entry bit. 
3226 * If the caller has requested that we clear the VFTA 3227 * entry bit but there are still pools/VFs using this VLAN 3228 * ID entry then ignore the request. We're not worried 3229 * about the case where we're turning the VFTA VLAN ID 3230 * entry bit on, only when requested to turn it off as 3231 * there may be multiple pools and/or VFs using the 3232 * VLAN ID entry. In that case we cannot clear the 3233 * VFTA bit until all pools/VFs using that VLAN ID have also 3234 * been cleared. This will be indicated by "bits" being 3235 * zero. 3236 */ 3237 vfta_delta = 0; 3238 3239 vlvf_update: 3240 /* record pool change and enable VLAN ID if not already enabled */ 3241 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 3242 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 3243 3244 vfta_update: 3245 /* Update VFTA now that we are ready for traffic */ 3246 if (vfta_delta) 3247 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3248 3249 return 0; 3250 } 3251 3252 /** 3253 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3254 * @hw: pointer to hardware structure 3255 * 3256 * Clears the VLAN filer table, and the VMDq index associated with the filter 3257 **/ 3258 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3259 { 3260 u32 offset; 3261 3262 for (offset = 0; offset < hw->mac.vft_size; offset++) 3263 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3264 3265 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3266 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3267 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 3268 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); 3269 } 3270 3271 return 0; 3272 } 3273 3274 /** 3275 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix 3276 * @hw: pointer to hardware structure 3277 * 3278 * Contains the logic to identify if we need to verify link for the 3279 * crosstalk fix 3280 **/ 3281 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) 3282 { 3283 /* Does FW say we need the fix */ 3284 if (!hw->need_crosstalk_fix) 3285 return false; 3286 3287 /* Only consider SFP+ PHYs i.e. media type fiber */ 3288 switch (hw->mac.ops.get_media_type(hw)) { 3289 case ixgbe_media_type_fiber: 3290 case ixgbe_media_type_fiber_qsfp: 3291 break; 3292 default: 3293 return false; 3294 } 3295 3296 return true; 3297 } 3298 3299 /** 3300 * ixgbe_check_mac_link_generic - Determine link and speed status 3301 * @hw: pointer to hardware structure 3302 * @speed: pointer to link speed 3303 * @link_up: true when link is up 3304 * @link_up_wait_to_complete: bool used to wait for link up or not 3305 * 3306 * Reads the links register to determine if link is up and the current speed 3307 **/ 3308 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3309 bool *link_up, bool link_up_wait_to_complete) 3310 { 3311 u32 links_reg, links_orig; 3312 u32 i; 3313 3314 /* If Crosstalk fix enabled do the sanity check of making sure 3315 * the SFP+ cage is full. 
3316 */ 3317 if (ixgbe_need_crosstalk_fix(hw)) { 3318 u32 sfp_cage_full; 3319 3320 switch (hw->mac.type) { 3321 case ixgbe_mac_82599EB: 3322 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3323 IXGBE_ESDP_SDP2; 3324 break; 3325 case ixgbe_mac_X550EM_x: 3326 case ixgbe_mac_x550em_a: 3327 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3328 IXGBE_ESDP_SDP0; 3329 break; 3330 default: 3331 /* sanity check - No SFP+ devices here */ 3332 sfp_cage_full = false; 3333 break; 3334 } 3335 3336 if (!sfp_cage_full) { 3337 *link_up = false; 3338 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3339 return 0; 3340 } 3341 } 3342 3343 /* clear the old state */ 3344 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3345 3346 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3347 3348 if (links_orig != links_reg) { 3349 hw_dbg(hw, "LINKS changed from %08X to %08X\n", 3350 links_orig, links_reg); 3351 } 3352 3353 if (link_up_wait_to_complete) { 3354 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3355 if (links_reg & IXGBE_LINKS_UP) { 3356 *link_up = true; 3357 break; 3358 } else { 3359 *link_up = false; 3360 } 3361 msleep(100); 3362 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3363 } 3364 } else { 3365 if (links_reg & IXGBE_LINKS_UP) 3366 *link_up = true; 3367 else 3368 *link_up = false; 3369 } 3370 3371 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 3372 case IXGBE_LINKS_SPEED_10G_82599: 3373 if ((hw->mac.type >= ixgbe_mac_X550) && 3374 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3375 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 3376 else 3377 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3378 break; 3379 case IXGBE_LINKS_SPEED_1G_82599: 3380 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3381 break; 3382 case IXGBE_LINKS_SPEED_100_82599: 3383 if ((hw->mac.type >= ixgbe_mac_X550) && 3384 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3385 *speed = IXGBE_LINK_SPEED_5GB_FULL; 3386 else 3387 *speed = IXGBE_LINK_SPEED_100_FULL; 3388 break; 3389 case IXGBE_LINKS_SPEED_10_X550EM_A: 3390 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3391 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3392 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { 3393 *speed = IXGBE_LINK_SPEED_10_FULL; 3394 } 3395 break; 3396 default: 3397 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3398 } 3399 3400 return 0; 3401 } 3402 3403 /** 3404 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3405 * the EEPROM 3406 * @hw: pointer to hardware structure 3407 * @wwnn_prefix: the alternative WWNN prefix 3408 * @wwpn_prefix: the alternative WWPN prefix 3409 * 3410 * This function will read the EEPROM from the alternative SAN MAC address 3411 * block to check the support for the alternative WWNN/WWPN prefix support. 
3412 **/ 3413 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3414 u16 *wwpn_prefix) 3415 { 3416 u16 offset, caps; 3417 u16 alt_san_mac_blk_offset; 3418 3419 /* clear output first */ 3420 *wwnn_prefix = 0xFFFF; 3421 *wwpn_prefix = 0xFFFF; 3422 3423 /* check if alternative SAN MAC is supported */ 3424 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; 3425 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) 3426 goto wwn_prefix_err; 3427 3428 if ((alt_san_mac_blk_offset == 0) || 3429 (alt_san_mac_blk_offset == 0xFFFF)) 3430 return 0; 3431 3432 /* check capability in alternative san mac address block */ 3433 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3434 if (hw->eeprom.ops.read(hw, offset, &caps)) 3435 goto wwn_prefix_err; 3436 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3437 return 0; 3438 3439 /* get the corresponding prefix for WWNN/WWPN */ 3440 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3441 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) 3442 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3443 3444 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3445 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 3446 goto wwn_prefix_err; 3447 3448 return 0; 3449 3450 wwn_prefix_err: 3451 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3452 return 0; 3453 } 3454 3455 /** 3456 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3457 * @hw: pointer to hardware structure 3458 * @enable: enable or disable switch for MAC anti-spoofing 3459 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing 3460 * 3461 **/ 3462 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3463 { 3464 int vf_target_reg = vf >> 3; 3465 int vf_target_shift = vf % 8; 3466 u32 pfvfspoof; 3467 3468 if (hw->mac.type == ixgbe_mac_82598EB) 3469 return; 3470 3471 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3472 if (enable) 3473 pfvfspoof |= BIT(vf_target_shift); 3474 else 3475 pfvfspoof &= ~BIT(vf_target_shift); 3476 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3477 } 3478 3479 /** 3480 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 3481 * @hw: pointer to hardware structure 3482 * @enable: enable or disable switch for VLAN anti-spoofing 3483 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 3484 * 3485 **/ 3486 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3487 { 3488 int vf_target_reg = vf >> 3; 3489 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 3490 u32 pfvfspoof; 3491 3492 if (hw->mac.type == ixgbe_mac_82598EB) 3493 return; 3494 3495 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3496 if (enable) 3497 pfvfspoof |= BIT(vf_target_shift); 3498 else 3499 pfvfspoof &= ~BIT(vf_target_shift); 3500 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3501 } 3502 3503 /** 3504 * ixgbe_get_device_caps_generic - Get additional device capabilities 3505 * @hw: pointer to hardware structure 3506 * @device_caps: the EEPROM word with the extra device capabilities 3507 * 3508 * This function will read the EEPROM location for the device capabilities, 3509 * and return the word through device_caps. 
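*
* For example, the FCoE setup code tests this word against
* IXGBE_DEVICE_CAPS_FCOE_OFFLOADS to decide whether FCoE offloads are
* supported by this part.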
3510 **/ 3511 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3512 { 3513 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3514 3515 return 0; 3516 } 3517 3518 /** 3519 * ixgbe_set_rxpba_generic - Initialize RX packet buffer 3520 * @hw: pointer to hardware structure 3521 * @num_pb: number of packet buffers to allocate 3522 * @headroom: reserve n KB of headroom 3523 * @strategy: packet buffer allocation strategy 3524 **/ 3525 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, 3526 int num_pb, 3527 u32 headroom, 3528 int strategy) 3529 { 3530 u32 pbsize = hw->mac.rx_pb_size; 3531 int i = 0; 3532 u32 rxpktsize, txpktsize, txpbthresh; 3533 3534 /* Reserve headroom */ 3535 pbsize -= headroom; 3536 3537 if (!num_pb) 3538 num_pb = 1; 3539 3540 /* Divide remaining packet buffer space amongst the number 3541 * of packet buffers requested using supplied strategy. 3542 */ 3543 switch (strategy) { 3544 case (PBA_STRATEGY_WEIGHTED): 3545 /* pba_80_48 strategy weight first half of packet buffer with 3546 * 5/8 of the packet buffer space. 3547 */ 3548 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); 3549 pbsize -= rxpktsize * (num_pb / 2); 3550 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 3551 for (; i < (num_pb / 2); i++) 3552 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3553 /* fall through - configure remaining packet buffers */ 3554 case (PBA_STRATEGY_EQUAL): 3555 /* Divide the remaining Rx packet buffer evenly among the TCs */ 3556 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 3557 for (; i < num_pb; i++) 3558 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3559 break; 3560 default: 3561 break; 3562 } 3563 3564 /* 3565 * Setup Tx packet buffer and threshold equally for all TCs 3566 * TXPBTHRESH register is set in K so divide by 1024 and subtract 3567 * 10 since the largest packet we support is just over 9K. 3568 */ 3569 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 3570 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 3571 for (i = 0; i < num_pb; i++) { 3572 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 3573 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 3574 } 3575 3576 /* Clear unused TCs, if any, to zero buffer size*/ 3577 for (; i < IXGBE_MAX_PB; i++) { 3578 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 3579 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 3580 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 3581 } 3582 } 3583 3584 /** 3585 * ixgbe_calculate_checksum - Calculate checksum for buffer 3586 * @buffer: pointer to EEPROM 3587 * @length: size of EEPROM to calculate a checksum for 3588 * 3589 * Calculates the checksum for some buffer on a specified length. The 3590 * checksum calculated is returned. 3591 **/ 3592 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 3593 { 3594 u32 i; 3595 u8 sum = 0; 3596 3597 if (!buffer) 3598 return 0; 3599 3600 for (i = 0; i < length; i++) 3601 sum += buffer[i]; 3602 3603 return (u8) (0 - sum); 3604 } 3605 3606 /** 3607 * ixgbe_hic_unlocked - Issue command to manageability block unlocked 3608 * @hw: pointer to the HW structure 3609 * @buffer: command to write and where the return status will be placed 3610 * @length: length of buffer, must be multiple of 4 bytes 3611 * @timeout: time in ms to wait for command completion 3612 * 3613 * Communicates with the manageability block. On success return 0 3614 * else returns semaphore error when encountering an error acquiring 3615 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
3616 * 3617 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held 3618 * by the caller. 3619 **/ 3620 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, 3621 u32 timeout) 3622 { 3623 u32 hicr, i, fwsts; 3624 u16 dword_len; 3625 3626 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3627 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3628 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3629 } 3630 3631 /* Set bit 9 of FWSTS clearing FW reset indication */ 3632 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); 3633 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); 3634 3635 /* Check that the host interface is enabled. */ 3636 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3637 if (!(hicr & IXGBE_HICR_EN)) { 3638 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3639 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3640 } 3641 3642 /* Calculate length in DWORDs. We must be DWORD aligned */ 3643 if (length % sizeof(u32)) { 3644 hw_dbg(hw, "Buffer length failure, not aligned to dword"); 3645 return IXGBE_ERR_INVALID_ARGUMENT; 3646 } 3647 3648 dword_len = length >> 2; 3649 3650 /* The device driver writes the relevant command block 3651 * into the ram area. 3652 */ 3653 for (i = 0; i < dword_len; i++) 3654 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, 3655 i, cpu_to_le32(buffer[i])); 3656 3657 /* Setting this bit tells the ARC that a new command is pending. */ 3658 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); 3659 3660 for (i = 0; i < timeout; i++) { 3661 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3662 if (!(hicr & IXGBE_HICR_C)) 3663 break; 3664 usleep_range(1000, 2000); 3665 } 3666 3667 /* Check command successful completion. */ 3668 if ((timeout && i == timeout) || 3669 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) 3670 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3671 3672 return 0; 3673 } 3674 3675 /** 3676 * ixgbe_host_interface_command - Issue command to manageability block 3677 * @hw: pointer to the HW structure 3678 * @buffer: contains the command to write and where the return status will 3679 * be placed 3680 * @length: length of buffer, must be multiple of 4 bytes 3681 * @timeout: time in ms to wait for command completion 3682 * @return_data: read and return data from the buffer (true) or not (false) 3683 * Needed because FW structures are big endian and decoding of 3684 * these fields can be 8 bit or 16 bit based on command. Decoding 3685 * is not easily understood without making a table of commands. 3686 * So we will leave this up to the caller to read back the data 3687 * in these cases. 3688 * 3689 * Communicates with the manageability block. On success return 0 3690 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
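*
* For a complete caller that builds a command header, computes its
* checksum and issues it through this function, see
* ixgbe_set_fw_drv_ver_generic() below.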
3691 **/ 3692 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, 3693 u32 length, u32 timeout, 3694 bool return_data) 3695 { 3696 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3697 union { 3698 struct ixgbe_hic_hdr hdr; 3699 u32 u32arr[1]; 3700 } *bp = buffer; 3701 u16 buf_len, dword_len; 3702 s32 status; 3703 u32 bi; 3704 3705 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3706 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3707 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3708 } 3709 /* Take management host interface semaphore */ 3710 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3711 if (status) 3712 return status; 3713 3714 status = ixgbe_hic_unlocked(hw, buffer, length, timeout); 3715 if (status) 3716 goto rel_out; 3717 3718 if (!return_data) 3719 goto rel_out; 3720 3721 /* Calculate length in DWORDs */ 3722 dword_len = hdr_size >> 2; 3723 3724 /* first pull in the header so we know the buffer length */ 3725 for (bi = 0; bi < dword_len; bi++) { 3726 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3727 le32_to_cpus(&bp->u32arr[bi]); 3728 } 3729 3730 /* If there is any thing in data position pull it in */ 3731 buf_len = bp->hdr.buf_len; 3732 if (!buf_len) 3733 goto rel_out; 3734 3735 if (length < round_up(buf_len, 4) + hdr_size) { 3736 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3737 status = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3738 goto rel_out; 3739 } 3740 3741 /* Calculate length in DWORDs, add 3 for odd lengths */ 3742 dword_len = (buf_len + 3) >> 2; 3743 3744 /* Pull in the rest of the buffer (bi is where we left off) */ 3745 for (; bi <= dword_len; bi++) { 3746 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3747 le32_to_cpus(&bp->u32arr[bi]); 3748 } 3749 3750 rel_out: 3751 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3752 3753 return status; 3754 } 3755 3756 /** 3757 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware 3758 * @hw: pointer to the HW structure 3759 * @maj: driver version major number 3760 * @min: driver version minor number 3761 * @build: driver version build number 3762 * @sub: driver version sub build number 3763 * @len: length of driver_ver string 3764 * @driver_ver: driver string 3765 * 3766 * Sends driver version number to firmware through the manageability 3767 * block. On success return 0 3768 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 3769 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
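*
* Note the checksum convention used below: hdr.checksum is zeroed first
* and then set to ixgbe_calculate_checksum() over the header plus
* payload, so summing all covered bytes of the final command (checksum
* byte included) yields 0 modulo 256 on the receiving end.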
/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: length of driver_ver string
 * @driver_ver: driver string
 *
 * Sends the driver version number to firmware through the manageability
 * block. On success returns 0; otherwise returns IXGBE_ERR_SWFW_SYNC when
 * encountering an error acquiring the semaphore, or
 * IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub, __always_unused u16 len,
				 __always_unused const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val;

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       true);
		if (ret_val != 0)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = 0;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	return ret_val;
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe FIFO
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs. This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If a double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(3000, 6000);

	/* Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
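	 *
	 * The poll bound comes from ixgbe_pcie_timeout_poll(), which (as
	 * an assumption about its intent) derives an iteration count from
	 * the device's PCIe completion timeout; each iteration below
	 * sleeps 100-200 us, so the total wait tracks that timeout rather
	 * than a fixed constant.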
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usleep_range(100, 200);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			break;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			break;
	}

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}

static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};

/**
 * ixgbe_get_ets_data - Extracts the ETS bit data
 * @hw: pointer to hardware structure
 * @ets_cfg: extracted ETS data
 * @ets_offset: offset of ETS data
 *
 * Returns error code.
 **/
static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
			      u16 *ets_offset)
{
	s32 status;

	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
	if (status)
		return status;

	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
		return IXGBE_NOT_IMPLEMENTED;

	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
	if (status)
		return status;

	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
		return IXGBE_NOT_IMPLEMENTED;

	return 0;
}

/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Returns the thermal sensor data structure
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8 sensor_index;
		u8 sensor_location;

		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			return status;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				return status;
		}
	}

	return 0;
}
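/* Usage sketch (illustrative only, not code from this driver): after
 * ixgbe_init_thermal_sensor_thresh_generic() below has populated the
 * sensor locations, a hwmon-style consumer could refresh and print the
 * readings like this:
 *
 *	struct ixgbe_thermal_sensor_data *tsd;
 *	int i;
 *
 *	if (!ixgbe_get_thermal_sensor_data_generic(hw)) {
 *		tsd = &hw->mac.thermal_sensor_data;
 *		for (i = 0; i < IXGBE_MAX_SENSORS; i++)
 *			if (tsd->sensor[i].location)
 *				pr_info("sensor %d: %u degC\n",
 *					i, tsd->sensor[i].temp);
 *	}
 */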
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and saves off the threshold and location values into
 * mac.thermal_sensor_data.
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 therm_limit;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8 sensor_index;
		u8 sensor_location;

		if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       ets_offset + 1 + i);
			continue;
		}
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		if (sensor_location == 0)
			continue;

		data->sensor[i].location = sensor_location;
		data->sensor[i].caution_thresh = therm_limit;
		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
	}

	return 0;
}
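/* Worked example of the threshold math above (values assumed, not taken
 * from any real NVM map): with therm_limit = 100 degC read from the ETS
 * record and low_thresh_delta = 5 from the ETS config word, the driver
 * programs the EMC high limit to 100, reports caution_thresh = 100, and
 * derives max_op_thresh = 100 - 5 = 95 degC.
 */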
/**
 * ixgbe_get_orom_version - Return option ROM version from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * If there is a valid option ROM version, nvm_ver->or_valid is set to true,
 * else nvm_ver->or_valid is false.
 **/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
			    struct ixgbe_nvm_version *nvm_ver)
{
	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;

	nvm_ver->or_valid = false;
	/* Option ROM may or may not be present. Start with pointer */
	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);

	/* make sure offset is valid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);

	/* return if option ROM block does not exist or is invalid */
	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
	    eeprom_cfg_blkl == NVM_VER_INVALID ||
	    eeprom_cfg_blkh == NVM_VER_INVALID)
		return;

	nvm_ver->or_valid = true;
	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}

/**
 * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * If there is a valid OEM product version, nvm_ver->oem_valid is set to true,
 * else nvm_ver->oem_valid is false.
 **/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
				struct ixgbe_nvm_version *nvm_ver)
{
	u16 rel_num, prod_ver, mod_len, cap, offset;

	nvm_ver->oem_valid = false;
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return if offset to OEM Product Version block is invalid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block */
	hw->eeprom.ops.read(hw, offset, &mod_len);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);

	/* Return if OEM product version block is invalid */
	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);

	/* Return if version is invalid */
	if ((rel_num | prod_ver) == 0x0 ||
	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
		return;

	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
	nvm_ver->oem_release = rel_num;
	nvm_ver->oem_valid = true;
}

/**
 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * Word read errors will return 0xFFFF.
 **/
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
		      struct ixgbe_nvm_version *nvm_ver)
{
	u16 etk_id_l, etk_id_h;

	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
		etk_id_l = NVM_VER_INVALID;
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
		etk_id_h = NVM_VER_INVALID;

	/* The word order for the version format is determined by high order
	 * word bit 15.
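	 *
	 * Worked example (assuming NVM_ETK_SHIFT is a half-word shift of
	 * 16): with etk_id_h = 0x8001 and etk_id_l = 0x2345, bit 15 of the
	 * high word is set, so the else branch below forms
	 * etk_id = 0x2345 | (0x8001 << 16) = 0x80012345; were bit 15
	 * clear, the two words would be combined in the opposite order.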
	 */
	if ((etk_id_h & NVM_ETK_VALID) == 0) {
		nvm_ver->etk_id = etk_id_h;
		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
	} else {
		nvm_ver->etk_id = etk_id_l;
		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
	}
}

void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			u32 pfdtxgswc;

			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = true;
			} else {
				hw->mac.set_lben = false;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}

void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			u32 pfdtxgswc;

			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = false;
		}
	}
}

/**
 * ixgbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 **/
bool ixgbe_mng_present(struct ixgbe_hw *hw)
{
	u32 fwsm;

	if (hw->mac.type < ixgbe_mac_82599EB)
		return false;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));

	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restart the link.
 */
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = 0;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	/* Mask off requested but non-supported speeds */
	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
	if (status)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
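	 * The sequence below therefore attempts 10G first, then 1G, and
	 * finally falls back to reconfiguring the highest speed that was
	 * tried if neither attempt produced link.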
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
					   IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msleep(100);

			/* If we have link, just jump out */
			status = hw->mac.ops.check_link(hw, &link_speed,
							&link_up, false);
			if (status)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
					   IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msleep(100);

		/* If we have link, just jump out */
		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
						false);
		if (status)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Configure back to the highest speed we tried
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
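	 *
	 * Note that this recursion is bounded: the recursive call passes a
	 * single speed, so speedcnt can only reach 1 on that pass and no
	 * further recursive call is made.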
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						highest_link_speed,
						autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* rate select is a one-bit mask; setting the bit
		 * selects 10G
		 */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		hw_dbg(hw, "Invalid fixed module speed\n");
		return;
	}

	/* Set RS0 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
		return;
	}

	/* Set RS1 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
		return;
	}
}
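/* Usage sketch (illustrative only): on MACs whose SFP+ modules use soft
 * rate select, this helper is typically wired up as the rate-select op
 * consumed by ixgbe_setup_mac_link_multispeed_fiber() above. The
 * assignment below is an assumption about how a MAC setup path might
 * install it, not code from this file:
 *
 *	hw->mac.ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed;
 *
 *	// later, the multispeed fiber path calls:
 *	hw->mac.ops.set_rate_select_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
 */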