1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2013 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 #include <linux/pci.h> 29 #include <linux/delay.h> 30 #include <linux/sched.h> 31 #include <linux/netdevice.h> 32 33 #include "ixgbe.h" 34 #include "ixgbe_common.h" 35 #include "ixgbe_phy.h" 36 37 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 38 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 39 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 40 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 41 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 42 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 43 u16 count); 44 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 45 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 47 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 48 49 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 50 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 51 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 52 u16 words, u16 *data); 53 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 54 u16 words, u16 *data); 55 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 56 u16 offset); 57 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 58 59 /** 60 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 61 * control 62 * @hw: pointer to hardware structure 63 * 64 * There are several phys that do not support autoneg flow control. This 65 * function check the device id to see if the associated phy supports 66 * autoneg flow control. 
67 **/ 68 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 69 { 70 bool supported = false; 71 ixgbe_link_speed speed; 72 bool link_up; 73 74 switch (hw->phy.media_type) { 75 case ixgbe_media_type_fiber_fixed: 76 case ixgbe_media_type_fiber: 77 hw->mac.ops.check_link(hw, &speed, &link_up, false); 78 /* if link is down, assume supported */ 79 if (link_up) 80 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 81 true : false; 82 else 83 supported = true; 84 break; 85 case ixgbe_media_type_backplane: 86 supported = true; 87 break; 88 case ixgbe_media_type_copper: 89 /* only some copper devices support flow control autoneg */ 90 switch (hw->device_id) { 91 case IXGBE_DEV_ID_82599_T3_LOM: 92 case IXGBE_DEV_ID_X540T: 93 case IXGBE_DEV_ID_X540T1: 94 supported = true; 95 break; 96 default: 97 break; 98 } 99 default: 100 break; 101 } 102 103 return supported; 104 } 105 106 /** 107 * ixgbe_setup_fc - Set up flow control 108 * @hw: pointer to hardware structure 109 * 110 * Called at init time to set up flow control. 111 **/ 112 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) 113 { 114 s32 ret_val = 0; 115 u32 reg = 0, reg_bp = 0; 116 u16 reg_cu = 0; 117 bool got_lock = false; 118 119 /* 120 * Validate the requested mode. Strict IEEE mode does not allow 121 * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 122 */ 123 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 124 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 125 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 126 goto out; 127 } 128 129 /* 130 * 10gig parts do not have a word in the EEPROM to determine the 131 * default flow control setting, so we explicitly set it to full. 132 */ 133 if (hw->fc.requested_mode == ixgbe_fc_default) 134 hw->fc.requested_mode = ixgbe_fc_full; 135 136 /* 137 * Set up the 1G and 10G flow control advertisement registers so the 138 * HW will be able to do fc autoneg once the cable is plugged in. 
If 139 * we link at 10G, the 1G advertisement is harmless and vice versa. 140 */ 141 switch (hw->phy.media_type) { 142 case ixgbe_media_type_fiber_fixed: 143 case ixgbe_media_type_fiber: 144 case ixgbe_media_type_backplane: 145 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 146 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 147 break; 148 case ixgbe_media_type_copper: 149 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 150 MDIO_MMD_AN, ®_cu); 151 break; 152 default: 153 break; 154 } 155 156 /* 157 * The possible values of fc.requested_mode are: 158 * 0: Flow control is completely disabled 159 * 1: Rx flow control is enabled (we can receive pause frames, 160 * but not send pause frames). 161 * 2: Tx flow control is enabled (we can send pause frames but 162 * we do not support receiving pause frames). 163 * 3: Both Rx and Tx flow control (symmetric) are enabled. 164 * other: Invalid. 165 */ 166 switch (hw->fc.requested_mode) { 167 case ixgbe_fc_none: 168 /* Flow control completely disabled by software override. */ 169 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 170 if (hw->phy.media_type == ixgbe_media_type_backplane) 171 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 172 IXGBE_AUTOC_ASM_PAUSE); 173 else if (hw->phy.media_type == ixgbe_media_type_copper) 174 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 175 break; 176 case ixgbe_fc_tx_pause: 177 /* 178 * Tx Flow control is enabled, and Rx Flow control is 179 * disabled by software override. 180 */ 181 reg |= IXGBE_PCS1GANA_ASM_PAUSE; 182 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; 183 if (hw->phy.media_type == ixgbe_media_type_backplane) { 184 reg_bp |= IXGBE_AUTOC_ASM_PAUSE; 185 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; 186 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 187 reg_cu |= IXGBE_TAF_ASM_PAUSE; 188 reg_cu &= ~IXGBE_TAF_SYM_PAUSE; 189 } 190 break; 191 case ixgbe_fc_rx_pause: 192 /* 193 * Rx Flow control is enabled and Tx Flow control is 194 * disabled by software override. 
Since there really 195 * isn't a way to advertise that we are capable of RX 196 * Pause ONLY, we will advertise that we support both 197 * symmetric and asymmetric Rx PAUSE, as such we fall 198 * through to the fc_full statement. Later, we will 199 * disable the adapter's ability to send PAUSE frames. 200 */ 201 case ixgbe_fc_full: 202 /* Flow control (both Rx and Tx) is enabled by SW override. */ 203 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; 204 if (hw->phy.media_type == ixgbe_media_type_backplane) 205 reg_bp |= IXGBE_AUTOC_SYM_PAUSE | 206 IXGBE_AUTOC_ASM_PAUSE; 207 else if (hw->phy.media_type == ixgbe_media_type_copper) 208 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; 209 break; 210 default: 211 hw_dbg(hw, "Flow control param set incorrectly\n"); 212 ret_val = IXGBE_ERR_CONFIG; 213 goto out; 214 break; 215 } 216 217 if (hw->mac.type != ixgbe_mac_X540) { 218 /* 219 * Enable auto-negotiation between the MAC & PHY; 220 * the MAC will advertise clause 37 flow control. 221 */ 222 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 223 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 224 225 /* Disable AN timeout */ 226 if (hw->fc.strict_ieee) 227 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 228 229 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 230 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 231 } 232 233 /* 234 * AUTOC restart handles negotiation of 1G and 10G on backplane 235 * and copper. There is no need to set the PCS1GCTL register. 236 * 237 */ 238 if (hw->phy.media_type == ixgbe_media_type_backplane) { 239 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 240 * LESM is on, likewise reset_pipeline requries the lock as 241 * it also writes AUTOC. 
242 */ 243 if ((hw->mac.type == ixgbe_mac_82599EB) && 244 ixgbe_verify_lesm_fw_enabled_82599(hw)) { 245 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 246 IXGBE_GSSR_MAC_CSR_SM); 247 if (ret_val) 248 goto out; 249 250 got_lock = true; 251 } 252 253 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 254 255 if (hw->mac.type == ixgbe_mac_82599EB) 256 ixgbe_reset_pipeline_82599(hw); 257 258 if (got_lock) 259 hw->mac.ops.release_swfw_sync(hw, 260 IXGBE_GSSR_MAC_CSR_SM); 261 262 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 263 ixgbe_device_supports_autoneg_fc(hw)) { 264 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 265 MDIO_MMD_AN, reg_cu); 266 } 267 268 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 269 out: 270 return ret_val; 271 } 272 273 /** 274 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 275 * @hw: pointer to hardware structure 276 * 277 * Starts the hardware by filling the bus info structure and media type, clears 278 * all on chip counters, initializes receive address registers, multicast 279 * table, VLAN filter table, calls routine to set up link and flow control 280 * settings, and leaves transmit and receive units disabled and uninitialized 281 **/ 282 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 283 { 284 u32 ctrl_ext; 285 286 /* Set the media type */ 287 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 288 289 /* Identify the PHY */ 290 hw->phy.ops.identify(hw); 291 292 /* Clear the VLAN filter table */ 293 hw->mac.ops.clear_vfta(hw); 294 295 /* Clear statistics registers */ 296 hw->mac.ops.clear_hw_cntrs(hw); 297 298 /* Set No Snoop Disable */ 299 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 300 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 301 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 302 IXGBE_WRITE_FLUSH(hw); 303 304 /* Setup flow control */ 305 ixgbe_setup_fc(hw); 306 307 /* Clear adapter stopped flag */ 308 hw->adapter_stopped = false; 309 310 return 0; 311 } 312 313 /** 314 * ixgbe_start_hw_gen2 - Init sequence for common 
device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the rate limiters: select each Tx queue, zero its rate ctrl */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering on Tx descriptor writes */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	/* Disable relaxed ordering on Rx data and header writes */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return 0;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == 0) {
		/* Start the HW only if the reset succeeded */
		status = hw->mac.ops.start_hw(hw);
	}

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  The read results are intentionally
 * discarded; reading alone resets each counter.  NOTE(review): the read
 * order is kept as-is — low/high register pairs (e.g. GORCL/GORCH) appear
 * to be read low-first; confirm against the datasheet before reordering.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* link XON/XOFF Rx counters moved on 82599 and later */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* packet size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters; byte counters are split L/H on 82599+ */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional error counters in the PHY; clear those too.
	 * i is reused here as a throwaway destination for the read value. */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
492 **/ 493 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 494 u32 pba_num_size) 495 { 496 s32 ret_val; 497 u16 data; 498 u16 pba_ptr; 499 u16 offset; 500 u16 length; 501 502 if (pba_num == NULL) { 503 hw_dbg(hw, "PBA string buffer was null\n"); 504 return IXGBE_ERR_INVALID_ARGUMENT; 505 } 506 507 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 508 if (ret_val) { 509 hw_dbg(hw, "NVM Read Error\n"); 510 return ret_val; 511 } 512 513 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); 514 if (ret_val) { 515 hw_dbg(hw, "NVM Read Error\n"); 516 return ret_val; 517 } 518 519 /* 520 * if data is not ptr guard the PBA must be in legacy format which 521 * means pba_ptr is actually our second data word for the PBA number 522 * and we can decode it into an ascii string 523 */ 524 if (data != IXGBE_PBANUM_PTR_GUARD) { 525 hw_dbg(hw, "NVM PBA number is not stored as string\n"); 526 527 /* we will need 11 characters to store the PBA */ 528 if (pba_num_size < 11) { 529 hw_dbg(hw, "PBA string buffer too small\n"); 530 return IXGBE_ERR_NO_SPACE; 531 } 532 533 /* extract hex string from data and pba_ptr */ 534 pba_num[0] = (data >> 12) & 0xF; 535 pba_num[1] = (data >> 8) & 0xF; 536 pba_num[2] = (data >> 4) & 0xF; 537 pba_num[3] = data & 0xF; 538 pba_num[4] = (pba_ptr >> 12) & 0xF; 539 pba_num[5] = (pba_ptr >> 8) & 0xF; 540 pba_num[6] = '-'; 541 pba_num[7] = 0; 542 pba_num[8] = (pba_ptr >> 4) & 0xF; 543 pba_num[9] = pba_ptr & 0xF; 544 545 /* put a null character on the end of our string */ 546 pba_num[10] = '\0'; 547 548 /* switch all the data but the '-' to hex char */ 549 for (offset = 0; offset < 10; offset++) { 550 if (pba_num[offset] < 0xA) 551 pba_num[offset] += '0'; 552 else if (pba_num[offset] < 0x10) 553 pba_num[offset] += 'A' - 0xA; 554 } 555 556 return 0; 557 } 558 559 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); 560 if (ret_val) { 561 hw_dbg(hw, "NVM Read Error\n"); 562 return ret_val; 563 } 564 565 if (length == 
0xFFFF || length == 0) { 566 hw_dbg(hw, "NVM PBA number section invalid length\n"); 567 return IXGBE_ERR_PBA_SECTION; 568 } 569 570 /* check if pba_num buffer is big enough */ 571 if (pba_num_size < (((u32)length * 2) - 1)) { 572 hw_dbg(hw, "PBA string buffer too small\n"); 573 return IXGBE_ERR_NO_SPACE; 574 } 575 576 /* trim pba length from start of string */ 577 pba_ptr++; 578 length--; 579 580 for (offset = 0; offset < length; offset++) { 581 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); 582 if (ret_val) { 583 hw_dbg(hw, "NVM Read Error\n"); 584 return ret_val; 585 } 586 pba_num[offset * 2] = (u8)(data >> 8); 587 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); 588 } 589 pba_num[offset * 2] = '\0'; 590 591 return 0; 592 } 593 594 /** 595 * ixgbe_get_mac_addr_generic - Generic get MAC address 596 * @hw: pointer to hardware structure 597 * @mac_addr: Adapter MAC address 598 * 599 * Reads the adapter's MAC address from first Receive Address Register (RAR0) 600 * A reset of the adapter must be performed prior to calling this function 601 * in order for the MAC address to have been loaded from the EEPROM into RAR0 602 **/ 603 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) 604 { 605 u32 rar_high; 606 u32 rar_low; 607 u16 i; 608 609 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); 610 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); 611 612 for (i = 0; i < 4; i++) 613 mac_addr[i] = (u8)(rar_low >> (i*8)); 614 615 for (i = 0; i < 2; i++) 616 mac_addr[i+4] = (u8)(rar_high >> (i*8)); 617 618 return 0; 619 } 620 621 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) 622 { 623 switch (link_status & IXGBE_PCI_LINK_WIDTH) { 624 case IXGBE_PCI_LINK_WIDTH_1: 625 return ixgbe_bus_width_pcie_x1; 626 case IXGBE_PCI_LINK_WIDTH_2: 627 return ixgbe_bus_width_pcie_x2; 628 case IXGBE_PCI_LINK_WIDTH_4: 629 return ixgbe_bus_width_pcie_x4; 630 case IXGBE_PCI_LINK_WIDTH_8: 631 return ixgbe_bus_width_pcie_x8; 632 default: 633 return ixgbe_bus_width_unknown; 
634 } 635 } 636 637 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) 638 { 639 switch (link_status & IXGBE_PCI_LINK_SPEED) { 640 case IXGBE_PCI_LINK_SPEED_2500: 641 return ixgbe_bus_speed_2500; 642 case IXGBE_PCI_LINK_SPEED_5000: 643 return ixgbe_bus_speed_5000; 644 case IXGBE_PCI_LINK_SPEED_8000: 645 return ixgbe_bus_speed_8000; 646 default: 647 return ixgbe_bus_speed_unknown; 648 } 649 } 650 651 /** 652 * ixgbe_get_bus_info_generic - Generic set PCI bus info 653 * @hw: pointer to hardware structure 654 * 655 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure 656 **/ 657 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 658 { 659 struct ixgbe_adapter *adapter = hw->back; 660 struct ixgbe_mac_info *mac = &hw->mac; 661 u16 link_status; 662 663 hw->bus.type = ixgbe_bus_type_pci_express; 664 665 /* Get the negotiated link width and speed from PCI config space */ 666 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, 667 &link_status); 668 669 hw->bus.width = ixgbe_convert_bus_width(link_status); 670 hw->bus.speed = ixgbe_convert_bus_speed(link_status); 671 672 mac->ops.set_lan_id(hw); 673 674 return 0; 675 } 676 677 /** 678 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 679 * @hw: pointer to the HW structure 680 * 681 * Determines the LAN function id by reading memory-mapped registers 682 * and swaps the port value if requested. 
**/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	/* function number comes from the STATUS register's LAN ID field */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* To turn on the LED, set mode to ON. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.  Runs only once:
 * subsequent calls see type != ixgbe_eeprom_uninitialized and return.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 8-bit vs 16-bit SPI addressing, per the EEC strap bit */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
		       "%d\n", eeprom->type, eeprom->word_size,
		       eeprom->address_bits);
	}

	return 0;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of words
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method, splitting the
 * buffer into bursts so the EEPROM semaphore is not held too long.
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* burst size: full chunk, or whatever remains at the tail */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						  IXGBE_EEPROM_WREN_OPCODE_SPI,
						  IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode.  Once set the A8 bit stays set for
			 * the rest of the transfer; offsets only grow, so it
			 * remains correct.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/*
			 * Send the data in burst via SPI.
			 * NOTE: i is advanced both here and by the outer for
			 * loop — the inner do/while consumes words in place
			 * until the page boundary (or the end of data).
			 */
			do {
				word = data[i];
				/* byte-swap: SPI EEPROM stores big-endian */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			usleep_range(10000, 20000);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* single-word write is just a one-element buffer write */
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit words(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation.
However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* burst size: full chunk, or whatever remains at the tail */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method.  Acquires the
 * EEPROM for the whole burst and releases it when done.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data; byte-swap from SPI big-endian */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* single-word read is just a one-element buffer read */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);

out:
	return status;
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* program address and kick off the read */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = 0;
	u16 i;

	/* Marching pattern: word value == word index */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily assume the maximum page size so the write helper
	 * does not split the burst; restore 0 ("unknown") afterwards.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != 0)
		goto out;

	/* Read back the first word: if the burst wrapped, it now holds the
	 * index of the word that wrapped onto it.
	 */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != 0)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.",
	       hw->eeprom.word_page_size);
out:
	return status;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* Single-word convenience wrapper around the buffered EERD read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1238 **/ 1239 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1240 u16 words, u16 *data) 1241 { 1242 u32 eewr; 1243 s32 status = 0; 1244 u16 i; 1245 1246 hw->eeprom.ops.init_params(hw); 1247 1248 if (words == 0) { 1249 status = IXGBE_ERR_INVALID_ARGUMENT; 1250 goto out; 1251 } 1252 1253 if (offset >= hw->eeprom.word_size) { 1254 status = IXGBE_ERR_EEPROM; 1255 goto out; 1256 } 1257 1258 for (i = 0; i < words; i++) { 1259 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1260 (data[i] << IXGBE_EEPROM_RW_REG_DATA) | 1261 IXGBE_EEPROM_RW_REG_START; 1262 1263 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1264 if (status != 0) { 1265 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1266 goto out; 1267 } 1268 1269 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1270 1271 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1272 if (status != 0) { 1273 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1274 goto out; 1275 } 1276 } 1277 1278 out: 1279 return status; 1280 } 1281 1282 /** 1283 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR 1284 * @hw: pointer to hardware structure 1285 * @offset: offset of word in the EEPROM to write 1286 * @data: word write to the EEPROM 1287 * 1288 * Write a 16 bit word to the EEPROM using the EEWR register. 1289 **/ 1290 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1291 { 1292 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); 1293 } 1294 1295 /** 1296 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1297 * @hw: pointer to hardware structure 1298 * @ee_reg: EEPROM flag for polling 1299 * 1300 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1301 * read or write is done respectively. 
 **/
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	/* Poll the DONE bit with a 5us delay per attempt; times out after
	 * IXGBE_EERD_EEWR_ATTEMPTS iterations.
	 */
	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = 0;
			break;
		}
		udelay(5);
	}
	return status;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec;
	u32 i;

	/* First take the SW/FW semaphore, then request the EEPROM itself */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the hardware grant */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == 0) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			udelay(1);
		}
	}
	return status;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware
structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
		       "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		udelay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = 0;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
			       "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers "
		       "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 *
 * Returns 0 when the EEPROM signals ready, IXGBE_ERR_EEPROM on timeout.
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Returns the checksum word such that the sum of all checksummed words,
 * including the checksum itself, equals IXGBE_EEPROM_SUM.
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			/* First word of each pointed-to region is its length */
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);
		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
					      checksum);
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
1823 **/ 1824 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 1825 u32 enable_addr) 1826 { 1827 u32 rar_low, rar_high; 1828 u32 rar_entries = hw->mac.num_rar_entries; 1829 1830 /* Make sure we are using a valid rar index range */ 1831 if (index >= rar_entries) { 1832 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1833 return IXGBE_ERR_INVALID_ARGUMENT; 1834 } 1835 1836 /* setup VMDq pool selection before this RAR gets enabled */ 1837 hw->mac.ops.set_vmdq(hw, index, vmdq); 1838 1839 /* 1840 * HW expects these in little endian so we reverse the byte 1841 * order from network order (big endian) to little endian 1842 */ 1843 rar_low = ((u32)addr[0] | 1844 ((u32)addr[1] << 8) | 1845 ((u32)addr[2] << 16) | 1846 ((u32)addr[3] << 24)); 1847 /* 1848 * Some parts put the VMDq setting in the extra RAH bits, 1849 * so save everything except the lower 16 bits that hold part 1850 * of the address and the address valid bit. 1851 */ 1852 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1853 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1854 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); 1855 1856 if (enable_addr != 0) 1857 rar_high |= IXGBE_RAH_AV; 1858 1859 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1860 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1861 1862 return 0; 1863 } 1864 1865 /** 1866 * ixgbe_clear_rar_generic - Remove Rx address register 1867 * @hw: pointer to hardware structure 1868 * @index: Receive address register to write 1869 * 1870 * Clears an ethernet address from a receive address register. 
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Some MACs also carry a unicast table; clear it if present */
	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		hw_dbg(hw, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: multicast address to hash into the table
 *
 * Sets the bit-vector in the multicast table shadow.
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Push the shadow table out to the hardware */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Enable multicast filtering only if at least one entry is set */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
2080 **/ 2081 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 2082 { 2083 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2084 2085 if (a->mta_in_use > 0) 2086 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2087 hw->mac.mc_filter_type); 2088 2089 return 0; 2090 } 2091 2092 /** 2093 * ixgbe_disable_mc_generic - Disable multicast address in RAR 2094 * @hw: pointer to hardware structure 2095 * 2096 * Disables multicast address in RAR and the use of the multicast hash table. 2097 **/ 2098 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2099 { 2100 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2101 2102 if (a->mta_in_use > 0) 2103 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2104 2105 return 0; 2106 } 2107 2108 /** 2109 * ixgbe_fc_enable_generic - Enable flow control 2110 * @hw: pointer to hardware structure 2111 * 2112 * Enable flow control according to the current settings. 2113 **/ 2114 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2115 { 2116 s32 ret_val = 0; 2117 u32 mflcn_reg, fccfg_reg; 2118 u32 reg; 2119 u32 fcrtl, fcrth; 2120 int i; 2121 2122 /* 2123 * Validate the water mark configuration for packet buffer 0. Zero 2124 * water marks indicate that the packet buffer was not configured 2125 * and the watermarks for packet buffer 0 should always be configured. 
2126 */ 2127 if (!hw->fc.low_water || 2128 !hw->fc.high_water[0] || 2129 !hw->fc.pause_time) { 2130 hw_dbg(hw, "Invalid water mark configuration\n"); 2131 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2132 goto out; 2133 } 2134 2135 /* Negotiate the fc mode to use */ 2136 ixgbe_fc_autoneg(hw); 2137 2138 /* Disable any previous flow control settings */ 2139 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2140 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2141 2142 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2143 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2144 2145 /* 2146 * The possible values of fc.current_mode are: 2147 * 0: Flow control is completely disabled 2148 * 1: Rx flow control is enabled (we can receive pause frames, 2149 * but not send pause frames). 2150 * 2: Tx flow control is enabled (we can send pause frames but 2151 * we do not support receiving pause frames). 2152 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2153 * other: Invalid. 2154 */ 2155 switch (hw->fc.current_mode) { 2156 case ixgbe_fc_none: 2157 /* 2158 * Flow control is disabled by software override or autoneg. 2159 * The code below will actually disable it in the HW. 2160 */ 2161 break; 2162 case ixgbe_fc_rx_pause: 2163 /* 2164 * Rx Flow control is enabled and Tx Flow control is 2165 * disabled by software override. Since there really 2166 * isn't a way to advertise that we are capable of RX 2167 * Pause ONLY, we will advertise that we support both 2168 * symmetric and asymmetric Rx PAUSE. Later, we will 2169 * disable the adapter's ability to send PAUSE frames. 2170 */ 2171 mflcn_reg |= IXGBE_MFLCN_RFCE; 2172 break; 2173 case ixgbe_fc_tx_pause: 2174 /* 2175 * Tx Flow control is enabled, and Rx Flow control is 2176 * disabled by software override. 2177 */ 2178 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2179 break; 2180 case ixgbe_fc_full: 2181 /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ 2182 mflcn_reg |= IXGBE_MFLCN_RFCE; 2183 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2184 break; 2185 default: 2186 hw_dbg(hw, "Flow control param set incorrectly\n"); 2187 ret_val = IXGBE_ERR_CONFIG; 2188 goto out; 2189 break; 2190 } 2191 2192 /* Set 802.3x based flow control settings. */ 2193 mflcn_reg |= IXGBE_MFLCN_DPF; 2194 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2195 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2196 2197 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; 2198 2199 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2200 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2201 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2202 hw->fc.high_water[i]) { 2203 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2204 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2205 } else { 2206 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2207 /* 2208 * In order to prevent Tx hangs when the internal Tx 2209 * switch is enabled we must set the high water mark 2210 * to the maximum FCRTH value. This allows the Tx 2211 * switch to function even under heavy Rx workloads. 
2212 */ 2213 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; 2214 } 2215 2216 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2217 } 2218 2219 /* Configure pause time (2 TCs per register) */ 2220 reg = hw->fc.pause_time * 0x00010001; 2221 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 2222 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2223 2224 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2225 2226 out: 2227 return ret_val; 2228 } 2229 2230 /** 2231 * ixgbe_negotiate_fc - Negotiate flow control 2232 * @hw: pointer to hardware structure 2233 * @adv_reg: flow control advertised settings 2234 * @lp_reg: link partner's flow control settings 2235 * @adv_sym: symmetric pause bit in advertisement 2236 * @adv_asm: asymmetric pause bit in advertisement 2237 * @lp_sym: symmetric pause bit in link partner advertisement 2238 * @lp_asm: asymmetric pause bit in link partner advertisement 2239 * 2240 * Find the intersection between advertised settings and link partner's 2241 * advertised settings 2242 **/ 2243 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2244 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2245 { 2246 if ((!(adv_reg)) || (!(lp_reg))) 2247 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2248 2249 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2250 /* 2251 * Now we need to check if the user selected Rx ONLY 2252 * of pause frames. In this case, we had to advertise 2253 * FULL flow control because we could not advertise RX 2254 * ONLY. Hence, we must now check to see if we need to 2255 * turn OFF the TRANSMISSION of PAUSE frames. 
2256 */ 2257 if (hw->fc.requested_mode == ixgbe_fc_full) { 2258 hw->fc.current_mode = ixgbe_fc_full; 2259 hw_dbg(hw, "Flow Control = FULL.\n"); 2260 } else { 2261 hw->fc.current_mode = ixgbe_fc_rx_pause; 2262 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); 2263 } 2264 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2265 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2266 hw->fc.current_mode = ixgbe_fc_tx_pause; 2267 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); 2268 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2269 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2270 hw->fc.current_mode = ixgbe_fc_rx_pause; 2271 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 2272 } else { 2273 hw->fc.current_mode = ixgbe_fc_none; 2274 hw_dbg(hw, "Flow Control = NONE.\n"); 2275 } 2276 return 0; 2277 } 2278 2279 /** 2280 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2281 * @hw: pointer to hardware structure 2282 * 2283 * Enable flow control according on 1 gig fiber. 
2284 **/ 2285 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2286 { 2287 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2288 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2289 2290 /* 2291 * On multispeed fiber at 1g, bail out if 2292 * - link is up but AN did not complete, or if 2293 * - link is up and AN completed but timed out 2294 */ 2295 2296 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2297 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2298 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) 2299 goto out; 2300 2301 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2302 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2303 2304 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2305 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2306 IXGBE_PCS1GANA_ASM_PAUSE, 2307 IXGBE_PCS1GANA_SYM_PAUSE, 2308 IXGBE_PCS1GANA_ASM_PAUSE); 2309 2310 out: 2311 return ret_val; 2312 } 2313 2314 /** 2315 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2316 * @hw: pointer to hardware structure 2317 * 2318 * Enable flow control according to IEEE clause 37. 
2319 **/ 2320 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2321 { 2322 u32 links2, anlp1_reg, autoc_reg, links; 2323 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2324 2325 /* 2326 * On backplane, bail out if 2327 * - backplane autoneg was not completed, or if 2328 * - we are 82599 and link partner is not AN enabled 2329 */ 2330 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2331 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) 2332 goto out; 2333 2334 if (hw->mac.type == ixgbe_mac_82599EB) { 2335 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2336 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) 2337 goto out; 2338 } 2339 /* 2340 * Read the 10g AN autoc and LP ability registers and resolve 2341 * local flow control settings accordingly 2342 */ 2343 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2344 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2345 2346 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2347 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2348 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2349 2350 out: 2351 return ret_val; 2352 } 2353 2354 /** 2355 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2356 * @hw: pointer to hardware structure 2357 * 2358 * Enable flow control according to IEEE clause 37. 
2359 **/ 2360 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2361 { 2362 u16 technology_ability_reg = 0; 2363 u16 lp_technology_ability_reg = 0; 2364 2365 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2366 MDIO_MMD_AN, 2367 &technology_ability_reg); 2368 hw->phy.ops.read_reg(hw, MDIO_AN_LPA, 2369 MDIO_MMD_AN, 2370 &lp_technology_ability_reg); 2371 2372 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2373 (u32)lp_technology_ability_reg, 2374 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2375 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2376 } 2377 2378 /** 2379 * ixgbe_fc_autoneg - Configure flow control 2380 * @hw: pointer to hardware structure 2381 * 2382 * Compares our advertised flow control capabilities to those advertised by 2383 * our link partner, and determines the proper flow control mode to use. 2384 **/ 2385 void ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2386 { 2387 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2388 ixgbe_link_speed speed; 2389 bool link_up; 2390 2391 /* 2392 * AN should have completed when the cable was plugged in. 2393 * Look for reasons to bail out. Bail out if: 2394 * - FC autoneg is disabled, or if 2395 * - link is not up. 2396 * 2397 * Since we're being called from an LSC, link is already known to be up. 2398 * So use link_up_wait_to_complete=false. 
2399 */ 2400 if (hw->fc.disable_fc_autoneg) 2401 goto out; 2402 2403 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2404 if (!link_up) 2405 goto out; 2406 2407 switch (hw->phy.media_type) { 2408 /* Autoneg flow control on fiber adapters */ 2409 case ixgbe_media_type_fiber_fixed: 2410 case ixgbe_media_type_fiber: 2411 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2412 ret_val = ixgbe_fc_autoneg_fiber(hw); 2413 break; 2414 2415 /* Autoneg flow control on backplane adapters */ 2416 case ixgbe_media_type_backplane: 2417 ret_val = ixgbe_fc_autoneg_backplane(hw); 2418 break; 2419 2420 /* Autoneg flow control on copper adapters */ 2421 case ixgbe_media_type_copper: 2422 if (ixgbe_device_supports_autoneg_fc(hw)) 2423 ret_val = ixgbe_fc_autoneg_copper(hw); 2424 break; 2425 2426 default: 2427 break; 2428 } 2429 2430 out: 2431 if (ret_val == 0) { 2432 hw->fc.fc_was_autonegged = true; 2433 } else { 2434 hw->fc.fc_was_autonegged = false; 2435 hw->fc.current_mode = hw->fc.requested_mode; 2436 } 2437 } 2438 2439 /** 2440 * ixgbe_disable_pcie_master - Disable PCI-express master access 2441 * @hw: pointer to hardware structure 2442 * 2443 * Disables PCI-Express master access and verifies there are no pending 2444 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2445 * bit hasn't caused the master requests to be disabled, else 0 2446 * is returned signifying master requests disabled. 
2447 **/ 2448 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2449 { 2450 struct ixgbe_adapter *adapter = hw->back; 2451 s32 status = 0; 2452 u32 i; 2453 u16 value; 2454 2455 /* Always set this bit to ensure any future transactions are blocked */ 2456 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 2457 2458 /* Exit if master requests are blocked */ 2459 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2460 goto out; 2461 2462 /* Poll for master request bit to clear */ 2463 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2464 udelay(100); 2465 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2466 goto out; 2467 } 2468 2469 /* 2470 * Two consecutive resets are required via CTRL.RST per datasheet 2471 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2472 * of this need. The first reset prevents new master requests from 2473 * being issued by our device. We then must wait 1usec or more for any 2474 * remaining completions from the PCIe bus to trickle in, and then reset 2475 * again to clear out any effects they may have had on our device. 2476 */ 2477 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); 2478 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2479 2480 /* 2481 * Before proceeding, make sure that the PCIe block does not have 2482 * transactions pending. 
2483 */ 2484 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2485 udelay(100); 2486 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, 2487 &value); 2488 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2489 goto out; 2490 } 2491 2492 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); 2493 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2494 2495 out: 2496 return status; 2497 } 2498 2499 /** 2500 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 2501 * @hw: pointer to hardware structure 2502 * @mask: Mask to specify which semaphore to acquire 2503 * 2504 * Acquires the SWFW semaphore through the GSSR register for the specified 2505 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2506 **/ 2507 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2508 { 2509 u32 gssr = 0; 2510 u32 swmask = mask; 2511 u32 fwmask = mask << 5; 2512 u32 timeout = 200; 2513 u32 i; 2514 2515 for (i = 0; i < timeout; i++) { 2516 /* 2517 * SW NVM semaphore bit is used for access to all 2518 * SW_FW_SYNC bits (not just NVM) 2519 */ 2520 if (ixgbe_get_eeprom_semaphore(hw)) 2521 return IXGBE_ERR_SWFW_SYNC; 2522 2523 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2524 if (!(gssr & (fwmask | swmask))) { 2525 gssr |= swmask; 2526 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2527 ixgbe_release_eeprom_semaphore(hw); 2528 return 0; 2529 } else { 2530 /* Resource is currently in use by FW or SW */ 2531 ixgbe_release_eeprom_semaphore(hw); 2532 usleep_range(5000, 10000); 2533 } 2534 } 2535 2536 /* If time expired clear the bits holding the lock and retry */ 2537 if (gssr & (fwmask | swmask)) 2538 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); 2539 2540 usleep_range(5000, 10000); 2541 return IXGBE_ERR_SWFW_SYNC; 2542 } 2543 2544 /** 2545 * ixgbe_release_swfw_sync - Release SWFW semaphore 2546 * @hw: pointer to hardware structure 2547 * @mask: Mask to specify which semaphore to release 2548 * 2549 * Releases the SWFW semaphore through the GSSR register for the 
specified 2550 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2551 **/ 2552 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2553 { 2554 u32 gssr; 2555 u32 swmask = mask; 2556 2557 ixgbe_get_eeprom_semaphore(hw); 2558 2559 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2560 gssr &= ~swmask; 2561 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2562 2563 ixgbe_release_eeprom_semaphore(hw); 2564 } 2565 2566 /** 2567 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2568 * @hw: pointer to hardware structure 2569 * 2570 * Stops the receive data path and waits for the HW to internally 2571 * empty the Rx security block. 2572 **/ 2573 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2574 { 2575 #define IXGBE_MAX_SECRX_POLL 40 2576 int i; 2577 int secrxreg; 2578 2579 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2580 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2581 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2582 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2583 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2584 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2585 break; 2586 else 2587 /* Use interrupt-safe sleep just in case */ 2588 udelay(1000); 2589 } 2590 2591 /* For informational purposes only */ 2592 if (i >= IXGBE_MAX_SECRX_POLL) 2593 hw_dbg(hw, "Rx unit being enabled before security " 2594 "path fully disabled. 
Continuing with init.\n"); 2595 2596 return 0; 2597 2598 } 2599 2600 /** 2601 * ixgbe_enable_rx_buff - Enables the receive data path 2602 * @hw: pointer to hardware structure 2603 * 2604 * Enables the receive data path 2605 **/ 2606 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2607 { 2608 int secrxreg; 2609 2610 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2611 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2612 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2613 IXGBE_WRITE_FLUSH(hw); 2614 2615 return 0; 2616 } 2617 2618 /** 2619 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2620 * @hw: pointer to hardware structure 2621 * @regval: register value to write to RXCTRL 2622 * 2623 * Enables the Rx DMA unit 2624 **/ 2625 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2626 { 2627 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2628 2629 return 0; 2630 } 2631 2632 /** 2633 * ixgbe_blink_led_start_generic - Blink LED based on index. 2634 * @hw: pointer to hardware structure 2635 * @index: led number to blink 2636 **/ 2637 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2638 { 2639 ixgbe_link_speed speed = 0; 2640 bool link_up = false; 2641 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2642 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2643 s32 ret_val = 0; 2644 2645 /* 2646 * Link must be up to auto-blink the LEDs; 2647 * Force it if link is down. 2648 */ 2649 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2650 2651 if (!link_up) { 2652 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2653 * LESM is on. 
2654 */ 2655 bool got_lock = false; 2656 2657 if ((hw->mac.type == ixgbe_mac_82599EB) && 2658 ixgbe_verify_lesm_fw_enabled_82599(hw)) { 2659 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 2660 IXGBE_GSSR_MAC_CSR_SM); 2661 if (ret_val) 2662 goto out; 2663 2664 got_lock = true; 2665 } 2666 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2667 autoc_reg |= IXGBE_AUTOC_FLU; 2668 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2669 IXGBE_WRITE_FLUSH(hw); 2670 2671 if (got_lock) 2672 hw->mac.ops.release_swfw_sync(hw, 2673 IXGBE_GSSR_MAC_CSR_SM); 2674 usleep_range(10000, 20000); 2675 } 2676 2677 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2678 led_reg |= IXGBE_LED_BLINK(index); 2679 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2680 IXGBE_WRITE_FLUSH(hw); 2681 2682 out: 2683 return ret_val; 2684 } 2685 2686 /** 2687 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 2688 * @hw: pointer to hardware structure 2689 * @index: led number to stop blinking 2690 **/ 2691 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2692 { 2693 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2694 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2695 s32 ret_val = 0; 2696 bool got_lock = false; 2697 2698 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2699 * LESM is on. 
2700 */ 2701 if ((hw->mac.type == ixgbe_mac_82599EB) && 2702 ixgbe_verify_lesm_fw_enabled_82599(hw)) { 2703 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 2704 IXGBE_GSSR_MAC_CSR_SM); 2705 if (ret_val) 2706 goto out; 2707 2708 got_lock = true; 2709 } 2710 2711 autoc_reg &= ~IXGBE_AUTOC_FLU; 2712 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2713 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2714 2715 if (hw->mac.type == ixgbe_mac_82599EB) 2716 ixgbe_reset_pipeline_82599(hw); 2717 2718 if (got_lock) 2719 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 2720 2721 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2722 led_reg &= ~IXGBE_LED_BLINK(index); 2723 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2724 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2725 IXGBE_WRITE_FLUSH(hw); 2726 2727 out: 2728 return ret_val; 2729 } 2730 2731 /** 2732 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2733 * @hw: pointer to hardware structure 2734 * @san_mac_offset: SAN MAC address offset 2735 * 2736 * This function will read the EEPROM location for the SAN MAC address 2737 * pointer, and returns the value at that location. This is used in both 2738 * get and set mac_addr routines. 2739 **/ 2740 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2741 u16 *san_mac_offset) 2742 { 2743 s32 ret_val; 2744 2745 /* 2746 * First read the EEPROM pointer to see if the MAC addresses are 2747 * available. 2748 */ 2749 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 2750 san_mac_offset); 2751 if (ret_val) 2752 hw_err(hw, "eeprom read at offset %d failed\n", 2753 IXGBE_SAN_MAC_ADDR_PTR); 2754 2755 return ret_val; 2756 } 2757 2758 /** 2759 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2760 * @hw: pointer to hardware structure 2761 * @san_mac_addr: SAN MAC address 2762 * 2763 * Reads the SAN MAC address from the EEPROM, if it's available. 
This is 2764 * per-port, so set_lan_id() must be called before reading the addresses. 2765 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2766 * upon for non-SFP connections, so we must call it here. 2767 **/ 2768 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2769 { 2770 u16 san_mac_data, san_mac_offset; 2771 u8 i; 2772 s32 ret_val; 2773 2774 /* 2775 * First read the EEPROM pointer to see if the MAC addresses are 2776 * available. If they're not, no point in calling set_lan_id() here. 2777 */ 2778 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2779 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 2780 2781 goto san_mac_addr_clr; 2782 2783 /* make sure we know which port we need to program */ 2784 hw->mac.ops.set_lan_id(hw); 2785 /* apply the port offset to the address offset */ 2786 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2787 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2788 for (i = 0; i < 3; i++) { 2789 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2790 &san_mac_data); 2791 if (ret_val) { 2792 hw_err(hw, "eeprom read at offset %d failed\n", 2793 san_mac_offset); 2794 goto san_mac_addr_clr; 2795 } 2796 san_mac_addr[i * 2] = (u8)(san_mac_data); 2797 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2798 san_mac_offset++; 2799 } 2800 return 0; 2801 2802 san_mac_addr_clr: 2803 /* No addresses available in this EEPROM. It's not necessarily an 2804 * error though, so just wipe the local address and return. 2805 */ 2806 for (i = 0; i < 6; i++) 2807 san_mac_addr[i] = 0xFF; 2808 return ret_val; 2809 } 2810 2811 /** 2812 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2813 * @hw: pointer to hardware structure 2814 * 2815 * Read PCIe configuration space, and get the MSI-X vector count from 2816 * the capabilities table. 
2817 **/ 2818 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2819 { 2820 struct ixgbe_adapter *adapter = hw->back; 2821 u16 msix_count = 1; 2822 u16 max_msix_count; 2823 u16 pcie_offset; 2824 2825 switch (hw->mac.type) { 2826 case ixgbe_mac_82598EB: 2827 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 2828 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 2829 break; 2830 case ixgbe_mac_82599EB: 2831 case ixgbe_mac_X540: 2832 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2833 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2834 break; 2835 default: 2836 return msix_count; 2837 } 2838 2839 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count); 2840 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2841 2842 /* MSI-X count is zero-based in HW */ 2843 msix_count++; 2844 2845 if (msix_count > max_msix_count) 2846 msix_count = max_msix_count; 2847 2848 return msix_count; 2849 } 2850 2851 /** 2852 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2853 * @hw: pointer to hardware struct 2854 * @rar: receive address register index to disassociate 2855 * @vmdq: VMDq pool index to remove from the rar 2856 **/ 2857 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2858 { 2859 u32 mpsar_lo, mpsar_hi; 2860 u32 rar_entries = hw->mac.num_rar_entries; 2861 2862 /* Make sure we are using a valid rar index range */ 2863 if (rar >= rar_entries) { 2864 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2865 return IXGBE_ERR_INVALID_ARGUMENT; 2866 } 2867 2868 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2869 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2870 2871 if (!mpsar_lo && !mpsar_hi) 2872 goto done; 2873 2874 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2875 if (mpsar_lo) { 2876 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2877 mpsar_lo = 0; 2878 } 2879 if (mpsar_hi) { 2880 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2881 mpsar_hi = 0; 2882 } 2883 } else if (vmdq < 32) { 2884 mpsar_lo &= ~(1 << vmdq); 2885 IXGBE_WRITE_REG(hw, 
IXGBE_MPSAR_LO(rar), mpsar_lo); 2886 } else { 2887 mpsar_hi &= ~(1 << (vmdq - 32)); 2888 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2889 } 2890 2891 /* was that the last pool using this rar? */ 2892 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2893 hw->mac.ops.clear_rar(hw, rar); 2894 done: 2895 return 0; 2896 } 2897 2898 /** 2899 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 2900 * @hw: pointer to hardware struct 2901 * @rar: receive address register index to associate with a VMDq index 2902 * @vmdq: VMDq pool index 2903 **/ 2904 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2905 { 2906 u32 mpsar; 2907 u32 rar_entries = hw->mac.num_rar_entries; 2908 2909 /* Make sure we are using a valid rar index range */ 2910 if (rar >= rar_entries) { 2911 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2912 return IXGBE_ERR_INVALID_ARGUMENT; 2913 } 2914 2915 if (vmdq < 32) { 2916 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2917 mpsar |= 1 << vmdq; 2918 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 2919 } else { 2920 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2921 mpsar |= 1 << (vmdq - 32); 2922 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 2923 } 2924 return 0; 2925 } 2926 2927 /** 2928 * This function should only be involved in the IOV mode. 2929 * In IOV mode, Default pool is next pool after the number of 2930 * VFs advertized and not 0. 
2931 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] 2932 * 2933 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address 2934 * @hw: pointer to hardware struct 2935 * @vmdq: VMDq pool index 2936 **/ 2937 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) 2938 { 2939 u32 rar = hw->mac.san_mac_rar_index; 2940 2941 if (vmdq < 32) { 2942 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); 2943 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2944 } else { 2945 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2946 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); 2947 } 2948 2949 return 0; 2950 } 2951 2952 /** 2953 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 2954 * @hw: pointer to hardware structure 2955 **/ 2956 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 2957 { 2958 int i; 2959 2960 for (i = 0; i < 128; i++) 2961 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2962 2963 return 0; 2964 } 2965 2966 /** 2967 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 2968 * @hw: pointer to hardware structure 2969 * @vlan: VLAN id to write to VLAN filter 2970 * 2971 * return the VLVF index where this VLAN id should be placed 2972 * 2973 **/ 2974 static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 2975 { 2976 u32 bits = 0; 2977 u32 first_empty_slot = 0; 2978 s32 regindex; 2979 2980 /* short cut the special case */ 2981 if (vlan == 0) 2982 return 0; 2983 2984 /* 2985 * Search for the vlan id in the VLVF entries. Save off the first empty 2986 * slot found along the way 2987 */ 2988 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { 2989 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 2990 if (!bits && !(first_empty_slot)) 2991 first_empty_slot = regindex; 2992 else if ((bits & 0x0FFF) == vlan) 2993 break; 2994 } 2995 2996 /* 2997 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan 2998 * in the VLVF. 
Else use the first empty VLVF register for this 2999 * vlan id. 3000 */ 3001 if (regindex >= IXGBE_VLVF_ENTRIES) { 3002 if (first_empty_slot) 3003 regindex = first_empty_slot; 3004 else { 3005 hw_dbg(hw, "No space in VLVF.\n"); 3006 regindex = IXGBE_ERR_NO_SPACE; 3007 } 3008 } 3009 3010 return regindex; 3011 } 3012 3013 /** 3014 * ixgbe_set_vfta_generic - Set VLAN filter table 3015 * @hw: pointer to hardware structure 3016 * @vlan: VLAN id to write to VLAN filter 3017 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3018 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3019 * 3020 * Turn on/off specified VLAN in the VLAN filter table. 3021 **/ 3022 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3023 bool vlan_on) 3024 { 3025 s32 regindex; 3026 u32 bitindex; 3027 u32 vfta; 3028 u32 bits; 3029 u32 vt; 3030 u32 targetbit; 3031 bool vfta_changed = false; 3032 3033 if (vlan > 4095) 3034 return IXGBE_ERR_PARAM; 3035 3036 /* 3037 * this is a 2 part operation - first the VFTA, then the 3038 * VLVF and VLVFB if VT Mode is set 3039 * We don't write the VFTA until we know the VLVF part succeeded. 
3040 */ 3041 3042 /* Part 1 3043 * The VFTA is a bitstring made up of 128 32-bit registers 3044 * that enable the particular VLAN id, much like the MTA: 3045 * bits[11-5]: which register 3046 * bits[4-0]: which bit in the register 3047 */ 3048 regindex = (vlan >> 5) & 0x7F; 3049 bitindex = vlan & 0x1F; 3050 targetbit = (1 << bitindex); 3051 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 3052 3053 if (vlan_on) { 3054 if (!(vfta & targetbit)) { 3055 vfta |= targetbit; 3056 vfta_changed = true; 3057 } 3058 } else { 3059 if ((vfta & targetbit)) { 3060 vfta &= ~targetbit; 3061 vfta_changed = true; 3062 } 3063 } 3064 3065 /* Part 2 3066 * If VT Mode is set 3067 * Either vlan_on 3068 * make sure the vlan is in VLVF 3069 * set the vind bit in the matching VLVFB 3070 * Or !vlan_on 3071 * clear the pool bit and possibly the vind 3072 */ 3073 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3074 if (vt & IXGBE_VT_CTL_VT_ENABLE) { 3075 s32 vlvf_index; 3076 3077 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); 3078 if (vlvf_index < 0) 3079 return vlvf_index; 3080 3081 if (vlan_on) { 3082 /* set the pool bit */ 3083 if (vind < 32) { 3084 bits = IXGBE_READ_REG(hw, 3085 IXGBE_VLVFB(vlvf_index*2)); 3086 bits |= (1 << vind); 3087 IXGBE_WRITE_REG(hw, 3088 IXGBE_VLVFB(vlvf_index*2), 3089 bits); 3090 } else { 3091 bits = IXGBE_READ_REG(hw, 3092 IXGBE_VLVFB((vlvf_index*2)+1)); 3093 bits |= (1 << (vind-32)); 3094 IXGBE_WRITE_REG(hw, 3095 IXGBE_VLVFB((vlvf_index*2)+1), 3096 bits); 3097 } 3098 } else { 3099 /* clear the pool bit */ 3100 if (vind < 32) { 3101 bits = IXGBE_READ_REG(hw, 3102 IXGBE_VLVFB(vlvf_index*2)); 3103 bits &= ~(1 << vind); 3104 IXGBE_WRITE_REG(hw, 3105 IXGBE_VLVFB(vlvf_index*2), 3106 bits); 3107 bits |= IXGBE_READ_REG(hw, 3108 IXGBE_VLVFB((vlvf_index*2)+1)); 3109 } else { 3110 bits = IXGBE_READ_REG(hw, 3111 IXGBE_VLVFB((vlvf_index*2)+1)); 3112 bits &= ~(1 << (vind-32)); 3113 IXGBE_WRITE_REG(hw, 3114 IXGBE_VLVFB((vlvf_index*2)+1), 3115 bits); 3116 bits |= IXGBE_READ_REG(hw, 3117 
IXGBE_VLVFB(vlvf_index*2)); 3118 } 3119 } 3120 3121 /* 3122 * If there are still bits set in the VLVFB registers 3123 * for the VLAN ID indicated we need to see if the 3124 * caller is requesting that we clear the VFTA entry bit. 3125 * If the caller has requested that we clear the VFTA 3126 * entry bit but there are still pools/VFs using this VLAN 3127 * ID entry then ignore the request. We're not worried 3128 * about the case where we're turning the VFTA VLAN ID 3129 * entry bit on, only when requested to turn it off as 3130 * there may be multiple pools and/or VFs using the 3131 * VLAN ID entry. In that case we cannot clear the 3132 * VFTA bit until all pools/VFs using that VLAN ID have also 3133 * been cleared. This will be indicated by "bits" being 3134 * zero. 3135 */ 3136 if (bits) { 3137 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 3138 (IXGBE_VLVF_VIEN | vlan)); 3139 if (!vlan_on) { 3140 /* someone wants to clear the vfta entry 3141 * but some pools/VFs are still using it. 3142 * Ignore it. 
*/ 3143 vfta_changed = false; 3144 } 3145 } 3146 else 3147 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3148 } 3149 3150 if (vfta_changed) 3151 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); 3152 3153 return 0; 3154 } 3155 3156 /** 3157 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3158 * @hw: pointer to hardware structure 3159 * 3160 * Clears the VLAN filer table, and the VMDq index associated with the filter 3161 **/ 3162 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3163 { 3164 u32 offset; 3165 3166 for (offset = 0; offset < hw->mac.vft_size; offset++) 3167 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3168 3169 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3170 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3171 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); 3172 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); 3173 } 3174 3175 return 0; 3176 } 3177 3178 /** 3179 * ixgbe_check_mac_link_generic - Determine link and speed status 3180 * @hw: pointer to hardware structure 3181 * @speed: pointer to link speed 3182 * @link_up: true when link is up 3183 * @link_up_wait_to_complete: bool used to wait for link up or not 3184 * 3185 * Reads the links register to determine if link is up and the current speed 3186 **/ 3187 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3188 bool *link_up, bool link_up_wait_to_complete) 3189 { 3190 u32 links_reg, links_orig; 3191 u32 i; 3192 3193 /* clear the old state */ 3194 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3195 3196 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3197 3198 if (links_orig != links_reg) { 3199 hw_dbg(hw, "LINKS changed from %08X to %08X\n", 3200 links_orig, links_reg); 3201 } 3202 3203 if (link_up_wait_to_complete) { 3204 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3205 if (links_reg & IXGBE_LINKS_UP) { 3206 *link_up = true; 3207 break; 3208 } else { 3209 *link_up = false; 3210 } 3211 msleep(100); 3212 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3213 } 3214 
} else { 3215 if (links_reg & IXGBE_LINKS_UP) 3216 *link_up = true; 3217 else 3218 *link_up = false; 3219 } 3220 3221 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3222 IXGBE_LINKS_SPEED_10G_82599) 3223 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3224 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3225 IXGBE_LINKS_SPEED_1G_82599) 3226 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3227 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3228 IXGBE_LINKS_SPEED_100_82599) 3229 *speed = IXGBE_LINK_SPEED_100_FULL; 3230 else 3231 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3232 3233 return 0; 3234 } 3235 3236 /** 3237 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3238 * the EEPROM 3239 * @hw: pointer to hardware structure 3240 * @wwnn_prefix: the alternative WWNN prefix 3241 * @wwpn_prefix: the alternative WWPN prefix 3242 * 3243 * This function will read the EEPROM from the alternative SAN MAC address 3244 * block to check the support for the alternative WWNN/WWPN prefix support. 3245 **/ 3246 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3247 u16 *wwpn_prefix) 3248 { 3249 u16 offset, caps; 3250 u16 alt_san_mac_blk_offset; 3251 3252 /* clear output first */ 3253 *wwnn_prefix = 0xFFFF; 3254 *wwpn_prefix = 0xFFFF; 3255 3256 /* check if alternative SAN MAC is supported */ 3257 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; 3258 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) 3259 goto wwn_prefix_err; 3260 3261 if ((alt_san_mac_blk_offset == 0) || 3262 (alt_san_mac_blk_offset == 0xFFFF)) 3263 goto wwn_prefix_out; 3264 3265 /* check capability in alternative san mac address block */ 3266 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3267 if (hw->eeprom.ops.read(hw, offset, &caps)) 3268 goto wwn_prefix_err; 3269 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3270 goto wwn_prefix_out; 3271 3272 /* get the corresponding prefix for WWNN/WWPN */ 3273 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 
3274 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) 3275 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3276 3277 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3278 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 3279 goto wwn_prefix_err; 3280 3281 wwn_prefix_out: 3282 return 0; 3283 3284 wwn_prefix_err: 3285 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3286 return 0; 3287 } 3288 3289 /** 3290 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3291 * @hw: pointer to hardware structure 3292 * @enable: enable or disable switch for anti-spoofing 3293 * @pf: Physical Function pool - do not enable anti-spoofing for the PF 3294 * 3295 **/ 3296 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) 3297 { 3298 int j; 3299 int pf_target_reg = pf >> 3; 3300 int pf_target_shift = pf % 8; 3301 u32 pfvfspoof = 0; 3302 3303 if (hw->mac.type == ixgbe_mac_82598EB) 3304 return; 3305 3306 if (enable) 3307 pfvfspoof = IXGBE_SPOOF_MACAS_MASK; 3308 3309 /* 3310 * PFVFSPOOF register array is size 8 with 8 bits assigned to 3311 * MAC anti-spoof enables in each register array element. 3312 */ 3313 for (j = 0; j < pf_target_reg; j++) 3314 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); 3315 3316 /* 3317 * The PF should be allowed to spoof so that it can support 3318 * emulation mode NICs. Do not set the bits assigned to the PF 3319 */ 3320 pfvfspoof &= (1 << pf_target_shift) - 1; 3321 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); 3322 3323 /* 3324 * Remaining pools belong to the PF so they do not need to have 3325 * anti-spoofing enabled. 
3326 */ 3327 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) 3328 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); 3329 } 3330 3331 /** 3332 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 3333 * @hw: pointer to hardware structure 3334 * @enable: enable or disable switch for VLAN anti-spoofing 3335 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 3336 * 3337 **/ 3338 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3339 { 3340 int vf_target_reg = vf >> 3; 3341 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 3342 u32 pfvfspoof; 3343 3344 if (hw->mac.type == ixgbe_mac_82598EB) 3345 return; 3346 3347 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3348 if (enable) 3349 pfvfspoof |= (1 << vf_target_shift); 3350 else 3351 pfvfspoof &= ~(1 << vf_target_shift); 3352 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3353 } 3354 3355 /** 3356 * ixgbe_get_device_caps_generic - Get additional device capabilities 3357 * @hw: pointer to hardware structure 3358 * @device_caps: the EEPROM word with the extra device capabilities 3359 * 3360 * This function will read the EEPROM location for the device capabilities, 3361 * and return the word through device_caps. 
3362 **/ 3363 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3364 { 3365 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3366 3367 return 0; 3368 } 3369 3370 /** 3371 * ixgbe_set_rxpba_generic - Initialize RX packet buffer 3372 * @hw: pointer to hardware structure 3373 * @num_pb: number of packet buffers to allocate 3374 * @headroom: reserve n KB of headroom 3375 * @strategy: packet buffer allocation strategy 3376 **/ 3377 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, 3378 int num_pb, 3379 u32 headroom, 3380 int strategy) 3381 { 3382 u32 pbsize = hw->mac.rx_pb_size; 3383 int i = 0; 3384 u32 rxpktsize, txpktsize, txpbthresh; 3385 3386 /* Reserve headroom */ 3387 pbsize -= headroom; 3388 3389 if (!num_pb) 3390 num_pb = 1; 3391 3392 /* Divide remaining packet buffer space amongst the number 3393 * of packet buffers requested using supplied strategy. 3394 */ 3395 switch (strategy) { 3396 case (PBA_STRATEGY_WEIGHTED): 3397 /* pba_80_48 strategy weight first half of packet buffer with 3398 * 5/8 of the packet buffer space. 3399 */ 3400 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); 3401 pbsize -= rxpktsize * (num_pb / 2); 3402 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 3403 for (; i < (num_pb / 2); i++) 3404 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3405 /* Fall through to configure remaining packet buffers */ 3406 case (PBA_STRATEGY_EQUAL): 3407 /* Divide the remaining Rx packet buffer evenly among the TCs */ 3408 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 3409 for (; i < num_pb; i++) 3410 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3411 break; 3412 default: 3413 break; 3414 } 3415 3416 /* 3417 * Setup Tx packet buffer and threshold equally for all TCs 3418 * TXPBTHRESH register is set in K so divide by 1024 and subtract 3419 * 10 since the largest packet we support is just over 9K. 
3420 */ 3421 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 3422 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 3423 for (i = 0; i < num_pb; i++) { 3424 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 3425 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 3426 } 3427 3428 /* Clear unused TCs, if any, to zero buffer size*/ 3429 for (; i < IXGBE_MAX_PB; i++) { 3430 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 3431 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 3432 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 3433 } 3434 } 3435 3436 /** 3437 * ixgbe_calculate_checksum - Calculate checksum for buffer 3438 * @buffer: pointer to EEPROM 3439 * @length: size of EEPROM to calculate a checksum for 3440 * 3441 * Calculates the checksum for some buffer on a specified length. The 3442 * checksum calculated is returned. 3443 **/ 3444 static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 3445 { 3446 u32 i; 3447 u8 sum = 0; 3448 3449 if (!buffer) 3450 return 0; 3451 3452 for (i = 0; i < length; i++) 3453 sum += buffer[i]; 3454 3455 return (u8) (0 - sum); 3456 } 3457 3458 /** 3459 * ixgbe_host_interface_command - Issue command to manageability block 3460 * @hw: pointer to the HW structure 3461 * @buffer: contains the command to write and where the return status will 3462 * be placed 3463 * @length: length of buffer, must be multiple of 4 bytes 3464 * 3465 * Communicates with the manageability block. On success return 0 3466 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 3467 **/ 3468 static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, 3469 u32 length) 3470 { 3471 u32 hicr, i, bi; 3472 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3473 u8 buf_len, dword_len; 3474 3475 s32 ret_val = 0; 3476 3477 if (length == 0 || length & 0x3 || 3478 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3479 hw_dbg(hw, "Buffer length failure.\n"); 3480 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3481 goto out; 3482 } 3483 3484 /* Check that the host interface is enabled. 
*/ 3485 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3486 if ((hicr & IXGBE_HICR_EN) == 0) { 3487 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3488 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3489 goto out; 3490 } 3491 3492 /* Calculate length in DWORDs */ 3493 dword_len = length >> 2; 3494 3495 /* 3496 * The device driver writes the relevant command block 3497 * into the ram area. 3498 */ 3499 for (i = 0; i < dword_len; i++) 3500 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, 3501 i, cpu_to_le32(buffer[i])); 3502 3503 /* Setting this bit tells the ARC that a new command is pending. */ 3504 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); 3505 3506 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { 3507 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3508 if (!(hicr & IXGBE_HICR_C)) 3509 break; 3510 usleep_range(1000, 2000); 3511 } 3512 3513 /* Check command successful completion. */ 3514 if (i == IXGBE_HI_COMMAND_TIMEOUT || 3515 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { 3516 hw_dbg(hw, "Command has failed with no status valid.\n"); 3517 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3518 goto out; 3519 } 3520 3521 /* Calculate length in DWORDs */ 3522 dword_len = hdr_size >> 2; 3523 3524 /* first pull in the header so we know the buffer length */ 3525 for (bi = 0; bi < dword_len; bi++) { 3526 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3527 le32_to_cpus(&buffer[bi]); 3528 } 3529 3530 /* If there is any thing in data position pull it in */ 3531 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; 3532 if (buf_len == 0) 3533 goto out; 3534 3535 if (length < (buf_len + hdr_size)) { 3536 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3537 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3538 goto out; 3539 } 3540 3541 /* Calculate length in DWORDs, add 3 for odd lengths */ 3542 dword_len = (buf_len + 3) >> 2; 3543 3544 /* Pull in the rest of the buffer (bi is where we left off)*/ 3545 for (; bi <= dword_len; bi++) { 3546 buffer[bi] = 
IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3547 le32_to_cpus(&buffer[bi]); 3548 } 3549 3550 out: 3551 return ret_val; 3552 } 3553 3554 /** 3555 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware 3556 * @hw: pointer to the HW structure 3557 * @maj: driver version major number 3558 * @min: driver version minor number 3559 * @build: driver version build number 3560 * @sub: driver version sub build number 3561 * 3562 * Sends driver version number to firmware through the manageability 3563 * block. On success return 0 3564 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 3565 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 3566 **/ 3567 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 3568 u8 build, u8 sub) 3569 { 3570 struct ixgbe_hic_drv_info fw_cmd; 3571 int i; 3572 s32 ret_val = 0; 3573 3574 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) { 3575 ret_val = IXGBE_ERR_SWFW_SYNC; 3576 goto out; 3577 } 3578 3579 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 3580 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 3581 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 3582 fw_cmd.port_num = (u8)hw->bus.func; 3583 fw_cmd.ver_maj = maj; 3584 fw_cmd.ver_min = min; 3585 fw_cmd.ver_build = build; 3586 fw_cmd.ver_sub = sub; 3587 fw_cmd.hdr.checksum = 0; 3588 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 3589 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 3590 fw_cmd.pad = 0; 3591 fw_cmd.pad2 = 0; 3592 3593 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 3594 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 3595 sizeof(fw_cmd)); 3596 if (ret_val != 0) 3597 continue; 3598 3599 if (fw_cmd.hdr.cmd_or_resp.ret_status == 3600 FW_CEM_RESP_STATUS_SUCCESS) 3601 ret_val = 0; 3602 else 3603 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3604 3605 break; 3606 } 3607 3608 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3609 out: 3610 return ret_val; 3611 } 3612 3613 /** 3614 
* ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo 3615 * @hw: pointer to the hardware structure 3616 * 3617 * The 82599 and x540 MACs can experience issues if TX work is still pending 3618 * when a reset occurs. This function prevents this by flushing the PCIe 3619 * buffers on the system. 3620 **/ 3621 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) 3622 { 3623 u32 gcr_ext, hlreg0; 3624 3625 /* 3626 * If double reset is not requested then all transactions should 3627 * already be clear and as such there is no work to do 3628 */ 3629 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) 3630 return; 3631 3632 /* 3633 * Set loopback enable to prevent any transmits from being sent 3634 * should the link come up. This assumes that the RXCTRL.RXEN bit 3635 * has already been cleared. 3636 */ 3637 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3638 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); 3639 3640 /* initiate cleaning flow for buffers in the PCIe transaction layer */ 3641 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 3642 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 3643 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); 3644 3645 /* Flush all writes and allow 20usec for all transactions to clear */ 3646 IXGBE_WRITE_FLUSH(hw); 3647 udelay(20); 3648 3649 /* restore previous register values */ 3650 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3651 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3652 } 3653 3654 static const u8 ixgbe_emc_temp_data[4] = { 3655 IXGBE_EMC_INTERNAL_DATA, 3656 IXGBE_EMC_DIODE1_DATA, 3657 IXGBE_EMC_DIODE2_DATA, 3658 IXGBE_EMC_DIODE3_DATA 3659 }; 3660 static const u8 ixgbe_emc_therm_limit[4] = { 3661 IXGBE_EMC_INTERNAL_THERM_LIMIT, 3662 IXGBE_EMC_DIODE1_THERM_LIMIT, 3663 IXGBE_EMC_DIODE2_THERM_LIMIT, 3664 IXGBE_EMC_DIODE3_THERM_LIMIT 3665 }; 3666 3667 /** 3668 * ixgbe_get_ets_data - Extracts the ETS bit data 3669 * @hw: pointer to hardware structure 3670 * @ets_cfg: extected ETS data 3671 * @ets_offset: offset of ETS data 3672 * 
3673 * Returns error code. 3674 **/ 3675 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, 3676 u16 *ets_offset) 3677 { 3678 s32 status = 0; 3679 3680 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); 3681 if (status) 3682 goto out; 3683 3684 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) { 3685 status = IXGBE_NOT_IMPLEMENTED; 3686 goto out; 3687 } 3688 3689 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); 3690 if (status) 3691 goto out; 3692 3693 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) { 3694 status = IXGBE_NOT_IMPLEMENTED; 3695 goto out; 3696 } 3697 3698 out: 3699 return status; 3700 } 3701 3702 /** 3703 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data 3704 * @hw: pointer to hardware structure 3705 * 3706 * Returns the thermal sensor data structure 3707 **/ 3708 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) 3709 { 3710 s32 status = 0; 3711 u16 ets_offset; 3712 u16 ets_cfg; 3713 u16 ets_sensor; 3714 u8 num_sensors; 3715 u8 i; 3716 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3717 3718 /* Only support thermal sensors attached to physical port 0 */ 3719 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { 3720 status = IXGBE_NOT_IMPLEMENTED; 3721 goto out; 3722 } 3723 3724 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3725 if (status) 3726 goto out; 3727 3728 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3729 if (num_sensors > IXGBE_MAX_SENSORS) 3730 num_sensors = IXGBE_MAX_SENSORS; 3731 3732 for (i = 0; i < num_sensors; i++) { 3733 u8 sensor_index; 3734 u8 sensor_location; 3735 3736 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), 3737 &ets_sensor); 3738 if (status) 3739 goto out; 3740 3741 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3742 IXGBE_ETS_DATA_INDEX_SHIFT); 3743 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3744 IXGBE_ETS_DATA_LOC_SHIFT); 3745 3746 if 
(sensor_location != 0) { 3747 status = hw->phy.ops.read_i2c_byte(hw, 3748 ixgbe_emc_temp_data[sensor_index], 3749 IXGBE_I2C_THERMAL_SENSOR_ADDR, 3750 &data->sensor[i].temp); 3751 if (status) 3752 goto out; 3753 } 3754 } 3755 out: 3756 return status; 3757 } 3758 3759 /** 3760 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds 3761 * @hw: pointer to hardware structure 3762 * 3763 * Inits the thermal sensor thresholds according to the NVM map 3764 * and save off the threshold and location values into mac.thermal_sensor_data 3765 **/ 3766 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) 3767 { 3768 s32 status = 0; 3769 u16 ets_offset; 3770 u16 ets_cfg; 3771 u16 ets_sensor; 3772 u8 low_thresh_delta; 3773 u8 num_sensors; 3774 u8 therm_limit; 3775 u8 i; 3776 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3777 3778 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); 3779 3780 /* Only support thermal sensors attached to physical port 0 */ 3781 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { 3782 status = IXGBE_NOT_IMPLEMENTED; 3783 goto out; 3784 } 3785 3786 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3787 if (status) 3788 goto out; 3789 3790 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> 3791 IXGBE_ETS_LTHRES_DELTA_SHIFT); 3792 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3793 if (num_sensors > IXGBE_MAX_SENSORS) 3794 num_sensors = IXGBE_MAX_SENSORS; 3795 3796 for (i = 0; i < num_sensors; i++) { 3797 u8 sensor_index; 3798 u8 sensor_location; 3799 3800 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { 3801 hw_err(hw, "eeprom read at offset %d failed\n", 3802 ets_offset + 1 + i); 3803 continue; 3804 } 3805 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3806 IXGBE_ETS_DATA_INDEX_SHIFT); 3807 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3808 IXGBE_ETS_DATA_LOC_SHIFT); 3809 therm_limit = ets_sensor & 
IXGBE_ETS_DATA_HTHRESH_MASK; 3810 3811 hw->phy.ops.write_i2c_byte(hw, 3812 ixgbe_emc_therm_limit[sensor_index], 3813 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); 3814 3815 if (sensor_location == 0) 3816 continue; 3817 3818 data->sensor[i].location = sensor_location; 3819 data->sensor[i].caution_thresh = therm_limit; 3820 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; 3821 } 3822 out: 3823 return status; 3824 } 3825 3826