1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2018 Intel Corporation. */ 3 4 #include <linux/pci.h> 5 #include <linux/delay.h> 6 #include <linux/sched.h> 7 #include <linux/netdevice.h> 8 9 #include "ixgbe.h" 10 #include "ixgbe_common.h" 11 #include "ixgbe_phy.h" 12 13 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 14 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 15 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 16 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 17 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 18 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 19 u16 count); 20 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 21 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 22 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 23 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 24 25 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 26 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 27 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 28 u16 words, u16 *data); 29 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 30 u16 words, u16 *data); 31 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 32 u16 offset); 33 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 34 35 /* Base table for registers values that change by MAC */ 36 const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { 37 IXGBE_MVALS_INIT(8259X) 38 }; 39 40 /** 41 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 42 * control 43 * @hw: pointer to hardware structure 44 * 45 * There are several phys that do not support autoneg flow control. This 46 * function check the device id to see if the associated phy supports 47 * autoneg flow control. 48 **/ 49 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 50 { 51 bool supported = false; 52 ixgbe_link_speed speed; 53 bool link_up; 54 55 switch (hw->phy.media_type) { 56 case ixgbe_media_type_fiber: 57 /* flow control autoneg black list */ 58 switch (hw->device_id) { 59 case IXGBE_DEV_ID_X550EM_A_SFP: 60 case IXGBE_DEV_ID_X550EM_A_SFP_N: 61 supported = false; 62 break; 63 default: 64 hw->mac.ops.check_link(hw, &speed, &link_up, false); 65 /* if link is down, assume supported */ 66 if (link_up) 67 supported = speed == IXGBE_LINK_SPEED_1GB_FULL; 68 else 69 supported = true; 70 } 71 72 break; 73 case ixgbe_media_type_backplane: 74 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) 75 supported = false; 76 else 77 supported = true; 78 break; 79 case ixgbe_media_type_copper: 80 /* only some copper devices support flow control autoneg */ 81 switch (hw->device_id) { 82 case IXGBE_DEV_ID_82599_T3_LOM: 83 case IXGBE_DEV_ID_X540T: 84 case IXGBE_DEV_ID_X540T1: 85 case IXGBE_DEV_ID_X550T: 86 case IXGBE_DEV_ID_X550T1: 87 case IXGBE_DEV_ID_X550EM_X_10G_T: 88 case IXGBE_DEV_ID_X550EM_A_10G_T: 89 case IXGBE_DEV_ID_X550EM_A_1G_T: 90 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 91 supported = true; 92 break; 93 default: 94 break; 95 } 96 default: 97 break; 98 } 99 100 if (!supported) 101 hw_dbg(hw, "Device %x does not support flow control autoneg\n", 102 hw->device_id); 103 104 return supported; 105 } 106 107 /** 108 * ixgbe_setup_fc_generic - Set up flow control 109 * @hw: pointer to hardware structure 110 * 111 * Called at init time to set up flow control. 112 **/ 113 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) 114 { 115 s32 ret_val = 0; 116 u32 reg = 0, reg_bp = 0; 117 u16 reg_cu = 0; 118 bool locked = false; 119 120 /* 121 * Validate the requested mode. Strict IEEE mode does not allow 122 * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 123 */ 124 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 125 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 126 return IXGBE_ERR_INVALID_LINK_SETTINGS; 127 } 128 129 /* 130 * 10gig parts do not have a word in the EEPROM to determine the 131 * default flow control setting, so we explicitly set it to full. 132 */ 133 if (hw->fc.requested_mode == ixgbe_fc_default) 134 hw->fc.requested_mode = ixgbe_fc_full; 135 136 /* 137 * Set up the 1G and 10G flow control advertisement registers so the 138 * HW will be able to do fc autoneg once the cable is plugged in. If 139 * we link at 10G, the 1G advertisement is harmless and vice versa. 140 */ 141 switch (hw->phy.media_type) { 142 case ixgbe_media_type_backplane: 143 /* some MAC's need RMW protection on AUTOC */ 144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); 145 if (ret_val) 146 return ret_val; 147 148 /* fall through - only backplane uses autoc */ 149 case ixgbe_media_type_fiber: 150 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 151 152 break; 153 case ixgbe_media_type_copper: 154 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 155 MDIO_MMD_AN, ®_cu); 156 break; 157 default: 158 break; 159 } 160 161 /* 162 * The possible values of fc.requested_mode are: 163 * 0: Flow control is completely disabled 164 * 1: Rx flow control is enabled (we can receive pause frames, 165 * but not send pause frames). 166 * 2: Tx flow control is enabled (we can send pause frames but 167 * we do not support receiving pause frames). 168 * 3: Both Rx and Tx flow control (symmetric) are enabled. 169 * other: Invalid. 170 */ 171 switch (hw->fc.requested_mode) { 172 case ixgbe_fc_none: 173 /* Flow control completely disabled by software override. */ 174 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 175 if (hw->phy.media_type == ixgbe_media_type_backplane) 176 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 177 IXGBE_AUTOC_ASM_PAUSE); 178 else if (hw->phy.media_type == ixgbe_media_type_copper) 179 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 180 break; 181 case ixgbe_fc_tx_pause: 182 /* 183 * Tx Flow control is enabled, and Rx Flow control is 184 * disabled by software override. 185 */ 186 reg |= IXGBE_PCS1GANA_ASM_PAUSE; 187 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; 188 if (hw->phy.media_type == ixgbe_media_type_backplane) { 189 reg_bp |= IXGBE_AUTOC_ASM_PAUSE; 190 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; 191 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 192 reg_cu |= IXGBE_TAF_ASM_PAUSE; 193 reg_cu &= ~IXGBE_TAF_SYM_PAUSE; 194 } 195 break; 196 case ixgbe_fc_rx_pause: 197 /* 198 * Rx Flow control is enabled and Tx Flow control is 199 * disabled by software override. Since there really 200 * isn't a way to advertise that we are capable of RX 201 * Pause ONLY, we will advertise that we support both 202 * symmetric and asymmetric Rx PAUSE, as such we fall 203 * through to the fc_full statement. Later, we will 204 * disable the adapter's ability to send PAUSE frames. 205 */ 206 case ixgbe_fc_full: 207 /* Flow control (both Rx and Tx) is enabled by SW override. */ 208 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; 209 if (hw->phy.media_type == ixgbe_media_type_backplane) 210 reg_bp |= IXGBE_AUTOC_SYM_PAUSE | 211 IXGBE_AUTOC_ASM_PAUSE; 212 else if (hw->phy.media_type == ixgbe_media_type_copper) 213 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; 214 break; 215 default: 216 hw_dbg(hw, "Flow control param set incorrectly\n"); 217 return IXGBE_ERR_CONFIG; 218 } 219 220 if (hw->mac.type != ixgbe_mac_X540) { 221 /* 222 * Enable auto-negotiation between the MAC & PHY; 223 * the MAC will advertise clause 37 flow control. 224 */ 225 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 226 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 227 228 /* Disable AN timeout */ 229 if (hw->fc.strict_ieee) 230 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 231 232 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 233 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 234 } 235 236 /* 237 * AUTOC restart handles negotiation of 1G and 10G on backplane 238 * and copper. There is no need to set the PCS1GCTL register. 239 * 240 */ 241 if (hw->phy.media_type == ixgbe_media_type_backplane) { 242 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 243 * LESM is on, likewise reset_pipeline requries the lock as 244 * it also writes AUTOC. 245 */ 246 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); 247 if (ret_val) 248 return ret_val; 249 250 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 251 ixgbe_device_supports_autoneg_fc(hw)) { 252 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 253 MDIO_MMD_AN, reg_cu); 254 } 255 256 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 257 return ret_val; 258 } 259 260 /** 261 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 262 * @hw: pointer to hardware structure 263 * 264 * Starts the hardware by filling the bus info structure and media type, clears 265 * all on chip counters, initializes receive address registers, multicast 266 * table, VLAN filter table, calls routine to set up link and flow control 267 * settings, and leaves transmit and receive units disabled and uninitialized 268 **/ 269 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 270 { 271 s32 ret_val; 272 u32 ctrl_ext; 273 u16 device_caps; 274 275 /* Set the media type */ 276 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 277 278 /* Identify the PHY */ 279 hw->phy.ops.identify(hw); 280 281 /* Clear the VLAN filter table */ 282 hw->mac.ops.clear_vfta(hw); 283 284 /* Clear statistics registers */ 285 hw->mac.ops.clear_hw_cntrs(hw); 286 287 /* Set No Snoop Disable */ 288 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 289 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 290 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 291 IXGBE_WRITE_FLUSH(hw); 292 293 /* Setup flow control if method for doing so */ 294 if (hw->mac.ops.setup_fc) { 295 ret_val = hw->mac.ops.setup_fc(hw); 296 if (ret_val) 297 return ret_val; 298 } 299 300 /* Cashe bit indicating need for crosstalk fix */ 301 switch (hw->mac.type) { 302 case ixgbe_mac_82599EB: 303 case ixgbe_mac_X550EM_x: 304 case ixgbe_mac_x550em_a: 305 hw->mac.ops.get_device_caps(hw, &device_caps); 306 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) 307 hw->need_crosstalk_fix = false; 308 else 309 hw->need_crosstalk_fix = true; 310 break; 311 default: 312 hw->need_crosstalk_fix = false; 313 break; 314 } 315 316 /* Clear adapter stopped flag */ 317 hw->adapter_stopped = false; 318 319 return 0; 320 } 321 322 /** 323 * ixgbe_start_hw_gen2 - Init sequence for common device family 324 * @hw: pointer to hw structure 325 * 326 * Performs the init sequence common to the second generation 327 * of 10 GbE devices. 328 * Devices in the second generation: 329 * 82599 330 * X540 331 **/ 332 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) 333 { 334 u32 i; 335 336 /* Clear the rate limiters */ 337 for (i = 0; i < hw->mac.max_tx_queues; i++) { 338 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 339 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 340 } 341 IXGBE_WRITE_FLUSH(hw); 342 343 return 0; 344 } 345 346 /** 347 * ixgbe_init_hw_generic - Generic hardware initialization 348 * @hw: pointer to hardware structure 349 * 350 * Initialize the hardware by resetting the hardware, filling the bus info 351 * structure and media type, clears all on chip counters, initializes receive 352 * address registers, multicast table, VLAN filter table, calls routine to set 353 * up link and flow control settings, and leaves transmit and receive units 354 * disabled and uninitialized 355 **/ 356 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 357 { 358 s32 status; 359 360 /* Reset the hardware */ 361 status = hw->mac.ops.reset_hw(hw); 362 363 if (status == 0) { 364 /* Start the HW */ 365 status = hw->mac.ops.start_hw(hw); 366 } 367 368 /* Initialize the LED link active for LED blink support */ 369 if (hw->mac.ops.init_led_link_act) 370 hw->mac.ops.init_led_link_act(hw); 371 372 return status; 373 } 374 375 /** 376 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 377 * @hw: pointer to hardware structure 378 * 379 * Clears all hardware statistics counters by reading them from the hardware 380 * Statistics counters are clear on read. 381 **/ 382 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) 383 { 384 u16 i = 0; 385 386 IXGBE_READ_REG(hw, IXGBE_CRCERRS); 387 IXGBE_READ_REG(hw, IXGBE_ILLERRC); 388 IXGBE_READ_REG(hw, IXGBE_ERRBC); 389 IXGBE_READ_REG(hw, IXGBE_MSPDC); 390 for (i = 0; i < 8; i++) 391 IXGBE_READ_REG(hw, IXGBE_MPC(i)); 392 393 IXGBE_READ_REG(hw, IXGBE_MLFC); 394 IXGBE_READ_REG(hw, IXGBE_MRFC); 395 IXGBE_READ_REG(hw, IXGBE_RLEC); 396 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 397 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 398 if (hw->mac.type >= ixgbe_mac_82599EB) { 399 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 400 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 401 } else { 402 IXGBE_READ_REG(hw, IXGBE_LXONRXC); 403 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 404 } 405 406 for (i = 0; i < 8; i++) { 407 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 408 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 409 if (hw->mac.type >= ixgbe_mac_82599EB) { 410 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 411 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 412 } else { 413 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 414 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 415 } 416 } 417 if (hw->mac.type >= ixgbe_mac_82599EB) 418 for (i = 0; i < 8; i++) 419 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 420 IXGBE_READ_REG(hw, IXGBE_PRC64); 421 IXGBE_READ_REG(hw, IXGBE_PRC127); 422 IXGBE_READ_REG(hw, IXGBE_PRC255); 423 IXGBE_READ_REG(hw, IXGBE_PRC511); 424 IXGBE_READ_REG(hw, IXGBE_PRC1023); 425 IXGBE_READ_REG(hw, IXGBE_PRC1522); 426 IXGBE_READ_REG(hw, IXGBE_GPRC); 427 IXGBE_READ_REG(hw, IXGBE_BPRC); 428 IXGBE_READ_REG(hw, IXGBE_MPRC); 429 IXGBE_READ_REG(hw, IXGBE_GPTC); 430 IXGBE_READ_REG(hw, IXGBE_GORCL); 431 IXGBE_READ_REG(hw, IXGBE_GORCH); 432 IXGBE_READ_REG(hw, IXGBE_GOTCL); 433 IXGBE_READ_REG(hw, IXGBE_GOTCH); 434 if (hw->mac.type == ixgbe_mac_82598EB) 435 for (i = 0; i < 8; i++) 436 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 437 IXGBE_READ_REG(hw, IXGBE_RUC); 438 IXGBE_READ_REG(hw, IXGBE_RFC); 439 IXGBE_READ_REG(hw, IXGBE_ROC); 440 IXGBE_READ_REG(hw, IXGBE_RJC); 441 IXGBE_READ_REG(hw, IXGBE_MNGPRC); 442 IXGBE_READ_REG(hw, IXGBE_MNGPDC); 443 IXGBE_READ_REG(hw, IXGBE_MNGPTC); 444 IXGBE_READ_REG(hw, IXGBE_TORL); 445 IXGBE_READ_REG(hw, IXGBE_TORH); 446 IXGBE_READ_REG(hw, IXGBE_TPR); 447 IXGBE_READ_REG(hw, IXGBE_TPT); 448 IXGBE_READ_REG(hw, IXGBE_PTC64); 449 IXGBE_READ_REG(hw, IXGBE_PTC127); 450 IXGBE_READ_REG(hw, IXGBE_PTC255); 451 IXGBE_READ_REG(hw, IXGBE_PTC511); 452 IXGBE_READ_REG(hw, IXGBE_PTC1023); 453 IXGBE_READ_REG(hw, IXGBE_PTC1522); 454 IXGBE_READ_REG(hw, IXGBE_MPTC); 455 IXGBE_READ_REG(hw, IXGBE_BPTC); 456 for (i = 0; i < 16; i++) { 457 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 458 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 459 if (hw->mac.type >= ixgbe_mac_82599EB) { 460 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 461 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); 462 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 463 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); 464 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 465 } else { 466 IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 467 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 468 } 469 } 470 471 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { 472 if (hw->phy.id == 0) 473 hw->phy.ops.identify(hw); 474 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); 475 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); 476 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); 477 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); 478 } 479 480 return 0; 481 } 482 483 /** 484 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM 485 * @hw: pointer to hardware structure 486 * @pba_num: stores the part number string from the EEPROM 487 * @pba_num_size: part number string buffer length 488 * 489 * Reads the part number string from the EEPROM. 490 **/ 491 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 492 u32 pba_num_size) 493 { 494 s32 ret_val; 495 u16 data; 496 u16 pba_ptr; 497 u16 offset; 498 u16 length; 499 500 if (pba_num == NULL) { 501 hw_dbg(hw, "PBA string buffer was null\n"); 502 return IXGBE_ERR_INVALID_ARGUMENT; 503 } 504 505 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 506 if (ret_val) { 507 hw_dbg(hw, "NVM Read Error\n"); 508 return ret_val; 509 } 510 511 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); 512 if (ret_val) { 513 hw_dbg(hw, "NVM Read Error\n"); 514 return ret_val; 515 } 516 517 /* 518 * if data is not ptr guard the PBA must be in legacy format which 519 * means pba_ptr is actually our second data word for the PBA number 520 * and we can decode it into an ascii string 521 */ 522 if (data != IXGBE_PBANUM_PTR_GUARD) { 523 hw_dbg(hw, "NVM PBA number is not stored as string\n"); 524 525 /* we will need 11 characters to store the PBA */ 526 if (pba_num_size < 11) { 527 hw_dbg(hw, "PBA string buffer too small\n"); 528 return IXGBE_ERR_NO_SPACE; 529 } 530 531 /* extract hex string from data and pba_ptr */ 532 pba_num[0] = (data >> 12) & 0xF; 533 pba_num[1] = (data >> 8) & 0xF; 534 pba_num[2] = (data >> 4) & 0xF; 535 pba_num[3] = data & 0xF; 536 pba_num[4] = (pba_ptr >> 12) & 0xF; 537 pba_num[5] = (pba_ptr >> 8) & 0xF; 538 pba_num[6] = '-'; 539 pba_num[7] = 0; 540 pba_num[8] = (pba_ptr >> 4) & 0xF; 541 pba_num[9] = pba_ptr & 0xF; 542 543 /* put a null character on the end of our string */ 544 pba_num[10] = '\0'; 545 546 /* switch all the data but the '-' to hex char */ 547 for (offset = 0; offset < 10; offset++) { 548 if (pba_num[offset] < 0xA) 549 pba_num[offset] += '0'; 550 else if (pba_num[offset] < 0x10) 551 pba_num[offset] += 'A' - 0xA; 552 } 553 554 return 0; 555 } 556 557 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); 558 if (ret_val) { 559 hw_dbg(hw, "NVM Read Error\n"); 560 return ret_val; 561 } 562 563 if (length == 0xFFFF || length == 0) { 564 hw_dbg(hw, "NVM PBA number section invalid length\n"); 565 return IXGBE_ERR_PBA_SECTION; 566 } 567 568 /* check if pba_num buffer is big enough */ 569 if (pba_num_size < (((u32)length * 2) - 1)) { 570 hw_dbg(hw, "PBA string buffer too small\n"); 571 return IXGBE_ERR_NO_SPACE; 572 } 573 574 /* trim pba length from start of string */ 575 pba_ptr++; 576 length--; 577 578 for (offset = 0; offset < length; offset++) { 579 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); 580 if (ret_val) { 581 hw_dbg(hw, "NVM Read Error\n"); 582 return ret_val; 583 } 584 pba_num[offset * 2] = (u8)(data >> 8); 585 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); 586 } 587 pba_num[offset * 2] = '\0'; 588 589 return 0; 590 } 591 592 /** 593 * ixgbe_get_mac_addr_generic - Generic get MAC address 594 * @hw: pointer to hardware structure 595 * @mac_addr: Adapter MAC address 596 * 597 * Reads the adapter's MAC address from first Receive Address Register (RAR0) 598 * A reset of the adapter must be performed prior to calling this function 599 * in order for the MAC address to have been loaded from the EEPROM into RAR0 600 **/ 601 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) 602 { 603 u32 rar_high; 604 u32 rar_low; 605 u16 i; 606 607 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); 608 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); 609 610 for (i = 0; i < 4; i++) 611 mac_addr[i] = (u8)(rar_low >> (i*8)); 612 613 for (i = 0; i < 2; i++) 614 mac_addr[i+4] = (u8)(rar_high >> (i*8)); 615 616 return 0; 617 } 618 619 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) 620 { 621 switch (link_status & IXGBE_PCI_LINK_WIDTH) { 622 case IXGBE_PCI_LINK_WIDTH_1: 623 return ixgbe_bus_width_pcie_x1; 624 case IXGBE_PCI_LINK_WIDTH_2: 625 return ixgbe_bus_width_pcie_x2; 626 case IXGBE_PCI_LINK_WIDTH_4: 627 return ixgbe_bus_width_pcie_x4; 628 case IXGBE_PCI_LINK_WIDTH_8: 629 return ixgbe_bus_width_pcie_x8; 630 default: 631 return ixgbe_bus_width_unknown; 632 } 633 } 634 635 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) 636 { 637 switch (link_status & IXGBE_PCI_LINK_SPEED) { 638 case IXGBE_PCI_LINK_SPEED_2500: 639 return ixgbe_bus_speed_2500; 640 case IXGBE_PCI_LINK_SPEED_5000: 641 return ixgbe_bus_speed_5000; 642 case IXGBE_PCI_LINK_SPEED_8000: 643 return ixgbe_bus_speed_8000; 644 default: 645 return ixgbe_bus_speed_unknown; 646 } 647 } 648 649 /** 650 * ixgbe_get_bus_info_generic - Generic set PCI bus info 651 * @hw: pointer to hardware structure 652 * 653 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure 654 **/ 655 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 656 { 657 u16 link_status; 658 659 hw->bus.type = ixgbe_bus_type_pci_express; 660 661 /* Get the negotiated link width and speed from PCI config space */ 662 link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); 663 664 hw->bus.width = ixgbe_convert_bus_width(link_status); 665 hw->bus.speed = ixgbe_convert_bus_speed(link_status); 666 667 hw->mac.ops.set_lan_id(hw); 668 669 return 0; 670 } 671 672 /** 673 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 674 * @hw: pointer to the HW structure 675 * 676 * Determines the LAN function id by reading memory-mapped registers 677 * and swaps the port value if requested. 678 **/ 679 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 680 { 681 struct ixgbe_bus_info *bus = &hw->bus; 682 u16 ee_ctrl_4; 683 u32 reg; 684 685 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 686 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; 687 bus->lan_id = bus->func; 688 689 /* check for a port swap */ 690 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); 691 if (reg & IXGBE_FACTPS_LFS) 692 bus->func ^= 0x1; 693 694 /* Get MAC instance from EEPROM for configuring CS4227 */ 695 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { 696 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); 697 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> 698 IXGBE_EE_CTRL_4_INST_ID_SHIFT; 699 } 700 } 701 702 /** 703 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 704 * @hw: pointer to hardware structure 705 * 706 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 707 * disables transmit and receive units. The adapter_stopped flag is used by 708 * the shared code and drivers to determine if the adapter is in a stopped 709 * state and should not touch the hardware. 710 **/ 711 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 712 { 713 u32 reg_val; 714 u16 i; 715 716 /* 717 * Set the adapter_stopped flag so other driver functions stop touching 718 * the hardware 719 */ 720 hw->adapter_stopped = true; 721 722 /* Disable the receive unit */ 723 hw->mac.ops.disable_rx(hw); 724 725 /* Clear interrupt mask to stop interrupts from being generated */ 726 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 727 728 /* Clear any pending interrupts, flush previous writes */ 729 IXGBE_READ_REG(hw, IXGBE_EICR); 730 731 /* Disable the transmit unit. Each queue must be disabled. */ 732 for (i = 0; i < hw->mac.max_tx_queues; i++) 733 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); 734 735 /* Disable the receive unit by stopping each queue */ 736 for (i = 0; i < hw->mac.max_rx_queues; i++) { 737 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 738 reg_val &= ~IXGBE_RXDCTL_ENABLE; 739 reg_val |= IXGBE_RXDCTL_SWFLSH; 740 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 741 } 742 743 /* flush all queues disables */ 744 IXGBE_WRITE_FLUSH(hw); 745 usleep_range(1000, 2000); 746 747 /* 748 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 749 * access and verify no pending requests 750 */ 751 return ixgbe_disable_pcie_master(hw); 752 } 753 754 /** 755 * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 756 * @hw: pointer to hardware structure 757 * 758 * Store the index for the link active LED. This will be used to support 759 * blinking the LED. 760 **/ 761 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) 762 { 763 struct ixgbe_mac_info *mac = &hw->mac; 764 u32 led_reg, led_mode; 765 u16 i; 766 767 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 768 769 /* Get LED link active from the LEDCTL register */ 770 for (i = 0; i < 4; i++) { 771 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); 772 773 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == 774 IXGBE_LED_LINK_ACTIVE) { 775 mac->led_link_act = i; 776 return 0; 777 } 778 } 779 780 /* If LEDCTL register does not have the LED link active set, then use 781 * known MAC defaults. 782 */ 783 switch (hw->mac.type) { 784 case ixgbe_mac_x550em_a: 785 mac->led_link_act = 0; 786 break; 787 case ixgbe_mac_X550EM_x: 788 mac->led_link_act = 1; 789 break; 790 default: 791 mac->led_link_act = 2; 792 } 793 794 return 0; 795 } 796 797 /** 798 * ixgbe_led_on_generic - Turns on the software controllable LEDs. 799 * @hw: pointer to hardware structure 800 * @index: led number to turn on 801 **/ 802 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 803 { 804 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 805 806 if (index > 3) 807 return IXGBE_ERR_PARAM; 808 809 /* To turn on the LED, set mode to ON. */ 810 led_reg &= ~IXGBE_LED_MODE_MASK(index); 811 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 812 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 813 IXGBE_WRITE_FLUSH(hw); 814 815 return 0; 816 } 817 818 /** 819 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 820 * @hw: pointer to hardware structure 821 * @index: led number to turn off 822 **/ 823 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 824 { 825 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 826 827 if (index > 3) 828 return IXGBE_ERR_PARAM; 829 830 /* To turn off the LED, set mode to OFF. */ 831 led_reg &= ~IXGBE_LED_MODE_MASK(index); 832 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 833 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 834 IXGBE_WRITE_FLUSH(hw); 835 836 return 0; 837 } 838 839 /** 840 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 841 * @hw: pointer to hardware structure 842 * 843 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 844 * ixgbe_hw struct in order to set up EEPROM access. 845 **/ 846 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 847 { 848 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 849 u32 eec; 850 u16 eeprom_size; 851 852 if (eeprom->type == ixgbe_eeprom_uninitialized) { 853 eeprom->type = ixgbe_eeprom_none; 854 /* Set default semaphore delay to 10ms which is a well 855 * tested value */ 856 eeprom->semaphore_delay = 10; 857 /* Clear EEPROM page size, it will be initialized as needed */ 858 eeprom->word_page_size = 0; 859 860 /* 861 * Check for EEPROM present first. 862 * If not present leave as none 863 */ 864 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 865 if (eec & IXGBE_EEC_PRES) { 866 eeprom->type = ixgbe_eeprom_spi; 867 868 /* 869 * SPI EEPROM is assumed here. This code would need to 870 * change if a future EEPROM is not SPI. 871 */ 872 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 873 IXGBE_EEC_SIZE_SHIFT); 874 eeprom->word_size = BIT(eeprom_size + 875 IXGBE_EEPROM_WORD_SIZE_SHIFT); 876 } 877 878 if (eec & IXGBE_EEC_ADDR_SIZE) 879 eeprom->address_bits = 16; 880 else 881 eeprom->address_bits = 8; 882 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", 883 eeprom->type, eeprom->word_size, eeprom->address_bits); 884 } 885 886 return 0; 887 } 888 889 /** 890 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang 891 * @hw: pointer to hardware structure 892 * @offset: offset within the EEPROM to write 893 * @words: number of words 894 * @data: 16 bit word(s) to write to EEPROM 895 * 896 * Reads 16 bit word(s) from EEPROM through bit-bang method 897 **/ 898 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 899 u16 words, u16 *data) 900 { 901 s32 status; 902 u16 i, count; 903 904 hw->eeprom.ops.init_params(hw); 905 906 if (words == 0) 907 return IXGBE_ERR_INVALID_ARGUMENT; 908 909 if (offset + words > hw->eeprom.word_size) 910 return IXGBE_ERR_EEPROM; 911 912 /* 913 * The EEPROM page size cannot be queried from the chip. We do lazy 914 * initialization. It is worth to do that when we write large buffer. 915 */ 916 if ((hw->eeprom.word_page_size == 0) && 917 (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) 918 ixgbe_detect_eeprom_page_size_generic(hw, offset); 919 920 /* 921 * We cannot hold synchronization semaphores for too long 922 * to avoid other entity starvation. However it is more efficient 923 * to read in bursts than synchronizing access for each word. 924 */ 925 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 926 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 927 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 928 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, 929 count, &data[i]); 930 931 if (status != 0) 932 break; 933 } 934 935 return status; 936 } 937 938 /** 939 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM 940 * @hw: pointer to hardware structure 941 * @offset: offset within the EEPROM to be written to 942 * @words: number of word(s) 943 * @data: 16 bit word(s) to be written to the EEPROM 944 * 945 * If ixgbe_eeprom_update_checksum is not called after this function, the 946 * EEPROM will most likely contain an invalid checksum. 947 **/ 948 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 949 u16 words, u16 *data) 950 { 951 s32 status; 952 u16 word; 953 u16 page_size; 954 u16 i; 955 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 956 957 /* Prepare the EEPROM for writing */ 958 status = ixgbe_acquire_eeprom(hw); 959 if (status) 960 return status; 961 962 if (ixgbe_ready_eeprom(hw) != 0) { 963 ixgbe_release_eeprom(hw); 964 return IXGBE_ERR_EEPROM; 965 } 966 967 for (i = 0; i < words; i++) { 968 ixgbe_standby_eeprom(hw); 969 970 /* Send the WRITE ENABLE command (8 bit opcode) */ 971 ixgbe_shift_out_eeprom_bits(hw, 972 IXGBE_EEPROM_WREN_OPCODE_SPI, 973 IXGBE_EEPROM_OPCODE_BITS); 974 975 ixgbe_standby_eeprom(hw); 976 977 /* Some SPI eeproms use the 8th address bit embedded 978 * in the opcode 979 */ 980 if ((hw->eeprom.address_bits == 8) && 981 ((offset + i) >= 128)) 982 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 983 984 /* Send the Write command (8-bit opcode + addr) */ 985 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 986 IXGBE_EEPROM_OPCODE_BITS); 987 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 988 hw->eeprom.address_bits); 989 990 page_size = hw->eeprom.word_page_size; 991 992 /* Send the data in burst via SPI */ 993 do { 994 word = data[i]; 995 word = (word >> 8) | (word << 8); 996 ixgbe_shift_out_eeprom_bits(hw, word, 16); 997 998 if (page_size == 0) 999 break; 1000 1001 /* do not wrap around page */ 1002 if (((offset + i) & (page_size - 1)) == 1003 (page_size - 1)) 1004 break; 1005 } while (++i < words); 1006 1007 ixgbe_standby_eeprom(hw); 1008 usleep_range(10000, 20000); 1009 } 1010 /* Done with writing - release the EEPROM */ 1011 ixgbe_release_eeprom(hw); 1012 1013 return 0; 1014 } 1015 1016 /** 1017 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 1018 * @hw: pointer to hardware structure 1019 * @offset: offset within the EEPROM to be written to 1020 * @data: 16 bit word to be written to the EEPROM 1021 * 1022 * If ixgbe_eeprom_update_checksum is not called after this function, the 1023 * EEPROM will most likely contain an invalid checksum. 1024 **/ 1025 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1026 { 1027 hw->eeprom.ops.init_params(hw); 1028 1029 if (offset >= hw->eeprom.word_size) 1030 return IXGBE_ERR_EEPROM; 1031 1032 return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 1033 } 1034 1035 /** 1036 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang 1037 * @hw: pointer to hardware structure 1038 * @offset: offset within the EEPROM to be read 1039 * @words: number of word(s) 1040 * @data: read 16 bit words(s) from EEPROM 1041 * 1042 * Reads 16 bit word(s) from EEPROM through bit-bang method 1043 **/ 1044 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1045 u16 words, u16 *data) 1046 { 1047 s32 status; 1048 u16 i, count; 1049 1050 hw->eeprom.ops.init_params(hw); 1051 1052 if (words == 0) 1053 return IXGBE_ERR_INVALID_ARGUMENT; 1054 1055 if (offset + words > hw->eeprom.word_size) 1056 return IXGBE_ERR_EEPROM; 1057 1058 /* 1059 * We cannot hold synchronization semaphores for too long 1060 * to avoid other entity starvation. However it is more efficient 1061 * to read in bursts than synchronizing access for each word. 1062 */ 1063 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 1064 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 1065 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 1066 1067 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, 1068 count, &data[i]); 1069 1070 if (status) 1071 return status; 1072 } 1073 1074 return 0; 1075 } 1076 1077 /** 1078 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang 1079 * @hw: pointer to hardware structure 1080 * @offset: offset within the EEPROM to be read 1081 * @words: number of word(s) 1082 * @data: read 16 bit word(s) from EEPROM 1083 * 1084 * Reads 16 bit word(s) from EEPROM through bit-bang method 1085 **/ 1086 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 1087 u16 words, u16 *data) 1088 { 1089 s32 status; 1090 u16 word_in; 1091 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; 1092 u16 i; 1093 1094 /* Prepare the EEPROM for reading */ 1095 status = ixgbe_acquire_eeprom(hw); 1096 if (status) 1097 return status; 1098 1099 if (ixgbe_ready_eeprom(hw) != 0) { 1100 ixgbe_release_eeprom(hw); 1101 return IXGBE_ERR_EEPROM; 1102 } 1103 1104 for (i = 0; i < words; i++) { 1105 ixgbe_standby_eeprom(hw); 1106 /* Some SPI eeproms use the 8th address bit embedded 1107 * in the opcode 1108 */ 1109 if ((hw->eeprom.address_bits == 8) && 1110 ((offset + i) >= 128)) 1111 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 1112 1113 /* Send the READ command (opcode + addr) */ 1114 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 1115 IXGBE_EEPROM_OPCODE_BITS); 1116 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 1117 hw->eeprom.address_bits); 1118 1119 /* Read the data. */ 1120 word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 1121 data[i] = (word_in >> 8) | (word_in << 8); 1122 } 1123 1124 /* End this read operation */ 1125 ixgbe_release_eeprom(hw); 1126 1127 return 0; 1128 } 1129 1130 /** 1131 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 1132 * @hw: pointer to hardware structure 1133 * @offset: offset within the EEPROM to be read 1134 * @data: read 16 bit value from EEPROM 1135 * 1136 * Reads 16 bit value from EEPROM through bit-bang method 1137 **/ 1138 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1139 u16 *data) 1140 { 1141 hw->eeprom.ops.init_params(hw); 1142 1143 if (offset >= hw->eeprom.word_size) 1144 return IXGBE_ERR_EEPROM; 1145 1146 return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1147 } 1148 1149 /** 1150 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD 1151 * @hw: pointer to hardware structure 1152 * @offset: offset of word in the EEPROM to read 1153 * @words: number of word(s) 1154 * @data: 16 bit word(s) from the EEPROM 1155 * 1156 * Reads a 16 bit word(s) from the EEPROM using the EERD register. 1157 **/ 1158 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1159 u16 words, u16 *data) 1160 { 1161 u32 eerd; 1162 s32 status; 1163 u32 i; 1164 1165 hw->eeprom.ops.init_params(hw); 1166 1167 if (words == 0) 1168 return IXGBE_ERR_INVALID_ARGUMENT; 1169 1170 if (offset >= hw->eeprom.word_size) 1171 return IXGBE_ERR_EEPROM; 1172 1173 for (i = 0; i < words; i++) { 1174 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1175 IXGBE_EEPROM_RW_REG_START; 1176 1177 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1178 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 1179 1180 if (status == 0) { 1181 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 1182 IXGBE_EEPROM_RW_REG_DATA); 1183 } else { 1184 hw_dbg(hw, "Eeprom read timed out\n"); 1185 return status; 1186 } 1187 } 1188 1189 return 0; 1190 } 1191 1192 /** 1193 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size 1194 * @hw: pointer to hardware structure 1195 * @offset: offset within the EEPROM to be used as a scratch pad 1196 * 1197 * Discover EEPROM page size by writing marching data at given offset. 1198 * This function is called only when we are writing a new large buffer 1199 * at given offset so the data would be overwritten anyway. 1200 **/ 1201 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 1202 u16 offset) 1203 { 1204 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; 1205 s32 status; 1206 u16 i; 1207 1208 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) 1209 data[i] = i; 1210 1211 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; 1212 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1213 IXGBE_EEPROM_PAGE_SIZE_MAX, data); 1214 hw->eeprom.word_page_size = 0; 1215 if (status) 1216 return status; 1217 1218 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1219 if (status) 1220 return status; 1221 1222 /* 1223 * When writing in burst more than the actual page size 1224 * EEPROM address wraps around current page. 1225 */ 1226 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1227 1228 hw_dbg(hw, "Detected EEPROM page size = %d words.\n", 1229 hw->eeprom.word_page_size); 1230 return 0; 1231 } 1232 1233 /** 1234 * ixgbe_read_eerd_generic - Read EEPROM word using EERD 1235 * @hw: pointer to hardware structure 1236 * @offset: offset of word in the EEPROM to read 1237 * @data: word read from the EEPROM 1238 * 1239 * Reads a 16 bit word from the EEPROM using the EERD register. 1240 **/ 1241 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 1242 { 1243 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); 1244 } 1245 1246 /** 1247 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR 1248 * @hw: pointer to hardware structure 1249 * @offset: offset of word in the EEPROM to write 1250 * @words: number of words 1251 * @data: word(s) write to the EEPROM 1252 * 1253 * Write a 16 bit word(s) to the EEPROM using the EEWR register. 1254 **/ 1255 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1256 u16 words, u16 *data) 1257 { 1258 u32 eewr; 1259 s32 status; 1260 u16 i; 1261 1262 hw->eeprom.ops.init_params(hw); 1263 1264 if (words == 0) 1265 return IXGBE_ERR_INVALID_ARGUMENT; 1266 1267 if (offset >= hw->eeprom.word_size) 1268 return IXGBE_ERR_EEPROM; 1269 1270 for (i = 0; i < words; i++) { 1271 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1272 (data[i] << IXGBE_EEPROM_RW_REG_DATA) | 1273 IXGBE_EEPROM_RW_REG_START; 1274 1275 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1276 if (status) { 1277 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1278 return status; 1279 } 1280 1281 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1282 1283 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1284 if (status) { 1285 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1286 return status; 1287 } 1288 } 1289 1290 return 0; 1291 } 1292 1293 /** 1294 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR 1295 * @hw: pointer to hardware structure 1296 * @offset: offset of word in the EEPROM to write 1297 * @data: word write to the EEPROM 1298 * 1299 * Write a 16 bit word to the EEPROM using the EEWR register. 1300 **/ 1301 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1302 { 1303 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); 1304 } 1305 1306 /** 1307 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1308 * @hw: pointer to hardware structure 1309 * @ee_reg: EEPROM flag for polling 1310 * 1311 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1312 * read or write is done respectively. 1313 **/ 1314 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1315 { 1316 u32 i; 1317 u32 reg; 1318 1319 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1320 if (ee_reg == IXGBE_NVM_POLL_READ) 1321 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 1322 else 1323 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1324 1325 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1326 return 0; 1327 } 1328 udelay(5); 1329 } 1330 return IXGBE_ERR_EEPROM; 1331 } 1332 1333 /** 1334 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 1335 * @hw: pointer to hardware structure 1336 * 1337 * Prepares EEPROM for access using bit-bang method. This function should 1338 * be called before issuing a command to the EEPROM. 1339 **/ 1340 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1341 { 1342 u32 eec; 1343 u32 i; 1344 1345 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 1346 return IXGBE_ERR_SWFW_SYNC; 1347 1348 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1349 1350 /* Request EEPROM Access */ 1351 eec |= IXGBE_EEC_REQ; 1352 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1353 1354 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 1355 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1356 if (eec & IXGBE_EEC_GNT) 1357 break; 1358 udelay(5); 1359 } 1360 1361 /* Release if grant not acquired */ 1362 if (!(eec & IXGBE_EEC_GNT)) { 1363 eec &= ~IXGBE_EEC_REQ; 1364 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1365 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 1366 1367 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1368 return IXGBE_ERR_EEPROM; 1369 } 1370 1371 /* Setup EEPROM for Read/Write */ 1372 /* Clear CS and SK */ 1373 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 1374 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1375 IXGBE_WRITE_FLUSH(hw); 1376 udelay(1); 1377 return 0; 1378 } 1379 1380 /** 1381 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 1382 * @hw: pointer to hardware structure 1383 * 1384 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method 1385 **/ 1386 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1387 { 1388 u32 timeout = 2000; 1389 u32 i; 1390 u32 swsm; 1391 1392 /* Get SMBI software semaphore between device drivers first */ 1393 for (i = 0; i < timeout; i++) { 1394 /* 1395 * If the SMBI bit is 0 when we read it, then the bit will be 1396 * set and we have the semaphore 1397 */ 1398 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); 1399 if (!(swsm & IXGBE_SWSM_SMBI)) 1400 break; 1401 usleep_range(50, 100); 1402 } 1403 1404 if (i == timeout) { 1405 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); 1406 /* this release is particularly important because our attempts 1407 * above to get the semaphore may have succeeded, and if there 1408 * was a timeout, we should unconditionally clear the semaphore 1409 * bits to free the driver to make progress 1410 */ 1411 ixgbe_release_eeprom_semaphore(hw); 1412 1413 usleep_range(50, 100); 1414 /* one last try 1415 * If the SMBI bit is 0 when we read it, then the bit will be 1416 * set and we have the semaphore 1417 */ 1418 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); 1419 if (swsm & IXGBE_SWSM_SMBI) { 1420 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); 1421 return IXGBE_ERR_EEPROM; 1422 } 1423 } 1424 1425 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1426 for (i = 0; i < timeout; i++) { 1427 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); 1428 1429 /* Set the SW EEPROM semaphore bit to request access */ 1430 swsm |= IXGBE_SWSM_SWESMBI; 1431 IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); 1432 1433 /* If we set the bit successfully then we got the 1434 * semaphore. 1435 */ 1436 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); 1437 if (swsm & IXGBE_SWSM_SWESMBI) 1438 break; 1439 1440 usleep_range(50, 100); 1441 } 1442 1443 /* Release semaphores and return error if SW EEPROM semaphore 1444 * was not granted because we don't have access to the EEPROM 1445 */ 1446 if (i >= timeout) { 1447 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); 1448 ixgbe_release_eeprom_semaphore(hw); 1449 return IXGBE_ERR_EEPROM; 1450 } 1451 1452 return 0; 1453 } 1454 1455 /** 1456 * ixgbe_release_eeprom_semaphore - Release hardware semaphore 1457 * @hw: pointer to hardware structure 1458 * 1459 * This function clears hardware semaphore bits. 1460 **/ 1461 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) 1462 { 1463 u32 swsm; 1464 1465 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); 1466 1467 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ 1468 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); 1469 IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); 1470 IXGBE_WRITE_FLUSH(hw); 1471 } 1472 1473 /** 1474 * ixgbe_ready_eeprom - Polls for EEPROM ready 1475 * @hw: pointer to hardware structure 1476 **/ 1477 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 1478 { 1479 u16 i; 1480 u8 spi_stat_reg; 1481 1482 /* 1483 * Read "Status Register" repeatedly until the LSB is cleared. The 1484 * EEPROM will signal that the command has been completed by clearing 1485 * bit 0 of the internal status register. If it's not cleared within 1486 * 5 milliseconds, then error out. 1487 */ 1488 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1489 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1490 IXGBE_EEPROM_OPCODE_BITS); 1491 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1492 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1493 break; 1494 1495 udelay(5); 1496 ixgbe_standby_eeprom(hw); 1497 } 1498 1499 /* 1500 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 1501 * devices (and only 0-5mSec on 5V devices) 1502 */ 1503 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 1504 hw_dbg(hw, "SPI EEPROM Status error\n"); 1505 return IXGBE_ERR_EEPROM; 1506 } 1507 1508 return 0; 1509 } 1510 1511 /** 1512 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 1513 * @hw: pointer to hardware structure 1514 **/ 1515 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 1516 { 1517 u32 eec; 1518 1519 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1520 1521 /* Toggle CS to flush commands */ 1522 eec |= IXGBE_EEC_CS; 1523 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1524 IXGBE_WRITE_FLUSH(hw); 1525 udelay(1); 1526 eec &= ~IXGBE_EEC_CS; 1527 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1528 IXGBE_WRITE_FLUSH(hw); 1529 udelay(1); 1530 } 1531 1532 /** 1533 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 1534 * @hw: pointer to hardware structure 1535 * @data: data to send to the EEPROM 1536 * @count: number of bits to shift out 1537 **/ 1538 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 1539 u16 count) 1540 { 1541 u32 eec; 1542 u32 mask; 1543 u32 i; 1544 1545 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1546 1547 /* 1548 * Mask is used to shift "count" bits of "data" out to the EEPROM 1549 * one bit at a time. Determine the starting bit based on count 1550 */ 1551 mask = BIT(count - 1); 1552 1553 for (i = 0; i < count; i++) { 1554 /* 1555 * A "1" is shifted out to the EEPROM by setting bit "DI" to a 1556 * "1", and then raising and then lowering the clock (the SK 1557 * bit controls the clock input to the EEPROM). A "0" is 1558 * shifted out to the EEPROM by setting "DI" to "0" and then 1559 * raising and then lowering the clock. 1560 */ 1561 if (data & mask) 1562 eec |= IXGBE_EEC_DI; 1563 else 1564 eec &= ~IXGBE_EEC_DI; 1565 1566 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1567 IXGBE_WRITE_FLUSH(hw); 1568 1569 udelay(1); 1570 1571 ixgbe_raise_eeprom_clk(hw, &eec); 1572 ixgbe_lower_eeprom_clk(hw, &eec); 1573 1574 /* 1575 * Shift mask to signify next bit of data to shift in to the 1576 * EEPROM 1577 */ 1578 mask = mask >> 1; 1579 } 1580 1581 /* We leave the "DI" bit set to "0" when we leave this routine. */ 1582 eec &= ~IXGBE_EEC_DI; 1583 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1584 IXGBE_WRITE_FLUSH(hw); 1585 } 1586 1587 /** 1588 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 1589 * @hw: pointer to hardware structure 1590 * @count: number of bits to shift 1591 **/ 1592 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 1593 { 1594 u32 eec; 1595 u32 i; 1596 u16 data = 0; 1597 1598 /* 1599 * In order to read a register from the EEPROM, we need to shift 1600 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 1601 * the clock input to the EEPROM (setting the SK bit), and then reading 1602 * the value of the "DO" bit. During this "shifting in" process the 1603 * "DI" bit should always be clear. 1604 */ 1605 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1606 1607 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 1608 1609 for (i = 0; i < count; i++) { 1610 data = data << 1; 1611 ixgbe_raise_eeprom_clk(hw, &eec); 1612 1613 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1614 1615 eec &= ~(IXGBE_EEC_DI); 1616 if (eec & IXGBE_EEC_DO) 1617 data |= 1; 1618 1619 ixgbe_lower_eeprom_clk(hw, &eec); 1620 } 1621 1622 return data; 1623 } 1624 1625 /** 1626 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 1627 * @hw: pointer to hardware structure 1628 * @eec: EEC register's current value 1629 **/ 1630 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1631 { 1632 /* 1633 * Raise the clock input to the EEPROM 1634 * (setting the SK bit), then delay 1635 */ 1636 *eec = *eec | IXGBE_EEC_SK; 1637 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1638 IXGBE_WRITE_FLUSH(hw); 1639 udelay(1); 1640 } 1641 1642 /** 1643 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 1644 * @hw: pointer to hardware structure 1645 * @eec: EEC's current value 1646 **/ 1647 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1648 { 1649 /* 1650 * Lower the clock input to the EEPROM (clearing the SK bit), then 1651 * delay 1652 */ 1653 *eec = *eec & ~IXGBE_EEC_SK; 1654 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); 1655 IXGBE_WRITE_FLUSH(hw); 1656 udelay(1); 1657 } 1658 1659 /** 1660 * ixgbe_release_eeprom - Release EEPROM, release semaphores 1661 * @hw: pointer to hardware structure 1662 **/ 1663 static void ixgbe_release_eeprom(struct ixgbe_hw *hw) 1664 { 1665 u32 eec; 1666 1667 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 1668 1669 eec |= IXGBE_EEC_CS; /* Pull CS high */ 1670 eec &= ~IXGBE_EEC_SK; /* Lower SCK */ 1671 1672 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1673 IXGBE_WRITE_FLUSH(hw); 1674 1675 udelay(1); 1676 1677 /* Stop requesting EEPROM access */ 1678 eec &= ~IXGBE_EEC_REQ; 1679 IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); 1680 1681 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1682 1683 /* 1684 * Delay before attempt to obtain semaphore again to allow FW 1685 * access. semaphore_delay is in ms we need us for usleep_range 1686 */ 1687 usleep_range(hw->eeprom.semaphore_delay * 1000, 1688 hw->eeprom.semaphore_delay * 2000); 1689 } 1690 1691 /** 1692 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum 1693 * @hw: pointer to hardware structure 1694 **/ 1695 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1696 { 1697 u16 i; 1698 u16 j; 1699 u16 checksum = 0; 1700 u16 length = 0; 1701 u16 pointer = 0; 1702 u16 word = 0; 1703 1704 /* Include 0x0-0x3F in the checksum */ 1705 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 1706 if (hw->eeprom.ops.read(hw, i, &word)) { 1707 hw_dbg(hw, "EEPROM read failed\n"); 1708 break; 1709 } 1710 checksum += word; 1711 } 1712 1713 /* Include all data from pointers except for the fw pointer */ 1714 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 1715 if (hw->eeprom.ops.read(hw, i, &pointer)) { 1716 hw_dbg(hw, "EEPROM read failed\n"); 1717 return IXGBE_ERR_EEPROM; 1718 } 1719 1720 /* If the pointer seems invalid */ 1721 if (pointer == 0xFFFF || pointer == 0) 1722 continue; 1723 1724 if (hw->eeprom.ops.read(hw, pointer, &length)) { 1725 hw_dbg(hw, "EEPROM read failed\n"); 1726 return IXGBE_ERR_EEPROM; 1727 } 1728 1729 if (length == 0xFFFF || length == 0) 1730 continue; 1731 1732 for (j = pointer + 1; j <= pointer + length; j++) { 1733 if (hw->eeprom.ops.read(hw, j, &word)) { 1734 hw_dbg(hw, "EEPROM read failed\n"); 1735 return IXGBE_ERR_EEPROM; 1736 } 1737 checksum += word; 1738 } 1739 } 1740 1741 checksum = (u16)IXGBE_EEPROM_SUM - checksum; 1742 1743 return (s32)checksum; 1744 } 1745 1746 /** 1747 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum 1748 * @hw: pointer to hardware structure 1749 * @checksum_val: calculated checksum 1750 * 1751 * Performs checksum calculation and validates the EEPROM checksum. If the 1752 * caller does not need checksum_val, the value can be NULL. 1753 **/ 1754 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 1755 u16 *checksum_val) 1756 { 1757 s32 status; 1758 u16 checksum; 1759 u16 read_checksum = 0; 1760 1761 /* 1762 * Read the first word from the EEPROM. If this times out or fails, do 1763 * not continue or we could be in for a very long wait while every 1764 * EEPROM read fails 1765 */ 1766 status = hw->eeprom.ops.read(hw, 0, &checksum); 1767 if (status) { 1768 hw_dbg(hw, "EEPROM read failed\n"); 1769 return status; 1770 } 1771 1772 status = hw->eeprom.ops.calc_checksum(hw); 1773 if (status < 0) 1774 return status; 1775 1776 checksum = (u16)(status & 0xffff); 1777 1778 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1779 if (status) { 1780 hw_dbg(hw, "EEPROM read failed\n"); 1781 return status; 1782 } 1783 1784 /* Verify read checksum from EEPROM is the same as 1785 * calculated checksum 1786 */ 1787 if (read_checksum != checksum) 1788 status = IXGBE_ERR_EEPROM_CHECKSUM; 1789 1790 /* If the user cares, return the calculated checksum */ 1791 if (checksum_val) 1792 *checksum_val = checksum; 1793 1794 return status; 1795 } 1796 1797 /** 1798 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum 1799 * @hw: pointer to hardware structure 1800 **/ 1801 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) 1802 { 1803 s32 status; 1804 u16 checksum; 1805 1806 /* 1807 * Read the first word from the EEPROM. If this times out or fails, do 1808 * not continue or we could be in for a very long wait while every 1809 * EEPROM read fails 1810 */ 1811 status = hw->eeprom.ops.read(hw, 0, &checksum); 1812 if (status) { 1813 hw_dbg(hw, "EEPROM read failed\n"); 1814 return status; 1815 } 1816 1817 status = hw->eeprom.ops.calc_checksum(hw); 1818 if (status < 0) 1819 return status; 1820 1821 checksum = (u16)(status & 0xffff); 1822 1823 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); 1824 1825 return status; 1826 } 1827 1828 /** 1829 * ixgbe_set_rar_generic - Set Rx address register 1830 * @hw: pointer to hardware structure 1831 * @index: Receive address register to write 1832 * @addr: Address to put into receive address register 1833 * @vmdq: VMDq "set" or "pool" index 1834 * @enable_addr: set flag that address is active 1835 * 1836 * Puts an ethernet address into a receive address register. 1837 **/ 1838 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 1839 u32 enable_addr) 1840 { 1841 u32 rar_low, rar_high; 1842 u32 rar_entries = hw->mac.num_rar_entries; 1843 1844 /* Make sure we are using a valid rar index range */ 1845 if (index >= rar_entries) { 1846 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1847 return IXGBE_ERR_INVALID_ARGUMENT; 1848 } 1849 1850 /* setup VMDq pool selection before this RAR gets enabled */ 1851 hw->mac.ops.set_vmdq(hw, index, vmdq); 1852 1853 /* 1854 * HW expects these in little endian so we reverse the byte 1855 * order from network order (big endian) to little endian 1856 */ 1857 rar_low = ((u32)addr[0] | 1858 ((u32)addr[1] << 8) | 1859 ((u32)addr[2] << 16) | 1860 ((u32)addr[3] << 24)); 1861 /* 1862 * Some parts put the VMDq setting in the extra RAH bits, 1863 * so save everything except the lower 16 bits that hold part 1864 * of the address and the address valid bit. 1865 */ 1866 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1867 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1868 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); 1869 1870 if (enable_addr != 0) 1871 rar_high |= IXGBE_RAH_AV; 1872 1873 /* Record lower 32 bits of MAC address and then make 1874 * sure that write is flushed to hardware before writing 1875 * the upper 16 bits and setting the valid bit. 1876 */ 1877 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1878 IXGBE_WRITE_FLUSH(hw); 1879 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1880 1881 return 0; 1882 } 1883 1884 /** 1885 * ixgbe_clear_rar_generic - Remove Rx address register 1886 * @hw: pointer to hardware structure 1887 * @index: Receive address register to write 1888 * 1889 * Clears an ethernet address from a receive address register. 1890 **/ 1891 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) 1892 { 1893 u32 rar_high; 1894 u32 rar_entries = hw->mac.num_rar_entries; 1895 1896 /* Make sure we are using a valid rar index range */ 1897 if (index >= rar_entries) { 1898 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1899 return IXGBE_ERR_INVALID_ARGUMENT; 1900 } 1901 1902 /* 1903 * Some parts put the VMDq setting in the extra RAH bits, 1904 * so save everything except the lower 16 bits that hold part 1905 * of the address and the address valid bit. 1906 */ 1907 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1908 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1909 1910 /* Clear the address valid bit and upper 16 bits of the address 1911 * before clearing the lower bits. This way we aren't updating 1912 * a live filter. 1913 */ 1914 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1915 IXGBE_WRITE_FLUSH(hw); 1916 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 1917 1918 /* clear VMDq pool/queue selection for this RAR */ 1919 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1920 1921 return 0; 1922 } 1923 1924 /** 1925 * ixgbe_init_rx_addrs_generic - Initializes receive address filters. 1926 * @hw: pointer to hardware structure 1927 * 1928 * Places the MAC address in receive address register 0 and clears the rest 1929 * of the receive address registers. Clears the multicast table. Assumes 1930 * the receiver is in reset when the routine is called. 1931 **/ 1932 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) 1933 { 1934 u32 i; 1935 u32 rar_entries = hw->mac.num_rar_entries; 1936 1937 /* 1938 * If the current mac address is valid, assume it is a software override 1939 * to the permanent address. 1940 * Otherwise, use the permanent address from the eeprom. 1941 */ 1942 if (!is_valid_ether_addr(hw->mac.addr)) { 1943 /* Get the MAC address from the RAR0 for later reference */ 1944 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1945 1946 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); 1947 } else { 1948 /* Setup the receive address. */ 1949 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1950 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1951 1952 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1953 } 1954 1955 /* clear VMDq pool/queue selection for RAR 0 */ 1956 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); 1957 1958 hw->addr_ctrl.overflow_promisc = 0; 1959 1960 hw->addr_ctrl.rar_used_count = 1; 1961 1962 /* Zero out the other receive addresses. */ 1963 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); 1964 for (i = 1; i < rar_entries; i++) { 1965 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1966 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1967 } 1968 1969 /* Clear the MTA */ 1970 hw->addr_ctrl.mta_in_use = 0; 1971 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1972 1973 hw_dbg(hw, " Clearing MTA\n"); 1974 for (i = 0; i < hw->mac.mcft_size; i++) 1975 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1976 1977 if (hw->mac.ops.init_uta_tables) 1978 hw->mac.ops.init_uta_tables(hw); 1979 1980 return 0; 1981 } 1982 1983 /** 1984 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1985 * @hw: pointer to hardware structure 1986 * @mc_addr: the multicast address 1987 * 1988 * Extracts the 12 bits, from a multicast address, to determine which 1989 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1990 * incoming rx multicast addresses, to determine the bit-vector to check in 1991 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1992 * by the MO field of the MCSTCTRL. The MO field is set during initialization 1993 * to mc_filter_type. 1994 **/ 1995 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1996 { 1997 u32 vector = 0; 1998 1999 switch (hw->mac.mc_filter_type) { 2000 case 0: /* use bits [47:36] of the address */ 2001 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 2002 break; 2003 case 1: /* use bits [46:35] of the address */ 2004 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 2005 break; 2006 case 2: /* use bits [45:34] of the address */ 2007 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 2008 break; 2009 case 3: /* use bits [43:32] of the address */ 2010 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 2011 break; 2012 default: /* Invalid mc_filter_type */ 2013 hw_dbg(hw, "MC filter type param set incorrectly\n"); 2014 break; 2015 } 2016 2017 /* vector can only be 12-bits or boundary will be exceeded */ 2018 vector &= 0xFFF; 2019 return vector; 2020 } 2021 2022 /** 2023 * ixgbe_set_mta - Set bit-vector in multicast table 2024 * @hw: pointer to hardware structure 2025 * @mc_addr: Multicast address 2026 * 2027 * Sets the bit-vector in the multicast table. 2028 **/ 2029 static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) 2030 { 2031 u32 vector; 2032 u32 vector_bit; 2033 u32 vector_reg; 2034 2035 hw->addr_ctrl.mta_in_use++; 2036 2037 vector = ixgbe_mta_vector(hw, mc_addr); 2038 hw_dbg(hw, " bit-vector = 0x%03X\n", vector); 2039 2040 /* 2041 * The MTA is a register array of 128 32-bit registers. It is treated 2042 * like an array of 4096 bits. We want to set bit 2043 * BitArray[vector_value]. So we figure out what register the bit is 2044 * in, read it, OR in the new bit, then write back the new value. The 2045 * register is determined by the upper 7 bits of the vector value and 2046 * the bit within that register are determined by the lower 5 bits of 2047 * the value. 2048 */ 2049 vector_reg = (vector >> 5) & 0x7F; 2050 vector_bit = vector & 0x1F; 2051 hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); 2052 } 2053 2054 /** 2055 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 2056 * @hw: pointer to hardware structure 2057 * @netdev: pointer to net device structure 2058 * 2059 * The given list replaces any existing list. Clears the MC addrs from receive 2060 * address registers and the multicast table. Uses unused receive address 2061 * registers for the first multicast addresses, and hashes the rest into the 2062 * multicast table. 2063 **/ 2064 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 2065 struct net_device *netdev) 2066 { 2067 struct netdev_hw_addr *ha; 2068 u32 i; 2069 2070 /* 2071 * Set the new number of MC addresses that we are being requested to 2072 * use. 2073 */ 2074 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 2075 hw->addr_ctrl.mta_in_use = 0; 2076 2077 /* Clear mta_shadow */ 2078 hw_dbg(hw, " Clearing MTA\n"); 2079 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 2080 2081 /* Update mta shadow */ 2082 netdev_for_each_mc_addr(ha, netdev) { 2083 hw_dbg(hw, " Adding the multicast addresses:\n"); 2084 ixgbe_set_mta(hw, ha->addr); 2085 } 2086 2087 /* Enable mta */ 2088 for (i = 0; i < hw->mac.mcft_size; i++) 2089 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, 2090 hw->mac.mta_shadow[i]); 2091 2092 if (hw->addr_ctrl.mta_in_use > 0) 2093 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 2094 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2095 2096 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 2097 return 0; 2098 } 2099 2100 /** 2101 * ixgbe_enable_mc_generic - Enable multicast address in RAR 2102 * @hw: pointer to hardware structure 2103 * 2104 * Enables multicast address in RAR and the use of the multicast hash table. 2105 **/ 2106 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 2107 { 2108 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2109 2110 if (a->mta_in_use > 0) 2111 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2112 hw->mac.mc_filter_type); 2113 2114 return 0; 2115 } 2116 2117 /** 2118 * ixgbe_disable_mc_generic - Disable multicast address in RAR 2119 * @hw: pointer to hardware structure 2120 * 2121 * Disables multicast address in RAR and the use of the multicast hash table. 2122 **/ 2123 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2124 { 2125 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2126 2127 if (a->mta_in_use > 0) 2128 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2129 2130 return 0; 2131 } 2132 2133 /** 2134 * ixgbe_fc_enable_generic - Enable flow control 2135 * @hw: pointer to hardware structure 2136 * 2137 * Enable flow control according to the current settings. 2138 **/ 2139 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2140 { 2141 u32 mflcn_reg, fccfg_reg; 2142 u32 reg; 2143 u32 fcrtl, fcrth; 2144 int i; 2145 2146 /* Validate the water mark configuration. */ 2147 if (!hw->fc.pause_time) 2148 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2149 2150 /* Low water mark of zero causes XOFF floods */ 2151 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2152 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2153 hw->fc.high_water[i]) { 2154 if (!hw->fc.low_water[i] || 2155 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2156 hw_dbg(hw, "Invalid water mark configuration\n"); 2157 return IXGBE_ERR_INVALID_LINK_SETTINGS; 2158 } 2159 } 2160 } 2161 2162 /* Negotiate the fc mode to use */ 2163 hw->mac.ops.fc_autoneg(hw); 2164 2165 /* Disable any previous flow control settings */ 2166 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2167 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2168 2169 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2170 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2171 2172 /* 2173 * The possible values of fc.current_mode are: 2174 * 0: Flow control is completely disabled 2175 * 1: Rx flow control is enabled (we can receive pause frames, 2176 * but not send pause frames). 2177 * 2: Tx flow control is enabled (we can send pause frames but 2178 * we do not support receiving pause frames). 2179 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2180 * other: Invalid. 2181 */ 2182 switch (hw->fc.current_mode) { 2183 case ixgbe_fc_none: 2184 /* 2185 * Flow control is disabled by software override or autoneg. 2186 * The code below will actually disable it in the HW. 2187 */ 2188 break; 2189 case ixgbe_fc_rx_pause: 2190 /* 2191 * Rx Flow control is enabled and Tx Flow control is 2192 * disabled by software override. Since there really 2193 * isn't a way to advertise that we are capable of RX 2194 * Pause ONLY, we will advertise that we support both 2195 * symmetric and asymmetric Rx PAUSE. Later, we will 2196 * disable the adapter's ability to send PAUSE frames. 2197 */ 2198 mflcn_reg |= IXGBE_MFLCN_RFCE; 2199 break; 2200 case ixgbe_fc_tx_pause: 2201 /* 2202 * Tx Flow control is enabled, and Rx Flow control is 2203 * disabled by software override. 2204 */ 2205 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2206 break; 2207 case ixgbe_fc_full: 2208 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2209 mflcn_reg |= IXGBE_MFLCN_RFCE; 2210 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2211 break; 2212 default: 2213 hw_dbg(hw, "Flow control param set incorrectly\n"); 2214 return IXGBE_ERR_CONFIG; 2215 } 2216 2217 /* Set 802.3x based flow control settings. */ 2218 mflcn_reg |= IXGBE_MFLCN_DPF; 2219 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2220 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2221 2222 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2223 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2224 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2225 hw->fc.high_water[i]) { 2226 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 2227 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2228 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2229 } else { 2230 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2231 /* 2232 * In order to prevent Tx hangs when the internal Tx 2233 * switch is enabled we must set the high water mark 2234 * to the Rx packet buffer size - 24KB. This allows 2235 * the Tx switch to function even under heavy Rx 2236 * workloads. 2237 */ 2238 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; 2239 } 2240 2241 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2242 } 2243 2244 /* Configure pause time (2 TCs per register) */ 2245 reg = hw->fc.pause_time * 0x00010001U; 2246 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 2247 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2248 2249 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2250 2251 return 0; 2252 } 2253 2254 /** 2255 * ixgbe_negotiate_fc - Negotiate flow control 2256 * @hw: pointer to hardware structure 2257 * @adv_reg: flow control advertised settings 2258 * @lp_reg: link partner's flow control settings 2259 * @adv_sym: symmetric pause bit in advertisement 2260 * @adv_asm: asymmetric pause bit in advertisement 2261 * @lp_sym: symmetric pause bit in link partner advertisement 2262 * @lp_asm: asymmetric pause bit in link partner advertisement 2263 * 2264 * Find the intersection between advertised settings and link partner's 2265 * advertised settings 2266 **/ 2267 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2268 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2269 { 2270 if ((!(adv_reg)) || (!(lp_reg))) 2271 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2272 2273 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2274 /* 2275 * Now we need to check if the user selected Rx ONLY 2276 * of pause frames. In this case, we had to advertise 2277 * FULL flow control because we could not advertise RX 2278 * ONLY. Hence, we must now check to see if we need to 2279 * turn OFF the TRANSMISSION of PAUSE frames. 2280 */ 2281 if (hw->fc.requested_mode == ixgbe_fc_full) { 2282 hw->fc.current_mode = ixgbe_fc_full; 2283 hw_dbg(hw, "Flow Control = FULL.\n"); 2284 } else { 2285 hw->fc.current_mode = ixgbe_fc_rx_pause; 2286 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); 2287 } 2288 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2289 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2290 hw->fc.current_mode = ixgbe_fc_tx_pause; 2291 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); 2292 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2293 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2294 hw->fc.current_mode = ixgbe_fc_rx_pause; 2295 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 2296 } else { 2297 hw->fc.current_mode = ixgbe_fc_none; 2298 hw_dbg(hw, "Flow Control = NONE.\n"); 2299 } 2300 return 0; 2301 } 2302 2303 /** 2304 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2305 * @hw: pointer to hardware structure 2306 * 2307 * Enable flow control according on 1 gig fiber. 2308 **/ 2309 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2310 { 2311 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2312 s32 ret_val; 2313 2314 /* 2315 * On multispeed fiber at 1g, bail out if 2316 * - link is up but AN did not complete, or if 2317 * - link is up and AN completed but timed out 2318 */ 2319 2320 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2321 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2322 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) 2323 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2324 2325 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2326 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2327 2328 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2329 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2330 IXGBE_PCS1GANA_ASM_PAUSE, 2331 IXGBE_PCS1GANA_SYM_PAUSE, 2332 IXGBE_PCS1GANA_ASM_PAUSE); 2333 2334 return ret_val; 2335 } 2336 2337 /** 2338 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2339 * @hw: pointer to hardware structure 2340 * 2341 * Enable flow control according to IEEE clause 37. 2342 **/ 2343 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2344 { 2345 u32 links2, anlp1_reg, autoc_reg, links; 2346 s32 ret_val; 2347 2348 /* 2349 * On backplane, bail out if 2350 * - backplane autoneg was not completed, or if 2351 * - we are 82599 and link partner is not AN enabled 2352 */ 2353 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2354 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) 2355 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2356 2357 if (hw->mac.type == ixgbe_mac_82599EB) { 2358 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2359 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) 2360 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2361 } 2362 /* 2363 * Read the 10g AN autoc and LP ability registers and resolve 2364 * local flow control settings accordingly 2365 */ 2366 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2367 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2368 2369 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2370 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2371 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2372 2373 return ret_val; 2374 } 2375 2376 /** 2377 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2378 * @hw: pointer to hardware structure 2379 * 2380 * Enable flow control according to IEEE clause 37. 2381 **/ 2382 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2383 { 2384 u16 technology_ability_reg = 0; 2385 u16 lp_technology_ability_reg = 0; 2386 2387 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2388 MDIO_MMD_AN, 2389 &technology_ability_reg); 2390 hw->phy.ops.read_reg(hw, MDIO_AN_LPA, 2391 MDIO_MMD_AN, 2392 &lp_technology_ability_reg); 2393 2394 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2395 (u32)lp_technology_ability_reg, 2396 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2397 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2398 } 2399 2400 /** 2401 * ixgbe_fc_autoneg - Configure flow control 2402 * @hw: pointer to hardware structure 2403 * 2404 * Compares our advertised flow control capabilities to those advertised by 2405 * our link partner, and determines the proper flow control mode to use. 2406 **/ 2407 void ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2408 { 2409 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2410 ixgbe_link_speed speed; 2411 bool link_up; 2412 2413 /* 2414 * AN should have completed when the cable was plugged in. 2415 * Look for reasons to bail out. Bail out if: 2416 * - FC autoneg is disabled, or if 2417 * - link is not up. 2418 * 2419 * Since we're being called from an LSC, link is already known to be up. 2420 * So use link_up_wait_to_complete=false. 2421 */ 2422 if (hw->fc.disable_fc_autoneg) 2423 goto out; 2424 2425 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2426 if (!link_up) 2427 goto out; 2428 2429 switch (hw->phy.media_type) { 2430 /* Autoneg flow control on fiber adapters */ 2431 case ixgbe_media_type_fiber: 2432 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2433 ret_val = ixgbe_fc_autoneg_fiber(hw); 2434 break; 2435 2436 /* Autoneg flow control on backplane adapters */ 2437 case ixgbe_media_type_backplane: 2438 ret_val = ixgbe_fc_autoneg_backplane(hw); 2439 break; 2440 2441 /* Autoneg flow control on copper adapters */ 2442 case ixgbe_media_type_copper: 2443 if (ixgbe_device_supports_autoneg_fc(hw)) 2444 ret_val = ixgbe_fc_autoneg_copper(hw); 2445 break; 2446 2447 default: 2448 break; 2449 } 2450 2451 out: 2452 if (ret_val == 0) { 2453 hw->fc.fc_was_autonegged = true; 2454 } else { 2455 hw->fc.fc_was_autonegged = false; 2456 hw->fc.current_mode = hw->fc.requested_mode; 2457 } 2458 } 2459 2460 /** 2461 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion 2462 * @hw: pointer to hardware structure 2463 * 2464 * System-wide timeout range is encoded in PCIe Device Control2 register. 2465 * 2466 * Add 10% to specified maximum and return the number of times to poll for 2467 * completion timeout, in units of 100 microsec. Never return less than 2468 * 800 = 80 millisec. 2469 **/ 2470 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 2471 { 2472 s16 devctl2; 2473 u32 pollcnt; 2474 2475 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); 2476 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 2477 2478 switch (devctl2) { 2479 case IXGBE_PCIDEVCTRL2_65_130ms: 2480 pollcnt = 1300; /* 130 millisec */ 2481 break; 2482 case IXGBE_PCIDEVCTRL2_260_520ms: 2483 pollcnt = 5200; /* 520 millisec */ 2484 break; 2485 case IXGBE_PCIDEVCTRL2_1_2s: 2486 pollcnt = 20000; /* 2 sec */ 2487 break; 2488 case IXGBE_PCIDEVCTRL2_4_8s: 2489 pollcnt = 80000; /* 8 sec */ 2490 break; 2491 case IXGBE_PCIDEVCTRL2_17_34s: 2492 pollcnt = 34000; /* 34 sec */ 2493 break; 2494 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 2495 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 2496 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 2497 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 2498 default: 2499 pollcnt = 800; /* 80 millisec minimum */ 2500 break; 2501 } 2502 2503 /* add 10% to spec maximum */ 2504 return (pollcnt * 11) / 10; 2505 } 2506 2507 /** 2508 * ixgbe_disable_pcie_master - Disable PCI-express master access 2509 * @hw: pointer to hardware structure 2510 * 2511 * Disables PCI-Express master access and verifies there are no pending 2512 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2513 * bit hasn't caused the master requests to be disabled, else 0 2514 * is returned signifying master requests disabled. 2515 **/ 2516 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2517 { 2518 u32 i, poll; 2519 u16 value; 2520 2521 /* Always set this bit to ensure any future transactions are blocked */ 2522 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 2523 2524 /* Poll for bit to read as set */ 2525 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2526 if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) 2527 break; 2528 usleep_range(100, 120); 2529 } 2530 if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { 2531 hw_dbg(hw, "GIO disable did not set - requesting resets\n"); 2532 goto gio_disable_fail; 2533 } 2534 2535 /* Exit if master requests are blocked */ 2536 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || 2537 ixgbe_removed(hw->hw_addr)) 2538 return 0; 2539 2540 /* Poll for master request bit to clear */ 2541 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2542 udelay(100); 2543 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2544 return 0; 2545 } 2546 2547 /* 2548 * Two consecutive resets are required via CTRL.RST per datasheet 2549 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2550 * of this need. The first reset prevents new master requests from 2551 * being issued by our device. We then must wait 1usec or more for any 2552 * remaining completions from the PCIe bus to trickle in, and then reset 2553 * again to clear out any effects they may have had on our device. 2554 */ 2555 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); 2556 gio_disable_fail: 2557 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2558 2559 if (hw->mac.type >= ixgbe_mac_X550) 2560 return 0; 2561 2562 /* 2563 * Before proceeding, make sure that the PCIe block does not have 2564 * transactions pending. 2565 */ 2566 poll = ixgbe_pcie_timeout_poll(hw); 2567 for (i = 0; i < poll; i++) { 2568 udelay(100); 2569 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); 2570 if (ixgbe_removed(hw->hw_addr)) 2571 return 0; 2572 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2573 return 0; 2574 } 2575 2576 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); 2577 return IXGBE_ERR_MASTER_REQUESTS_PENDING; 2578 } 2579 2580 /** 2581 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 2582 * @hw: pointer to hardware structure 2583 * @mask: Mask to specify which semaphore to acquire 2584 * 2585 * Acquires the SWFW semaphore through the GSSR register for the specified 2586 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2587 **/ 2588 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) 2589 { 2590 u32 gssr = 0; 2591 u32 swmask = mask; 2592 u32 fwmask = mask << 5; 2593 u32 timeout = 200; 2594 u32 i; 2595 2596 for (i = 0; i < timeout; i++) { 2597 /* 2598 * SW NVM semaphore bit is used for access to all 2599 * SW_FW_SYNC bits (not just NVM) 2600 */ 2601 if (ixgbe_get_eeprom_semaphore(hw)) 2602 return IXGBE_ERR_SWFW_SYNC; 2603 2604 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2605 if (!(gssr & (fwmask | swmask))) { 2606 gssr |= swmask; 2607 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2608 ixgbe_release_eeprom_semaphore(hw); 2609 return 0; 2610 } else { 2611 /* Resource is currently in use by FW or SW */ 2612 ixgbe_release_eeprom_semaphore(hw); 2613 usleep_range(5000, 10000); 2614 } 2615 } 2616 2617 /* If time expired clear the bits holding the lock and retry */ 2618 if (gssr & (fwmask | swmask)) 2619 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); 2620 2621 usleep_range(5000, 10000); 2622 return IXGBE_ERR_SWFW_SYNC; 2623 } 2624 2625 /** 2626 * ixgbe_release_swfw_sync - Release SWFW semaphore 2627 * @hw: pointer to hardware structure 2628 * @mask: Mask to specify which semaphore to release 2629 * 2630 * Releases the SWFW semaphore through the GSSR register for the specified 2631 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2632 **/ 2633 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) 2634 { 2635 u32 gssr; 2636 u32 swmask = mask; 2637 2638 ixgbe_get_eeprom_semaphore(hw); 2639 2640 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2641 gssr &= ~swmask; 2642 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2643 2644 ixgbe_release_eeprom_semaphore(hw); 2645 } 2646 2647 /** 2648 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read 2649 * @hw: pointer to hardware structure 2650 * @reg_val: Value we read from AUTOC 2651 * @locked: bool to indicate whether the SW/FW lock should be taken. Never 2652 * true in this the generic case. 2653 * 2654 * The default case requires no protection so just to the register read. 2655 **/ 2656 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 2657 { 2658 *locked = false; 2659 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2660 return 0; 2661 } 2662 2663 /** 2664 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 2665 * @hw: pointer to hardware structure 2666 * @reg_val: value to write to AUTOC 2667 * @locked: bool to indicate whether the SW/FW lock was already taken by 2668 * previous read. 2669 **/ 2670 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 2671 { 2672 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 2673 return 0; 2674 } 2675 2676 /** 2677 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2678 * @hw: pointer to hardware structure 2679 * 2680 * Stops the receive data path and waits for the HW to internally 2681 * empty the Rx security block. 2682 **/ 2683 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2684 { 2685 #define IXGBE_MAX_SECRX_POLL 40 2686 int i; 2687 int secrxreg; 2688 2689 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2690 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2691 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2692 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2693 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2694 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2695 break; 2696 else 2697 /* Use interrupt-safe sleep just in case */ 2698 udelay(1000); 2699 } 2700 2701 /* For informational purposes only */ 2702 if (i >= IXGBE_MAX_SECRX_POLL) 2703 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2704 2705 return 0; 2706 2707 } 2708 2709 /** 2710 * ixgbe_enable_rx_buff - Enables the receive data path 2711 * @hw: pointer to hardware structure 2712 * 2713 * Enables the receive data path 2714 **/ 2715 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2716 { 2717 u32 secrxreg; 2718 2719 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2720 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2721 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2722 IXGBE_WRITE_FLUSH(hw); 2723 2724 return 0; 2725 } 2726 2727 /** 2728 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2729 * @hw: pointer to hardware structure 2730 * @regval: register value to write to RXCTRL 2731 * 2732 * Enables the Rx DMA unit 2733 **/ 2734 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2735 { 2736 if (regval & IXGBE_RXCTRL_RXEN) 2737 hw->mac.ops.enable_rx(hw); 2738 else 2739 hw->mac.ops.disable_rx(hw); 2740 2741 return 0; 2742 } 2743 2744 /** 2745 * ixgbe_blink_led_start_generic - Blink LED based on index. 2746 * @hw: pointer to hardware structure 2747 * @index: led number to blink 2748 **/ 2749 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2750 { 2751 ixgbe_link_speed speed = 0; 2752 bool link_up = false; 2753 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2754 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2755 bool locked = false; 2756 s32 ret_val; 2757 2758 if (index > 3) 2759 return IXGBE_ERR_PARAM; 2760 2761 /* 2762 * Link must be up to auto-blink the LEDs; 2763 * Force it if link is down. 2764 */ 2765 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2766 2767 if (!link_up) { 2768 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2769 if (ret_val) 2770 return ret_val; 2771 2772 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2773 autoc_reg |= IXGBE_AUTOC_FLU; 2774 2775 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2776 if (ret_val) 2777 return ret_val; 2778 2779 IXGBE_WRITE_FLUSH(hw); 2780 2781 usleep_range(10000, 20000); 2782 } 2783 2784 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2785 led_reg |= IXGBE_LED_BLINK(index); 2786 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2787 IXGBE_WRITE_FLUSH(hw); 2788 2789 return 0; 2790 } 2791 2792 /** 2793 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 2794 * @hw: pointer to hardware structure 2795 * @index: led number to stop blinking 2796 **/ 2797 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2798 { 2799 u32 autoc_reg = 0; 2800 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2801 bool locked = false; 2802 s32 ret_val; 2803 2804 if (index > 3) 2805 return IXGBE_ERR_PARAM; 2806 2807 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2808 if (ret_val) 2809 return ret_val; 2810 2811 autoc_reg &= ~IXGBE_AUTOC_FLU; 2812 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2813 2814 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2815 if (ret_val) 2816 return ret_val; 2817 2818 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2819 led_reg &= ~IXGBE_LED_BLINK(index); 2820 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2821 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2822 IXGBE_WRITE_FLUSH(hw); 2823 2824 return 0; 2825 } 2826 2827 /** 2828 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2829 * @hw: pointer to hardware structure 2830 * @san_mac_offset: SAN MAC address offset 2831 * 2832 * This function will read the EEPROM location for the SAN MAC address 2833 * pointer, and returns the value at that location. This is used in both 2834 * get and set mac_addr routines. 2835 **/ 2836 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2837 u16 *san_mac_offset) 2838 { 2839 s32 ret_val; 2840 2841 /* 2842 * First read the EEPROM pointer to see if the MAC addresses are 2843 * available. 2844 */ 2845 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 2846 san_mac_offset); 2847 if (ret_val) 2848 hw_err(hw, "eeprom read at offset %d failed\n", 2849 IXGBE_SAN_MAC_ADDR_PTR); 2850 2851 return ret_val; 2852 } 2853 2854 /** 2855 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2856 * @hw: pointer to hardware structure 2857 * @san_mac_addr: SAN MAC address 2858 * 2859 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2860 * per-port, so set_lan_id() must be called before reading the addresses. 2861 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2862 * upon for non-SFP connections, so we must call it here. 2863 **/ 2864 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2865 { 2866 u16 san_mac_data, san_mac_offset; 2867 u8 i; 2868 s32 ret_val; 2869 2870 /* 2871 * First read the EEPROM pointer to see if the MAC addresses are 2872 * available. If they're not, no point in calling set_lan_id() here. 2873 */ 2874 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2875 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 2876 2877 goto san_mac_addr_clr; 2878 2879 /* make sure we know which port we need to program */ 2880 hw->mac.ops.set_lan_id(hw); 2881 /* apply the port offset to the address offset */ 2882 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2883 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2884 for (i = 0; i < 3; i++) { 2885 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2886 &san_mac_data); 2887 if (ret_val) { 2888 hw_err(hw, "eeprom read at offset %d failed\n", 2889 san_mac_offset); 2890 goto san_mac_addr_clr; 2891 } 2892 san_mac_addr[i * 2] = (u8)(san_mac_data); 2893 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2894 san_mac_offset++; 2895 } 2896 return 0; 2897 2898 san_mac_addr_clr: 2899 /* No addresses available in this EEPROM. It's not necessarily an 2900 * error though, so just wipe the local address and return. 2901 */ 2902 for (i = 0; i < 6; i++) 2903 san_mac_addr[i] = 0xFF; 2904 return ret_val; 2905 } 2906 2907 /** 2908 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2909 * @hw: pointer to hardware structure 2910 * 2911 * Read PCIe configuration space, and get the MSI-X vector count from 2912 * the capabilities table. 2913 **/ 2914 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2915 { 2916 u16 msix_count; 2917 u16 max_msix_count; 2918 u16 pcie_offset; 2919 2920 switch (hw->mac.type) { 2921 case ixgbe_mac_82598EB: 2922 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 2923 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 2924 break; 2925 case ixgbe_mac_82599EB: 2926 case ixgbe_mac_X540: 2927 case ixgbe_mac_X550: 2928 case ixgbe_mac_X550EM_x: 2929 case ixgbe_mac_x550em_a: 2930 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2931 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2932 break; 2933 default: 2934 return 1; 2935 } 2936 2937 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); 2938 if (ixgbe_removed(hw->hw_addr)) 2939 msix_count = 0; 2940 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2941 2942 /* MSI-X count is zero-based in HW */ 2943 msix_count++; 2944 2945 if (msix_count > max_msix_count) 2946 msix_count = max_msix_count; 2947 2948 return msix_count; 2949 } 2950 2951 /** 2952 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2953 * @hw: pointer to hardware struct 2954 * @rar: receive address register index to disassociate 2955 * @vmdq: VMDq pool index to remove from the rar 2956 **/ 2957 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2958 { 2959 u32 mpsar_lo, mpsar_hi; 2960 u32 rar_entries = hw->mac.num_rar_entries; 2961 2962 /* Make sure we are using a valid rar index range */ 2963 if (rar >= rar_entries) { 2964 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2965 return IXGBE_ERR_INVALID_ARGUMENT; 2966 } 2967 2968 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2969 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2970 2971 if (ixgbe_removed(hw->hw_addr)) 2972 return 0; 2973 2974 if (!mpsar_lo && !mpsar_hi) 2975 return 0; 2976 2977 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2978 if (mpsar_lo) { 2979 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2980 mpsar_lo = 0; 2981 } 2982 if (mpsar_hi) { 2983 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2984 mpsar_hi = 0; 2985 } 2986 } else if (vmdq < 32) { 2987 mpsar_lo &= ~BIT(vmdq); 2988 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 2989 } else { 2990 mpsar_hi &= ~BIT(vmdq - 32); 2991 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2992 } 2993 2994 /* was that the last pool using this rar? */ 2995 if (mpsar_lo == 0 && mpsar_hi == 0 && 2996 rar != 0 && rar != hw->mac.san_mac_rar_index) 2997 hw->mac.ops.clear_rar(hw, rar); 2998 2999 return 0; 3000 } 3001 3002 /** 3003 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 3004 * @hw: pointer to hardware struct 3005 * @rar: receive address register index to associate with a VMDq index 3006 * @vmdq: VMDq pool index 3007 **/ 3008 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3009 { 3010 u32 mpsar; 3011 u32 rar_entries = hw->mac.num_rar_entries; 3012 3013 /* Make sure we are using a valid rar index range */ 3014 if (rar >= rar_entries) { 3015 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 3016 return IXGBE_ERR_INVALID_ARGUMENT; 3017 } 3018 3019 if (vmdq < 32) { 3020 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3021 mpsar |= BIT(vmdq); 3022 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3023 } else { 3024 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3025 mpsar |= BIT(vmdq - 32); 3026 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3027 } 3028 return 0; 3029 } 3030 3031 /** 3032 * This function should only be involved in the IOV mode. 3033 * In IOV mode, Default pool is next pool after the number of 3034 * VFs advertized and not 0. 3035 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] 3036 * 3037 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address 3038 * @hw: pointer to hardware struct 3039 * @vmdq: VMDq pool index 3040 **/ 3041 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) 3042 { 3043 u32 rar = hw->mac.san_mac_rar_index; 3044 3045 if (vmdq < 32) { 3046 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); 3047 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3048 } else { 3049 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3050 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); 3051 } 3052 3053 return 0; 3054 } 3055 3056 /** 3057 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3058 * @hw: pointer to hardware structure 3059 **/ 3060 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3061 { 3062 int i; 3063 3064 for (i = 0; i < 128; i++) 3065 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3066 3067 return 0; 3068 } 3069 3070 /** 3071 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3072 * @hw: pointer to hardware structure 3073 * @vlan: VLAN id to write to VLAN filter 3074 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if 3075 * vlanid not found 3076 * 3077 * return the VLVF index where this VLAN id should be placed 3078 * 3079 **/ 3080 static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) 3081 { 3082 s32 regindex, first_empty_slot; 3083 u32 bits; 3084 3085 /* short cut the special case */ 3086 if (vlan == 0) 3087 return 0; 3088 3089 /* if vlvf_bypass is set we don't want to use an empty slot, we 3090 * will simply bypass the VLVF if there are no entries present in the 3091 * VLVF that contain our VLAN 3092 */ 3093 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; 3094 3095 /* add VLAN enable bit for comparison */ 3096 vlan |= IXGBE_VLVF_VIEN; 3097 3098 /* Search for the vlan id in the VLVF entries. Save off the first empty 3099 * slot found along the way. 3100 * 3101 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 3102 */ 3103 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { 3104 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3105 if (bits == vlan) 3106 return regindex; 3107 if (!first_empty_slot && !bits) 3108 first_empty_slot = regindex; 3109 } 3110 3111 /* If we are here then we didn't find the VLAN. Return first empty 3112 * slot we found during our search, else error. 3113 */ 3114 if (!first_empty_slot) 3115 hw_dbg(hw, "No space in VLVF.\n"); 3116 3117 return first_empty_slot ? : IXGBE_ERR_NO_SPACE; 3118 } 3119 3120 /** 3121 * ixgbe_set_vfta_generic - Set VLAN filter table 3122 * @hw: pointer to hardware structure 3123 * @vlan: VLAN id to write to VLAN filter 3124 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3125 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3126 * @vlvf_bypass: boolean flag indicating updating default pool is okay 3127 * 3128 * Turn on/off specified VLAN in the VLAN filter table. 3129 **/ 3130 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3131 bool vlan_on, bool vlvf_bypass) 3132 { 3133 u32 regidx, vfta_delta, vfta, bits; 3134 s32 vlvf_index; 3135 3136 if ((vlan > 4095) || (vind > 63)) 3137 return IXGBE_ERR_PARAM; 3138 3139 /* 3140 * this is a 2 part operation - first the VFTA, then the 3141 * VLVF and VLVFB if VT Mode is set 3142 * We don't write the VFTA until we know the VLVF part succeeded. 3143 */ 3144 3145 /* Part 1 3146 * The VFTA is a bitstring made up of 128 32-bit registers 3147 * that enable the particular VLAN id, much like the MTA: 3148 * bits[11-5]: which register 3149 * bits[4-0]: which bit in the register 3150 */ 3151 regidx = vlan / 32; 3152 vfta_delta = BIT(vlan % 32); 3153 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); 3154 3155 /* vfta_delta represents the difference between the current value 3156 * of vfta and the value we want in the register. Since the diff 3157 * is an XOR mask we can just update vfta using an XOR. 3158 */ 3159 vfta_delta &= vlan_on ? ~vfta : vfta; 3160 vfta ^= vfta_delta; 3161 3162 /* Part 2 3163 * If VT Mode is set 3164 * Either vlan_on 3165 * make sure the vlan is in VLVF 3166 * set the vind bit in the matching VLVFB 3167 * Or !vlan_on 3168 * clear the pool bit and possibly the vind 3169 */ 3170 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) 3171 goto vfta_update; 3172 3173 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); 3174 if (vlvf_index < 0) { 3175 if (vlvf_bypass) 3176 goto vfta_update; 3177 return vlvf_index; 3178 } 3179 3180 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); 3181 3182 /* set the pool bit */ 3183 bits |= BIT(vind % 32); 3184 if (vlan_on) 3185 goto vlvf_update; 3186 3187 /* clear the pool bit */ 3188 bits ^= BIT(vind % 32); 3189 3190 if (!bits && 3191 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { 3192 /* Clear VFTA first, then disable VLVF. Otherwise 3193 * we run the risk of stray packets leaking into 3194 * the PF via the default pool 3195 */ 3196 if (vfta_delta) 3197 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3198 3199 /* disable VLVF and clear remaining bit from pool */ 3200 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3201 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); 3202 3203 return 0; 3204 } 3205 3206 /* If there are still bits set in the VLVFB registers 3207 * for the VLAN ID indicated we need to see if the 3208 * caller is requesting that we clear the VFTA entry bit. 3209 * If the caller has requested that we clear the VFTA 3210 * entry bit but there are still pools/VFs using this VLAN 3211 * ID entry then ignore the request. We're not worried 3212 * about the case where we're turning the VFTA VLAN ID 3213 * entry bit on, only when requested to turn it off as 3214 * there may be multiple pools and/or VFs using the 3215 * VLAN ID entry. In that case we cannot clear the 3216 * VFTA bit until all pools/VFs using that VLAN ID have also 3217 * been cleared. This will be indicated by "bits" being 3218 * zero. 3219 */ 3220 vfta_delta = 0; 3221 3222 vlvf_update: 3223 /* record pool change and enable VLAN ID if not already enabled */ 3224 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 3225 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 3226 3227 vfta_update: 3228 /* Update VFTA now that we are ready for traffic */ 3229 if (vfta_delta) 3230 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3231 3232 return 0; 3233 } 3234 3235 /** 3236 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3237 * @hw: pointer to hardware structure 3238 * 3239 * Clears the VLAN filer table, and the VMDq index associated with the filter 3240 **/ 3241 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3242 { 3243 u32 offset; 3244 3245 for (offset = 0; offset < hw->mac.vft_size; offset++) 3246 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3247 3248 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3249 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3250 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 3251 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); 3252 } 3253 3254 return 0; 3255 } 3256 3257 /** 3258 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix 3259 * @hw: pointer to hardware structure 3260 * 3261 * Contains the logic to identify if we need to verify link for the 3262 * crosstalk fix 3263 **/ 3264 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) 3265 { 3266 /* Does FW say we need the fix */ 3267 if (!hw->need_crosstalk_fix) 3268 return false; 3269 3270 /* Only consider SFP+ PHYs i.e. media type fiber */ 3271 switch (hw->mac.ops.get_media_type(hw)) { 3272 case ixgbe_media_type_fiber: 3273 case ixgbe_media_type_fiber_qsfp: 3274 break; 3275 default: 3276 return false; 3277 } 3278 3279 return true; 3280 } 3281 3282 /** 3283 * ixgbe_check_mac_link_generic - Determine link and speed status 3284 * @hw: pointer to hardware structure 3285 * @speed: pointer to link speed 3286 * @link_up: true when link is up 3287 * @link_up_wait_to_complete: bool used to wait for link up or not 3288 * 3289 * Reads the links register to determine if link is up and the current speed 3290 **/ 3291 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3292 bool *link_up, bool link_up_wait_to_complete) 3293 { 3294 u32 links_reg, links_orig; 3295 u32 i; 3296 3297 /* If Crosstalk fix enabled do the sanity check of making sure 3298 * the SFP+ cage is full. 3299 */ 3300 if (ixgbe_need_crosstalk_fix(hw)) { 3301 u32 sfp_cage_full; 3302 3303 switch (hw->mac.type) { 3304 case ixgbe_mac_82599EB: 3305 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3306 IXGBE_ESDP_SDP2; 3307 break; 3308 case ixgbe_mac_X550EM_x: 3309 case ixgbe_mac_x550em_a: 3310 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3311 IXGBE_ESDP_SDP0; 3312 break; 3313 default: 3314 /* sanity check - No SFP+ devices here */ 3315 sfp_cage_full = false; 3316 break; 3317 } 3318 3319 if (!sfp_cage_full) { 3320 *link_up = false; 3321 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3322 return 0; 3323 } 3324 } 3325 3326 /* clear the old state */ 3327 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3328 3329 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3330 3331 if (links_orig != links_reg) { 3332 hw_dbg(hw, "LINKS changed from %08X to %08X\n", 3333 links_orig, links_reg); 3334 } 3335 3336 if (link_up_wait_to_complete) { 3337 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3338 if (links_reg & IXGBE_LINKS_UP) { 3339 *link_up = true; 3340 break; 3341 } else { 3342 *link_up = false; 3343 } 3344 msleep(100); 3345 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3346 } 3347 } else { 3348 if (links_reg & IXGBE_LINKS_UP) 3349 *link_up = true; 3350 else 3351 *link_up = false; 3352 } 3353 3354 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 3355 case IXGBE_LINKS_SPEED_10G_82599: 3356 if ((hw->mac.type >= ixgbe_mac_X550) && 3357 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3358 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 3359 else 3360 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3361 break; 3362 case IXGBE_LINKS_SPEED_1G_82599: 3363 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3364 break; 3365 case IXGBE_LINKS_SPEED_100_82599: 3366 if ((hw->mac.type >= ixgbe_mac_X550) && 3367 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3368 *speed = IXGBE_LINK_SPEED_5GB_FULL; 3369 else 3370 *speed = IXGBE_LINK_SPEED_100_FULL; 3371 break; 3372 case IXGBE_LINKS_SPEED_10_X550EM_A: 3373 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3374 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3375 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { 3376 *speed = IXGBE_LINK_SPEED_10_FULL; 3377 } 3378 break; 3379 default: 3380 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3381 } 3382 3383 return 0; 3384 } 3385 3386 /** 3387 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3388 * the EEPROM 3389 * @hw: pointer to hardware structure 3390 * @wwnn_prefix: the alternative WWNN prefix 3391 * @wwpn_prefix: the alternative WWPN prefix 3392 * 3393 * This function will read the EEPROM from the alternative SAN MAC address 3394 * block to check the support for the alternative WWNN/WWPN prefix support. 3395 **/ 3396 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3397 u16 *wwpn_prefix) 3398 { 3399 u16 offset, caps; 3400 u16 alt_san_mac_blk_offset; 3401 3402 /* clear output first */ 3403 *wwnn_prefix = 0xFFFF; 3404 *wwpn_prefix = 0xFFFF; 3405 3406 /* check if alternative SAN MAC is supported */ 3407 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; 3408 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) 3409 goto wwn_prefix_err; 3410 3411 if ((alt_san_mac_blk_offset == 0) || 3412 (alt_san_mac_blk_offset == 0xFFFF)) 3413 return 0; 3414 3415 /* check capability in alternative san mac address block */ 3416 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3417 if (hw->eeprom.ops.read(hw, offset, &caps)) 3418 goto wwn_prefix_err; 3419 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3420 return 0; 3421 3422 /* get the corresponding prefix for WWNN/WWPN */ 3423 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3424 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) 3425 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3426 3427 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3428 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 3429 goto wwn_prefix_err; 3430 3431 return 0; 3432 3433 wwn_prefix_err: 3434 hw_err(hw, "eeprom read at offset %d failed\n", offset); 3435 return 0; 3436 } 3437 3438 /** 3439 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3440 * @hw: pointer to hardware structure 3441 * @enable: enable or disable switch for MAC anti-spoofing 3442 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing 3443 * 3444 **/ 3445 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3446 { 3447 int vf_target_reg = vf >> 3; 3448 int vf_target_shift = vf % 8; 3449 u32 pfvfspoof; 3450 3451 if (hw->mac.type == ixgbe_mac_82598EB) 3452 return; 3453 3454 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3455 if (enable) 3456 pfvfspoof |= BIT(vf_target_shift); 3457 else 3458 pfvfspoof &= ~BIT(vf_target_shift); 3459 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3460 } 3461 3462 /** 3463 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 3464 * @hw: pointer to hardware structure 3465 * @enable: enable or disable switch for VLAN anti-spoofing 3466 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 3467 * 3468 **/ 3469 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3470 { 3471 int vf_target_reg = vf >> 3; 3472 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 3473 u32 pfvfspoof; 3474 3475 if (hw->mac.type == ixgbe_mac_82598EB) 3476 return; 3477 3478 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3479 if (enable) 3480 pfvfspoof |= BIT(vf_target_shift); 3481 else 3482 pfvfspoof &= ~BIT(vf_target_shift); 3483 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3484 } 3485 3486 /** 3487 * ixgbe_get_device_caps_generic - Get additional device capabilities 3488 * @hw: pointer to hardware structure 3489 * @device_caps: the EEPROM word with the extra device capabilities 3490 * 3491 * This function will read the EEPROM location for the device capabilities, 3492 * and return the word through device_caps. 3493 **/ 3494 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3495 { 3496 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3497 3498 return 0; 3499 } 3500 3501 /** 3502 * ixgbe_set_rxpba_generic - Initialize RX packet buffer 3503 * @hw: pointer to hardware structure 3504 * @num_pb: number of packet buffers to allocate 3505 * @headroom: reserve n KB of headroom 3506 * @strategy: packet buffer allocation strategy 3507 **/ 3508 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, 3509 int num_pb, 3510 u32 headroom, 3511 int strategy) 3512 { 3513 u32 pbsize = hw->mac.rx_pb_size; 3514 int i = 0; 3515 u32 rxpktsize, txpktsize, txpbthresh; 3516 3517 /* Reserve headroom */ 3518 pbsize -= headroom; 3519 3520 if (!num_pb) 3521 num_pb = 1; 3522 3523 /* Divide remaining packet buffer space amongst the number 3524 * of packet buffers requested using supplied strategy. 3525 */ 3526 switch (strategy) { 3527 case (PBA_STRATEGY_WEIGHTED): 3528 /* pba_80_48 strategy weight first half of packet buffer with 3529 * 5/8 of the packet buffer space. 3530 */ 3531 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); 3532 pbsize -= rxpktsize * (num_pb / 2); 3533 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 3534 for (; i < (num_pb / 2); i++) 3535 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3536 /* fall through - configure remaining packet buffers */ 3537 case (PBA_STRATEGY_EQUAL): 3538 /* Divide the remaining Rx packet buffer evenly among the TCs */ 3539 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 3540 for (; i < num_pb; i++) 3541 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3542 break; 3543 default: 3544 break; 3545 } 3546 3547 /* 3548 * Setup Tx packet buffer and threshold equally for all TCs 3549 * TXPBTHRESH register is set in K so divide by 1024 and subtract 3550 * 10 since the largest packet we support is just over 9K. 3551 */ 3552 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 3553 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 3554 for (i = 0; i < num_pb; i++) { 3555 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 3556 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 3557 } 3558 3559 /* Clear unused TCs, if any, to zero buffer size*/ 3560 for (; i < IXGBE_MAX_PB; i++) { 3561 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 3562 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 3563 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 3564 } 3565 } 3566 3567 /** 3568 * ixgbe_calculate_checksum - Calculate checksum for buffer 3569 * @buffer: pointer to EEPROM 3570 * @length: size of EEPROM to calculate a checksum for 3571 * 3572 * Calculates the checksum for some buffer on a specified length. The 3573 * checksum calculated is returned. 3574 **/ 3575 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 3576 { 3577 u32 i; 3578 u8 sum = 0; 3579 3580 if (!buffer) 3581 return 0; 3582 3583 for (i = 0; i < length; i++) 3584 sum += buffer[i]; 3585 3586 return (u8) (0 - sum); 3587 } 3588 3589 /** 3590 * ixgbe_hic_unlocked - Issue command to manageability block unlocked 3591 * @hw: pointer to the HW structure 3592 * @buffer: command to write and where the return status will be placed 3593 * @length: length of buffer, must be multiple of 4 bytes 3594 * @timeout: time in ms to wait for command completion 3595 * 3596 * Communicates with the manageability block. On success return 0 3597 * else returns semaphore error when encountering an error acquiring 3598 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 3599 * 3600 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held 3601 * by the caller. 3602 **/ 3603 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, 3604 u32 timeout) 3605 { 3606 u32 hicr, i, fwsts; 3607 u16 dword_len; 3608 3609 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3610 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3611 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3612 } 3613 3614 /* Set bit 9 of FWSTS clearing FW reset indication */ 3615 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); 3616 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); 3617 3618 /* Check that the host interface is enabled. */ 3619 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3620 if (!(hicr & IXGBE_HICR_EN)) { 3621 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3622 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3623 } 3624 3625 /* Calculate length in DWORDs. We must be DWORD aligned */ 3626 if (length % sizeof(u32)) { 3627 hw_dbg(hw, "Buffer length failure, not aligned to dword"); 3628 return IXGBE_ERR_INVALID_ARGUMENT; 3629 } 3630 3631 dword_len = length >> 2; 3632 3633 /* The device driver writes the relevant command block 3634 * into the ram area. 3635 */ 3636 for (i = 0; i < dword_len; i++) 3637 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, 3638 i, (__force u32)cpu_to_le32(buffer[i])); 3639 3640 /* Setting this bit tells the ARC that a new command is pending. */ 3641 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); 3642 3643 for (i = 0; i < timeout; i++) { 3644 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3645 if (!(hicr & IXGBE_HICR_C)) 3646 break; 3647 usleep_range(1000, 2000); 3648 } 3649 3650 /* Check command successful completion. */ 3651 if ((timeout && i == timeout) || 3652 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) 3653 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3654 3655 return 0; 3656 } 3657 3658 /** 3659 * ixgbe_host_interface_command - Issue command to manageability block 3660 * @hw: pointer to the HW structure 3661 * @buffer: contains the command to write and where the return status will 3662 * be placed 3663 * @length: length of buffer, must be multiple of 4 bytes 3664 * @timeout: time in ms to wait for command completion 3665 * @return_data: read and return data from the buffer (true) or not (false) 3666 * Needed because FW structures are big endian and decoding of 3667 * these fields can be 8 bit or 16 bit based on command. Decoding 3668 * is not easily understood without making a table of commands. 3669 * So we will leave this up to the caller to read back the data 3670 * in these cases. 3671 * 3672 * Communicates with the manageability block. On success return 0 3673 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 3674 **/ 3675 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, 3676 u32 length, u32 timeout, 3677 bool return_data) 3678 { 3679 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3680 union { 3681 struct ixgbe_hic_hdr hdr; 3682 u32 u32arr[1]; 3683 } *bp = buffer; 3684 u16 buf_len, dword_len; 3685 s32 status; 3686 u32 bi; 3687 3688 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3689 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3690 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3691 } 3692 /* Take management host interface semaphore */ 3693 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3694 if (status) 3695 return status; 3696 3697 status = ixgbe_hic_unlocked(hw, buffer, length, timeout); 3698 if (status) 3699 goto rel_out; 3700 3701 if (!return_data) 3702 goto rel_out; 3703 3704 /* Calculate length in DWORDs */ 3705 dword_len = hdr_size >> 2; 3706 3707 /* first pull in the header so we know the buffer length */ 3708 for (bi = 0; bi < dword_len; bi++) { 3709 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3710 le32_to_cpus(&bp->u32arr[bi]); 3711 } 3712 3713 /* If there is any thing in data position pull it in */ 3714 buf_len = bp->hdr.buf_len; 3715 if (!buf_len) 3716 goto rel_out; 3717 3718 if (length < round_up(buf_len, 4) + hdr_size) { 3719 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3720 status = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3721 goto rel_out; 3722 } 3723 3724 /* Calculate length in DWORDs, add 3 for odd lengths */ 3725 dword_len = (buf_len + 3) >> 2; 3726 3727 /* Pull in the rest of the buffer (bi is where we left off) */ 3728 for (; bi <= dword_len; bi++) { 3729 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3730 le32_to_cpus(&bp->u32arr[bi]); 3731 } 3732 3733 rel_out: 3734 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3735 3736 return status; 3737 } 3738 3739 /** 3740 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware 3741 * @hw: pointer to the HW structure 3742 * @maj: driver version major number 3743 * @min: driver version minor number 3744 * @build: driver version build number 3745 * @sub: driver version sub build number 3746 * @len: length of driver_ver string 3747 * @driver_ver: driver string 3748 * 3749 * Sends driver version number to firmware through the manageability 3750 * block. On success return 0 3751 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 3752 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 3753 **/ 3754 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 3755 u8 build, u8 sub, __always_unused u16 len, 3756 __always_unused const char *driver_ver) 3757 { 3758 struct ixgbe_hic_drv_info fw_cmd; 3759 int i; 3760 s32 ret_val; 3761 3762 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 3763 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 3764 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 3765 fw_cmd.port_num = hw->bus.func; 3766 fw_cmd.ver_maj = maj; 3767 fw_cmd.ver_min = min; 3768 fw_cmd.ver_build = build; 3769 fw_cmd.ver_sub = sub; 3770 fw_cmd.hdr.checksum = 0; 3771 fw_cmd.pad = 0; 3772 fw_cmd.pad2 = 0; 3773 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 3774 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 3775 3776 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 3777 ret_val = ixgbe_host_interface_command(hw, &fw_cmd, 3778 sizeof(fw_cmd), 3779 IXGBE_HI_COMMAND_TIMEOUT, 3780 true); 3781 if (ret_val != 0) 3782 continue; 3783 3784 if (fw_cmd.hdr.cmd_or_resp.ret_status == 3785 FW_CEM_RESP_STATUS_SUCCESS) 3786 ret_val = 0; 3787 else 3788 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3789 3790 break; 3791 } 3792 3793 return ret_val; 3794 } 3795 3796 /** 3797 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo 3798 * @hw: pointer to the hardware structure 3799 * 3800 * The 82599 and x540 MACs can experience issues if TX work is still pending 3801 * when a reset occurs. This function prevents this by flushing the PCIe 3802 * buffers on the system. 3803 **/ 3804 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) 3805 { 3806 u32 gcr_ext, hlreg0, i, poll; 3807 u16 value; 3808 3809 /* 3810 * If double reset is not requested then all transactions should 3811 * already be clear and as such there is no work to do 3812 */ 3813 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) 3814 return; 3815 3816 /* 3817 * Set loopback enable to prevent any transmits from being sent 3818 * should the link come up. This assumes that the RXCTRL.RXEN bit 3819 * has already been cleared. 3820 */ 3821 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3822 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); 3823 3824 /* wait for a last completion before clearing buffers */ 3825 IXGBE_WRITE_FLUSH(hw); 3826 usleep_range(3000, 6000); 3827 3828 /* Before proceeding, make sure that the PCIe block does not have 3829 * transactions pending. 3830 */ 3831 poll = ixgbe_pcie_timeout_poll(hw); 3832 for (i = 0; i < poll; i++) { 3833 usleep_range(100, 200); 3834 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); 3835 if (ixgbe_removed(hw->hw_addr)) 3836 break; 3837 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 3838 break; 3839 } 3840 3841 /* initiate cleaning flow for buffers in the PCIe transaction layer */ 3842 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 3843 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 3844 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); 3845 3846 /* Flush all writes and allow 20usec for all transactions to clear */ 3847 IXGBE_WRITE_FLUSH(hw); 3848 udelay(20); 3849 3850 /* restore previous register values */ 3851 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3852 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3853 } 3854 3855 static const u8 ixgbe_emc_temp_data[4] = { 3856 IXGBE_EMC_INTERNAL_DATA, 3857 IXGBE_EMC_DIODE1_DATA, 3858 IXGBE_EMC_DIODE2_DATA, 3859 IXGBE_EMC_DIODE3_DATA 3860 }; 3861 static const u8 ixgbe_emc_therm_limit[4] = { 3862 IXGBE_EMC_INTERNAL_THERM_LIMIT, 3863 IXGBE_EMC_DIODE1_THERM_LIMIT, 3864 IXGBE_EMC_DIODE2_THERM_LIMIT, 3865 IXGBE_EMC_DIODE3_THERM_LIMIT 3866 }; 3867 3868 /** 3869 * ixgbe_get_ets_data - Extracts the ETS bit data 3870 * @hw: pointer to hardware structure 3871 * @ets_cfg: extected ETS data 3872 * @ets_offset: offset of ETS data 3873 * 3874 * Returns error code. 3875 **/ 3876 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, 3877 u16 *ets_offset) 3878 { 3879 s32 status; 3880 3881 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); 3882 if (status) 3883 return status; 3884 3885 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) 3886 return IXGBE_NOT_IMPLEMENTED; 3887 3888 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); 3889 if (status) 3890 return status; 3891 3892 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) 3893 return IXGBE_NOT_IMPLEMENTED; 3894 3895 return 0; 3896 } 3897 3898 /** 3899 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data 3900 * @hw: pointer to hardware structure 3901 * 3902 * Returns the thermal sensor data structure 3903 **/ 3904 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) 3905 { 3906 s32 status; 3907 u16 ets_offset; 3908 u16 ets_cfg; 3909 u16 ets_sensor; 3910 u8 num_sensors; 3911 u8 i; 3912 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3913 3914 /* Only support thermal sensors attached to physical port 0 */ 3915 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) 3916 return IXGBE_NOT_IMPLEMENTED; 3917 3918 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3919 if (status) 3920 return status; 3921 3922 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3923 if (num_sensors > IXGBE_MAX_SENSORS) 3924 num_sensors = IXGBE_MAX_SENSORS; 3925 3926 for (i = 0; i < num_sensors; i++) { 3927 u8 sensor_index; 3928 u8 sensor_location; 3929 3930 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), 3931 &ets_sensor); 3932 if (status) 3933 return status; 3934 3935 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3936 IXGBE_ETS_DATA_INDEX_SHIFT); 3937 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3938 IXGBE_ETS_DATA_LOC_SHIFT); 3939 3940 if (sensor_location != 0) { 3941 status = hw->phy.ops.read_i2c_byte(hw, 3942 ixgbe_emc_temp_data[sensor_index], 3943 IXGBE_I2C_THERMAL_SENSOR_ADDR, 3944 &data->sensor[i].temp); 3945 if (status) 3946 return status; 3947 } 3948 } 3949 3950 return 0; 3951 } 3952 3953 /** 3954 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds 3955 * @hw: pointer to hardware structure 3956 * 3957 * Inits the thermal sensor thresholds according to the NVM map 3958 * and save off the threshold and location values into mac.thermal_sensor_data 3959 **/ 3960 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) 3961 { 3962 s32 status; 3963 u16 ets_offset; 3964 u16 ets_cfg; 3965 u16 ets_sensor; 3966 u8 low_thresh_delta; 3967 u8 num_sensors; 3968 u8 therm_limit; 3969 u8 i; 3970 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3971 3972 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); 3973 3974 /* Only support thermal sensors attached to physical port 0 */ 3975 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) 3976 return IXGBE_NOT_IMPLEMENTED; 3977 3978 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3979 if (status) 3980 return status; 3981 3982 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> 3983 IXGBE_ETS_LTHRES_DELTA_SHIFT); 3984 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3985 if (num_sensors > IXGBE_MAX_SENSORS) 3986 num_sensors = IXGBE_MAX_SENSORS; 3987 3988 for (i = 0; i < num_sensors; i++) { 3989 u8 sensor_index; 3990 u8 sensor_location; 3991 3992 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { 3993 hw_err(hw, "eeprom read at offset %d failed\n", 3994 ets_offset + 1 + i); 3995 continue; 3996 } 3997 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3998 IXGBE_ETS_DATA_INDEX_SHIFT); 3999 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 4000 IXGBE_ETS_DATA_LOC_SHIFT); 4001 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; 4002 4003 hw->phy.ops.write_i2c_byte(hw, 4004 ixgbe_emc_therm_limit[sensor_index], 4005 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); 4006 4007 if (sensor_location == 0) 4008 continue; 4009 4010 data->sensor[i].location = sensor_location; 4011 data->sensor[i].caution_thresh = therm_limit; 4012 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; 4013 } 4014 4015 return 0; 4016 } 4017 4018 /** 4019 * ixgbe_get_orom_version - Return option ROM from EEPROM 4020 * 4021 * @hw: pointer to hardware structure 4022 * @nvm_ver: pointer to output structure 4023 * 4024 * if valid option ROM version, nvm_ver->or_valid set to true 4025 * else nvm_ver->or_valid is false. 4026 **/ 4027 void ixgbe_get_orom_version(struct ixgbe_hw *hw, 4028 struct ixgbe_nvm_version *nvm_ver) 4029 { 4030 u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; 4031 4032 nvm_ver->or_valid = false; 4033 /* Option Rom may or may not be present. Start with pointer */ 4034 hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); 4035 4036 /* make sure offset is valid */ 4037 if (offset == 0x0 || offset == NVM_INVALID_PTR) 4038 return; 4039 4040 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); 4041 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); 4042 4043 /* option rom exists and is valid */ 4044 if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || 4045 eeprom_cfg_blkl == NVM_VER_INVALID || 4046 eeprom_cfg_blkh == NVM_VER_INVALID) 4047 return; 4048 4049 nvm_ver->or_valid = true; 4050 nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; 4051 nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | 4052 (eeprom_cfg_blkh >> NVM_OROM_SHIFT); 4053 nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; 4054 } 4055 4056 /** 4057 * ixgbe_get_oem_prod_version Etrack ID from EEPROM 4058 * 4059 * @hw: pointer to hardware structure 4060 * @nvm_ver: pointer to output structure 4061 * 4062 * if valid OEM product version, nvm_ver->oem_valid set to true 4063 * else nvm_ver->oem_valid is false. 4064 **/ 4065 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, 4066 struct ixgbe_nvm_version *nvm_ver) 4067 { 4068 u16 rel_num, prod_ver, mod_len, cap, offset; 4069 4070 nvm_ver->oem_valid = false; 4071 hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); 4072 4073 /* Return is offset to OEM Product Version block is invalid */ 4074 if (offset == 0x0 || offset == NVM_INVALID_PTR) 4075 return; 4076 4077 /* Read product version block */ 4078 hw->eeprom.ops.read(hw, offset, &mod_len); 4079 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); 4080 4081 /* Return if OEM product version block is invalid */ 4082 if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || 4083 (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) 4084 return; 4085 4086 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); 4087 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); 4088 4089 /* Return if version is invalid */ 4090 if ((rel_num | prod_ver) == 0x0 || 4091 rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) 4092 return; 4093 4094 nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; 4095 nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; 4096 nvm_ver->oem_release = rel_num; 4097 nvm_ver->oem_valid = true; 4098 } 4099 4100 /** 4101 * ixgbe_get_etk_id - Return Etrack ID from EEPROM 4102 * 4103 * @hw: pointer to hardware structure 4104 * @nvm_ver: pointer to output structure 4105 * 4106 * word read errors will return 0xFFFF 4107 **/ 4108 void ixgbe_get_etk_id(struct ixgbe_hw *hw, 4109 struct ixgbe_nvm_version *nvm_ver) 4110 { 4111 u16 etk_id_l, etk_id_h; 4112 4113 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) 4114 etk_id_l = NVM_VER_INVALID; 4115 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) 4116 etk_id_h = NVM_VER_INVALID; 4117 4118 /* The word order for the version format is determined by high order 4119 * word bit 15. 4120 */ 4121 if ((etk_id_h & NVM_ETK_VALID) == 0) { 4122 nvm_ver->etk_id = etk_id_h; 4123 nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); 4124 } else { 4125 nvm_ver->etk_id = etk_id_l; 4126 nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); 4127 } 4128 } 4129 4130 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) 4131 { 4132 u32 rxctrl; 4133 4134 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4135 if (rxctrl & IXGBE_RXCTRL_RXEN) { 4136 if (hw->mac.type != ixgbe_mac_82598EB) { 4137 u32 pfdtxgswc; 4138 4139 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4140 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { 4141 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; 4142 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4143 hw->mac.set_lben = true; 4144 } else { 4145 hw->mac.set_lben = false; 4146 } 4147 } 4148 rxctrl &= ~IXGBE_RXCTRL_RXEN; 4149 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); 4150 } 4151 } 4152 4153 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) 4154 { 4155 u32 rxctrl; 4156 4157 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4158 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); 4159 4160 if (hw->mac.type != ixgbe_mac_82598EB) { 4161 if (hw->mac.set_lben) { 4162 u32 pfdtxgswc; 4163 4164 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4165 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; 4166 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4167 hw->mac.set_lben = false; 4168 } 4169 } 4170 } 4171 4172 /** ixgbe_mng_present - returns true when management capability is present 4173 * @hw: pointer to hardware structure 4174 **/ 4175 bool ixgbe_mng_present(struct ixgbe_hw *hw) 4176 { 4177 u32 fwsm; 4178 4179 if (hw->mac.type < ixgbe_mac_82599EB) 4180 return false; 4181 4182 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 4183 4184 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); 4185 } 4186 4187 /** 4188 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 4189 * @hw: pointer to hardware structure 4190 * @speed: new link speed 4191 * @autoneg_wait_to_complete: true when waiting for completion is needed 4192 * 4193 * Set the link speed in the MAC and/or PHY register and restarts link. 4194 */ 4195 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 4196 ixgbe_link_speed speed, 4197 bool autoneg_wait_to_complete) 4198 { 4199 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4200 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4201 s32 status = 0; 4202 u32 speedcnt = 0; 4203 u32 i = 0; 4204 bool autoneg, link_up = false; 4205 4206 /* Mask off requested but non-supported speeds */ 4207 status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); 4208 if (status) 4209 return status; 4210 4211 speed &= link_speed; 4212 4213 /* Try each speed one by one, highest priority first. We do this in 4214 * software because 10Gb fiber doesn't support speed autonegotiation. 4215 */ 4216 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 4217 speedcnt++; 4218 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 4219 4220 /* Set the module link speed */ 4221 switch (hw->phy.media_type) { 4222 case ixgbe_media_type_fiber: 4223 hw->mac.ops.set_rate_select_speed(hw, 4224 IXGBE_LINK_SPEED_10GB_FULL); 4225 break; 4226 case ixgbe_media_type_fiber_qsfp: 4227 /* QSFP module automatically detects MAC link speed */ 4228 break; 4229 default: 4230 hw_dbg(hw, "Unexpected media type\n"); 4231 break; 4232 } 4233 4234 /* Allow module to change analog characteristics (1G->10G) */ 4235 msleep(40); 4236 4237 status = hw->mac.ops.setup_mac_link(hw, 4238 IXGBE_LINK_SPEED_10GB_FULL, 4239 autoneg_wait_to_complete); 4240 if (status) 4241 return status; 4242 4243 /* Flap the Tx laser if it has not already been done */ 4244 if (hw->mac.ops.flap_tx_laser) 4245 hw->mac.ops.flap_tx_laser(hw); 4246 4247 /* Wait for the controller to acquire link. Per IEEE 802.3ap, 4248 * Section 73.10.2, we may have to wait up to 500ms if KR is 4249 * attempted. 82599 uses the same timing for 10g SFI. 4250 */ 4251 for (i = 0; i < 5; i++) { 4252 /* Wait for the link partner to also set speed */ 4253 msleep(100); 4254 4255 /* If we have link, just jump out */ 4256 status = hw->mac.ops.check_link(hw, &link_speed, 4257 &link_up, false); 4258 if (status) 4259 return status; 4260 4261 if (link_up) 4262 goto out; 4263 } 4264 } 4265 4266 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 4267 speedcnt++; 4268 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 4269 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 4270 4271 /* Set the module link speed */ 4272 switch (hw->phy.media_type) { 4273 case ixgbe_media_type_fiber: 4274 hw->mac.ops.set_rate_select_speed(hw, 4275 IXGBE_LINK_SPEED_1GB_FULL); 4276 break; 4277 case ixgbe_media_type_fiber_qsfp: 4278 /* QSFP module automatically detects link speed */ 4279 break; 4280 default: 4281 hw_dbg(hw, "Unexpected media type\n"); 4282 break; 4283 } 4284 4285 /* Allow module to change analog characteristics (10G->1G) */ 4286 msleep(40); 4287 4288 status = hw->mac.ops.setup_mac_link(hw, 4289 IXGBE_LINK_SPEED_1GB_FULL, 4290 autoneg_wait_to_complete); 4291 if (status) 4292 return status; 4293 4294 /* Flap the Tx laser if it has not already been done */ 4295 if (hw->mac.ops.flap_tx_laser) 4296 hw->mac.ops.flap_tx_laser(hw); 4297 4298 /* Wait for the link partner to also set speed */ 4299 msleep(100); 4300 4301 /* If we have link, just jump out */ 4302 status = hw->mac.ops.check_link(hw, &link_speed, &link_up, 4303 false); 4304 if (status) 4305 return status; 4306 4307 if (link_up) 4308 goto out; 4309 } 4310 4311 /* We didn't get link. Configure back to the highest speed we tried, 4312 * (if there was more than one). We call ourselves back with just the 4313 * single highest speed that the user requested. 4314 */ 4315 if (speedcnt > 1) 4316 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 4317 highest_link_speed, 4318 autoneg_wait_to_complete); 4319 4320 out: 4321 /* Set autoneg_advertised value based on input link speed */ 4322 hw->phy.autoneg_advertised = 0; 4323 4324 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 4325 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 4326 4327 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 4328 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 4329 4330 return status; 4331 } 4332 4333 /** 4334 * ixgbe_set_soft_rate_select_speed - Set module link speed 4335 * @hw: pointer to hardware structure 4336 * @speed: link speed to set 4337 * 4338 * Set module link speed via the soft rate select. 4339 */ 4340 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, 4341 ixgbe_link_speed speed) 4342 { 4343 s32 status; 4344 u8 rs, eeprom_data; 4345 4346 switch (speed) { 4347 case IXGBE_LINK_SPEED_10GB_FULL: 4348 /* one bit mask same as setting on */ 4349 rs = IXGBE_SFF_SOFT_RS_SELECT_10G; 4350 break; 4351 case IXGBE_LINK_SPEED_1GB_FULL: 4352 rs = IXGBE_SFF_SOFT_RS_SELECT_1G; 4353 break; 4354 default: 4355 hw_dbg(hw, "Invalid fixed module speed\n"); 4356 return; 4357 } 4358 4359 /* Set RS0 */ 4360 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4361 IXGBE_I2C_EEPROM_DEV_ADDR2, 4362 &eeprom_data); 4363 if (status) { 4364 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); 4365 return; 4366 } 4367 4368 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4369 4370 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4371 IXGBE_I2C_EEPROM_DEV_ADDR2, 4372 eeprom_data); 4373 if (status) { 4374 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); 4375 return; 4376 } 4377 4378 /* Set RS1 */ 4379 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4380 IXGBE_I2C_EEPROM_DEV_ADDR2, 4381 &eeprom_data); 4382 if (status) { 4383 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); 4384 return; 4385 } 4386 4387 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4388 4389 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4390 IXGBE_I2C_EEPROM_DEV_ADDR2, 4391 eeprom_data); 4392 if (status) { 4393 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); 4394 return; 4395 } 4396 } 4397