/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * igb_acquire_nvm_i210 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
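/* Usage sketch (illustrative only, not part of the driver): callers go
 * through the nvm.ops function pointers rather than calling the i210
 * helpers directly, bracketing every EEPROM access with acquire/release:
 *
 *	u16 word;
 *
 *	if (!hw->nvm.ops.acquire(hw)) {
 *		igb_read_nvm_eerd(hw, 0, 1, &word);
 *		hw->nvm.ops.release(hw);
 *	}
 *
 * igb_read_nvm_srrd_i210() below follows exactly this pattern per burst.
 */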
/**
 * igb_release_nvm_i210 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow Ram to read
 * @words: number of words to read
 * @data: word read from the Shadow Ram
 *
 * Reads a 16 bit word from the Shadow Ram using the EERD register.
 * Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
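/* Burst-size note (illustrative example, assuming E1000_EERD_EEWR_MAX_COUNT
 * is 512): a 1200-word read through igb_read_nvm_srrd_i210() is issued as
 * bursts of 512, 512 and 176 words, with the SW/FW semaphore dropped and
 * re-acquired between bursts so firmware can take the NVM over in between.
 */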
/**
 * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If igb_update_nvm_checksum is not called after this function, the
 * Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		/* Assume timeout until the DONE bit is observed; otherwise a
		 * poll that never sees DONE would be reported as success.
		 */
		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If e1000_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
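/* Illustrative flow (sketch, not driver code): a Shadow RAM write only
 * becomes durable after the checksum update, which also commits the
 * shadow contents to flash:
 *
 *	hw->nvm.ops.write(hw, offset, 1, &word);   <- igb_write_nvm_srwr_i210
 *	hw->nvm.ops.update(hw);                    <- igb_update_nvm_checksum_i210
 */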
/**
 * igb_read_invm_word_i210 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
 * @hw: pointer to the HW structure
 * @offset: offset of the word to read
 * @words: number of words to read
 * @data: pointer to the data read
 *
 * Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 words __always_unused, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
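/* iNVM layout note (summary of the walk in igb_read_invm_word_i210()
 * above): each 32-bit iNVM dword begins with a record type. An
 * uninitialized record terminates the scan, CSR-autoload and RSA-key
 * records are skipped by advancing the index past their payload dwords,
 * and a word-autoload record packs the emulated EEPROM word address and
 * its 16-bit data into a single dword.
 */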
/**
 * igb_read_invm_version - Reads iNVM version and image type
 * @hw: pointer to the HW structure
 * @invm_ver: version structure for the version read
 *
 * Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in last location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
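/* Decoding note for igb_read_invm_version() above: two version fields are
 * packed per dword, so E1000_INVM_VER_FIELD_ONE is shifted down by 3 and
 * E1000_INVM_VER_FIELD_TWO by 13; the resulting word is then split into
 * major/minor parts with E1000_INVM_MAJOR_MASK, E1000_INVM_MAJOR_SHIFT
 * and E1000_INVM_MINOR_MASK.
 */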
/**
 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {

		/* Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 * igb_pool_flash_update_done_i210 - Poll FLUDONE status.
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igb_get_flash_presence_i210 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 *
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 * igb_update_flash_i210 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
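/* Commit sequence used by igb_update_flash_i210() above (sketch): wait
 * for any in-progress update to finish (EECD.FLUDONE set), trigger a new
 * commit by setting EECD.FLUPD_I210, then poll FLUDONE again to confirm
 * the EEPROM contents reached the flash.
 */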
/**
 * igb_valid_led_default_i210 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration. If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * __igb_access_xmdio_reg - Read/write XMDIO register
 * @hw: pointer to the HW structure
 * @address: XMDIO address to program
 * @dev_addr: device address to program
 * @data: pointer to value to read/write from/to the XMDIO address
 * @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 * igb_read_xmdio_reg - Read XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 * igb_write_xmdio_reg - Write XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
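/* Usage sketch (illustrative only): reading an MMD register through the
 * XMDIO helpers above, e.g. from the PMA/PMD device:
 *
 *	u16 val;
 *	s32 err = igb_read_xmdio_reg(hw, 0x0000, MDIO_MMD_PMAPMD, &val);
 *
 * MDIO_MMD_PMAPMD (device address 1, from <linux/mdio.h>) is used here
 * purely as an example device address.
 */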
/**
 * igb_init_nvm_params_i210 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}

/**
 * igb_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					    E1000_PHY_PLL_FREQ_REG), &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}