// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
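/* Usage sketch (illustrative only, not part of this driver): every
 * successful igb_get_hw_semaphore_i210() must be paired with
 * igb_put_hw_semaphore() on all exit paths, along the lines of:
 *
 *	if (igb_get_hw_semaphore_i210(hw))
 *		return -E1000_ERR_NVM;
 *	... touch SWSM/SW_FW_SYNC protected state ...
 *	igb_put_hw_semaphore(hw);
 */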
/**
 * igb_acquire_nvm_i210 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_release_nvm_i210 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
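/* Layout note plus a hedged usage sketch: in E1000_SW_FW_SYNC the software
 * ownership bits live in the low 16 bits and the firmware mirror sits at
 * (mask << 16), which is why the acquire path tests (fwmask | swmask).
 * A caller guarding PHY access might look like this (E1000_SWFW_PHY0_SM is
 * the port-0 PHY mask defined elsewhere in this driver):
 *
 *	if (igb_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM))
 *		return -E1000_ERR_SWFW_SYNC;
 *	... MDIO traffic ...
 *	igb_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
 */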
/**
 * igb_read_nvm_srrd_i210 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word read from the Shadow RAM
 *
 * Reads a 16 bit word from the Shadow RAM using the EERD register.
 * Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 * igb_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igb_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If e1000_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
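/* Note on the two burst loops above: the long-hand conditional is simply
 *
 *	count = min_t(u16, words - i, E1000_EERD_EEWR_MAX_COUNT);
 *
 * i.e. each semaphore hold covers at most E1000_EERD_EEWR_MAX_COUNT words,
 * so firmware can still take over the NVM between bursts.
 */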
/**
 * igb_read_invm_word_i210 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
 * @hw: pointer to the HW structure
 * @offset: offset of the word to read
 * @words: number of words to read
 * @data: pointer to the data read
 *
 * Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 words __always_unused, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
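/* Usage sketch (illustrative): on flash-less parts the wrapper above is
 * installed as hw->nvm.ops.read, so e.g. the three MAC address words are
 * fetched exactly as they would be from a real EEPROM:
 *
 *	u16 mac[3];
 *	if (hw->nvm.ops.read(hw, NVM_MAC_ADDR, 3, mac))
 *		... handle the word-not-found error ...
 */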
/**
 * igb_read_invm_version - Reads iNVM version and image type
 * @hw: pointer to the HW structure
 * @invm_ver: version structure for the version read
 *
 * Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in last location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
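/* Usage sketch (illustrative): a consumer such as an ethtool firmware
 * version query could format the result along the lines of:
 *
 *	struct e1000_fw_version ver;
 *	if (!igb_read_invm_version(hw, &ver))
 *		pr_info("iNVM image %u.%u\n", ver.invm_major, ver.invm_minor);
 */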
/**
 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {

		/* Temporarily replace the semaphore-grabbing read function
		 * with one that skips the semaphore, since we already hold
		 * it here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 * igb_pool_flash_update_done_i210 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igb_get_flash_presence_i210 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 *
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 * igb_update_flash_i210 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
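/* Checksum arithmetic used above, for reference: the checksum word is
 * chosen so that the first NVM_CHECKSUM_REG + 1 words sum to NVM_SUM
 * (0xBABA) in 16-bit arithmetic:
 *
 *	checksum = (u16)NVM_SUM - (word[0] + ... + word[NVM_CHECKSUM_REG - 1]);
 *
 * which is exactly what igb_update_nvm_checksum_i210() writes back before
 * committing the shadow RAM to flash.
 */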
/**
 * igb_valid_led_default_i210 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration. If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * __igb_access_xmdio_reg - Read/write XMDIO register
 * @hw: pointer to the HW structure
 * @address: XMDIO address to program
 * @dev_addr: device address to program
 * @data: pointer to value to read/write from/to the XMDIO address
 * @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 * igb_read_xmdio_reg - Read XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 * igb_write_xmdio_reg - Write XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
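/* Usage sketch (illustrative; the device/register numbers below are
 * placeholders, not defines from this driver): reading clause-45 style
 * MMD register 0 of device 3 through the sequence above would be
 *
 *	u16 val;
 *	s32 err = igb_read_xmdio_reg(hw, 0x0000, 3, &val);
 *
 * The helper programs E1000_MMDAC with the device address, latches the
 * register address through E1000_MMDAAD, then switches MMDAC to data mode
 * for the actual transfer.
 */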
/**
 * igb_init_nvm_params_i210 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}

/**
 * igb_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
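/* Outline of the retry pattern above (descriptive summary only):
 *  1. read E1000_PHY_PLL_FREQ_REG through the 82580 PHY accessors; stop as
 *     soon as the PLL no longer reports E1000_PHY_PLL_UNCONF
 *  2. otherwise reset the PHY, power-cycle it through PMCSR D3hot/D0,
 *     rewrite E1000_EEARBC_I210, and retry up to E1000_MAX_PLL_TRIES times
 */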
/**
 * igb_get_cfg_done_i210 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and 0 is
 * returned. If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL_I210) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	return 0;
}
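/* Timing note (derived from the loop above, not an extra guarantee): with
 * PHY_CFG_TIMEOUT iterations of usleep_range(1000, 2000), the wait for the
 * EEMNGCTL config-done bit is bounded at roughly 1-2 ms * PHY_CFG_TIMEOUT,
 * after which the function still returns 0 so EEPROM-less parts can reset.
 */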