// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"

/**
 * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for the EEPROM access grant
 * bit. Return success if the access grant bit is set, else clear the
 * request for EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
	return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM.
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;
	u32 swsm;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usleep_range(500, 600);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._base.clear_semaphore_once) {
			hw->dev_spec._base.clear_semaphore_once = false;
			igc_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usleep_range(500, 600);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(IGC_SWSM);
		wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usleep_range(500, 600);
	}

	if (i == timeout) {
		/* Release semaphores */
		igc_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return 0;
}

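/* The FW-semaphore step above relies on a write-then-read-back latch:
 * software sets SWESMBI, but the write only sticks if firmware does not
 * already own the semaphore, so the driver re-reads SWSM to confirm
 * ownership. A minimal sketch of that pattern in isolation (illustrative
 * only; igc_try_latch_swesmbi() is a hypothetical helper, not part of this
 * driver):
 *
 *	static bool igc_try_latch_swesmbi(struct igc_hw *hw)
 *	{
 *		u32 swsm = rd32(IGC_SWSM);
 *
 *		wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
 *
 *		// The bit reads back as set only if we now own the semaphore.
 *		return !!(rd32(IGC_SWSM) & IGC_SWSM_SWESMBI);
 *	}
 */
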
/**
 * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	s32 i = 0, timeout = 200;
	u32 fwmask = mask << 16;
	u32 swmask = mask;
	s32 ret_val = 0;
	u32 swfw_sync;

	while (i < timeout) {
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igc_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Releasing the resource requires first getting the HW semaphore.
	 * If we fail to get the semaphore, there is nothing we can do,
	 * except log an error and quit. We are not allowed to hang here
	 * indefinitely, as it may cause denial of service or system crash.
	 */
	if (igc_get_hw_semaphore_i225(hw)) {
		hw_dbg("Failed to release SW_FW_SYNC.\n");
		return;
	}

	swfw_sync = rd32(IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore(hw);
}

/**
 * igc_read_nvm_srrd_i225 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word(s) read from the Shadow RAM
 *
 * Reads 16-bit words from the Shadow RAM using the EERD register.
 * Uses the necessary synchronization semaphores.
 */
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure. However, it is more efficient
	 * to read in bursts than to synchronize access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);

		status = hw->nvm.ops.acquire(hw);
		if (status)
			break;

		status = igc_read_nvm_eerd(hw, offset + i, count, data + i);
		hw->nvm.ops.release(hw);
		if (status)
			break;
	}

	return status;
}

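/* Example of how a caller might use the burst-reading op above (a sketch,
 * not part of the driver; `offset' is hypothetical). Reads of up to
 * IGC_EERD_EEWR_MAX_COUNT words complete in a single acquire/release cycle;
 * longer reads are split so firmware can take the semaphore between bursts:
 *
 *	u16 buf[3];
 *	s32 ret = hw->nvm.ops.read(hw, offset, ARRAY_SIZE(buf), buf);
 *
 *	if (ret)
 *		hw_dbg("Shadow RAM read failed\n");
 */
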
/**
 * igc_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 */
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -IGC_ERR_NVM;
	u32 attempts = 100000;
	u32 i, k, eewr = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
	    words == 0) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		return ret_val;
	}

	for (i = 0; i < words; i++) {
		ret_val = -IGC_ERR_NVM;
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		       (data[i] << IGC_NVM_RW_REG_DATA) |
		       IGC_NVM_RW_REG_START;

		wr32(IGC_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE &
			    rd32(IGC_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

	return ret_val;
}

/**
 * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and the Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If an error code is returned, the data and Shadow RAM may be
 * inconsistent - the buffer may have been only partially written.
 */
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure. However, it is more efficient
	 * to write in bursts than to synchronize access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);

		status = hw->nvm.ops.acquire(hw);
		if (status)
			break;

		status = igc_write_nvm_srwr(hw, offset + i, count, data + i);
		hw->nvm.ops.release(hw);
		if (status)
			break;
	}

	return status;
}

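/* Caller-side sketch for the write path above (illustrative only;
 * `cfg_offset' and `cfg_word' are hypothetical). Per the kerneldoc warning,
 * a write should be followed by an update so the checksum and the flash
 * copy stay consistent with the Shadow RAM; ops.update is NULL on
 * flash-less parts, hence the extra check:
 *
 *	s32 ret = hw->nvm.ops.write(hw, cfg_offset, 1, &cfg_word);
 *
 *	if (!ret && hw->nvm.ops.update)
 *		ret = hw->nvm.ops.update(hw);
 */
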
/**
 * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
			   u16 *data);
	s32 status = 0;

	status = hw->nvm.ops.acquire(hw);
	if (status)
		goto out;

	/* Replace the semaphore-grabbing read function with one that
	 * skips the semaphore, since we already hold it here.
	 */
	read_op_ptr = hw->nvm.ops.read;
	hw->nvm.ops.read = igc_read_nvm_eerd;

	status = igc_validate_nvm_checksum(hw);

	/* Revert to the original read operation. */
	hw->nvm.ops.read = read_op_ptr;

	hw->nvm.ops.release(hw);

out:
	return status;
}

/**
 * igc_pool_flash_update_done_i225 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 */
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
	s32 ret_val = -IGC_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(IGC_EECD);
		if (reg & IGC_EECD_FLUDONE_I225) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igc_update_flash_i225 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 */
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igc_pool_flash_update_done_i225(hw);
	if (ret_val == -IGC_ERR_NVM) {
		hw_dbg("Flash update timed out\n");
		goto out;
	}

	flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
	wr32(IGC_EECD, flup);

	ret_val = igc_pool_flash_update_done_i225(hw);
	if (ret_val)
		hw_dbg("Flash update timed out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}

/**
 * igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next, commit the EEPROM data onto the flash.
 */
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	u16 checksum = 0;
	s32 ret_val = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails.
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	ret_val = hw->nvm.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
	 * because we do not want to take the synchronization
	 * semaphores twice here.
	 */

	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	checksum = (u16)NVM_SUM - checksum;
	ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
				     &checksum);
	if (ret_val) {
		hw->nvm.ops.release(hw);
		hw_dbg("NVM Write Error while updating checksum.\n");
		goto out;
	}

	hw->nvm.ops.release(hw);

	ret_val = igc_update_flash_i225(hw);

out:
	return ret_val;
}

/**
 * igc_get_flash_presence_i225 - Check if flash device is detected
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
	bool ret_val = false;
	u32 eec = 0;

	eec = rd32(IGC_EECD);
	if (eec & IGC_EECD_FLASH_DETECTED_I225)
		ret_val = true;

	return ret_val;
}

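/* The checksum scheme used by the validate/update pair above: the words
 * from offset 0 up to and including NVM_CHECKSUM_REG must sum to NVM_SUM
 * (0xBABA) in 16-bit arithmetic, so the update path stores
 * NVM_SUM - sum(words 0..NVM_CHECKSUM_REG - 1) in the checksum word. A
 * host-side sketch of the same check (illustrative; assumes `words[]'
 * already holds the region):
 *
 *	bool valid;
 *	u16 sum = 0;
 *	u16 i;
 *
 *	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
 *		sum += words[i];
 *
 *	valid = (sum == (u16)NVM_SUM);
 */
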
/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;

	/* NVM Function Pointers */
	if (igc_get_flash_presence_i225(hw)) {
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		nvm->ops.read = igc_read_nvm_eerd;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return 0;
}

/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on the setting in the dev_spec structure.
 */
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
		     bool adv100M)
{
	u32 ipcnfg, eeer;

	ipcnfg = rd32(IGC_IPCNFG);
	eeer = rd32(IGC_EEER);

	/* enable or disable per user setting */
	if (hw->dev_spec._base.eee_enable) {
		u32 eee_su = rd32(IGC_EEE_SU);

		if (adv100M)
			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

		if (adv2p5G)
			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			 IGC_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");
	} else {
		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
			    IGC_IPCNFG_EEE_100M_AN);
		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			  IGC_EEER_LPI_FC);
	}
	wr32(IGC_IPCNFG, ipcnfg);
	wr32(IGC_EEER, eeer);
	rd32(IGC_IPCNFG);
	rd32(IGC_EEER);

	return IGC_SUCCESS;
}

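/* Illustrative call site for igc_set_eee_i225() (a sketch, not part of this
 * file): advertising EEE at all three supported speeds once the user has
 * enabled it. The dev_spec flag is what actually gates the register
 * programming inside the helper:
 *
 *	hw->dev_spec._base.eee_enable = true;
 *	ret = igc_set_eee_i225(hw, true, true, true);
 */
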
/**
 * igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
	u16 speed, duplex;
	s32 size;

	/* If we do not have link, LTR thresholds are zero. */
	if (link) {
		hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);

		/* Check if using copper interface with EEE enabled or if the
		 * link speed is 10 Mbps.
		 */
		if (hw->dev_spec._base.eee_enable &&
		    speed != SPEED_10) {
			/* EEE enabled, so send LTRMAX threshold. */
			ltrc = rd32(IGC_LTRC) |
			       IGC_LTRC_EEEMS_EN;
			wr32(IGC_LTRC, ltrc);

			/* Calculate tw_system (nsec). */
			if (speed == SPEED_100) {
				tw_system = ((rd32(IGC_EEE_SU) &
					      IGC_TW_SYSTEM_100_MASK) >>
					     IGC_TW_SYSTEM_100_SHIFT) * 500;
			} else {
				tw_system = (rd32(IGC_EEE_SU) &
					     IGC_TW_SYSTEM_1000_MASK) * 500;
			}
		} else {
			tw_system = 0;
		}

		/* Get the Rx packet buffer size. */
		size = rd32(IGC_RXPBS) &
		       IGC_RXPBS_SIZE_I225_MASK;

		/* Calculations vary based on DMAC settings. */
		if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
			size -= (rd32(IGC_DMACR) &
				 IGC_DMACR_DMACTHR_MASK) >>
				IGC_DMACR_DMACTHR_SHIFT;
			/* Convert size to bits. */
			size *= 1024 * 8;
		} else {
			/* Convert size from KB to bytes, then to bits. */
			size *= 1024;
			size *= 8;
		}

		if (size < 0) {
			hw_dbg("Invalid effective Rx buffer size %d\n",
			       size);
			return -IGC_ERR_CONFIG;
		}

		/* Calculate the thresholds. Since speed is in Mbps, simplify
		 * the calculation by multiplying size/speed by 1000 for the
		 * result to be in nsec before dividing by the scale in nsec.
		 * Set the scale such that the LTR threshold fits in the
		 * register.
		 */
		ltr_min = (1000 * size) / speed;
		ltr_max = ltr_min + tw_system;
		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
			    IGC_LTRMINV_SCALE_32768;
		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
			    IGC_LTRMAXV_SCALE_32768;
		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
		ltr_min -= 1;
		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
		ltr_max -= 1;

		/* Only write the LTR thresholds if they differ from before. */
		ltrv = rd32(IGC_LTRMINV);
		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
			       (scale_min << IGC_LTRMINV_SCALE_SHIFT);
			wr32(IGC_LTRMINV, ltrv);
		}

		ltrv = rd32(IGC_LTRMAXV);
		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
			       (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
			wr32(IGC_LTRMAXV, ltrv);
		}
	}

	return IGC_SUCCESS;
}
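
/* Worked example of the threshold math above, with illustrative numbers:
 * a 32KB effective Rx buffer gives size = 32 * 1024 * 8 = 262144 bits. At
 * SPEED_1000, ltr_min = (1000 * 262144) / 1000 = 262144 nsec; since
 * 262144 / 1024 = 256 < 1024, the 1024ns scale is selected and the
 * programmed minimum is 262144 / 1024 - 1 = 255. If EEE contributes a
 * hypothetical tw_system of 17 * 500 = 8500 nsec, then
 * ltr_max = 270644 nsec, which also fits the 1024ns scale and programs
 * 270644 / 1024 - 1 = 263.
 */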