1 /******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Driver 4 * Copyright(c) 2013 - 2014 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27 #include "i40e_prototype.h" 28 29 /** 30 * i40e_init_nvm_ops - Initialize NVM function pointers 31 * @hw: pointer to the HW structure 32 * 33 * Setup the function pointers and the NVM info structure. Should be called 34 * once per NVM initialization, e.g. inside the i40e_init_shared_code(). 35 * Please notice that the NVM term is used here (& in all methods covered 36 * in this file) as an equivalent of the FLASH part mapped into the SR. 37 * We are accessing FLASH always thru the Shadow RAM. 38 **/ 39 i40e_status i40e_init_nvm(struct i40e_hw *hw) 40 { 41 struct i40e_nvm_info *nvm = &hw->nvm; 42 i40e_status ret_code = 0; 43 u32 fla, gens; 44 u8 sr_size; 45 46 /* The SR size is stored regardless of the nvm programming mode 47 * as the blank mode may be used in the factory line. 
48 */ 49 gens = rd32(hw, I40E_GLNVM_GENS); 50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> 51 I40E_GLNVM_GENS_SR_SIZE_SHIFT); 52 /* Switching to words (sr_size contains power of 2KB) */ 53 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; 54 55 /* Check if we are in the normal or blank NVM programming mode */ 56 fla = rd32(hw, I40E_GLNVM_FLA); 57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ 58 /* Max NVM timeout */ 59 nvm->timeout = I40E_MAX_NVM_TIMEOUT; 60 nvm->blank_nvm_mode = false; 61 } else { /* Blank programming mode */ 62 nvm->blank_nvm_mode = true; 63 ret_code = I40E_ERR_NVM_BLANK_MODE; 64 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); 65 } 66 67 return ret_code; 68 } 69 70 /** 71 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership 72 * @hw: pointer to the HW structure 73 * @access: NVM access type (read or write) 74 * 75 * This function will request NVM ownership for reading 76 * via the proper Admin Command. 
77 **/ 78 i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 79 enum i40e_aq_resource_access_type access) 80 { 81 i40e_status ret_code = 0; 82 u64 gtime, timeout; 83 u64 time_left = 0; 84 85 if (hw->nvm.blank_nvm_mode) 86 goto i40e_i40e_acquire_nvm_exit; 87 88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 89 0, &time_left, NULL); 90 /* Reading the Global Device Timer */ 91 gtime = rd32(hw, I40E_GLVFGEN_TIMER); 92 93 /* Store the timeout */ 94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; 95 96 if (ret_code) 97 i40e_debug(hw, I40E_DEBUG_NVM, 98 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", 99 access, time_left, ret_code, hw->aq.asq_last_status); 100 101 if (ret_code && time_left) { 102 /* Poll until the current NVM owner timeouts */ 103 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; 104 while ((gtime < timeout) && time_left) { 105 usleep_range(10000, 20000); 106 gtime = rd32(hw, I40E_GLVFGEN_TIMER); 107 ret_code = i40e_aq_request_resource(hw, 108 I40E_NVM_RESOURCE_ID, 109 access, 0, &time_left, 110 NULL); 111 if (!ret_code) { 112 hw->nvm.hw_semaphore_timeout = 113 I40E_MS_TO_GTIME(time_left) + gtime; 114 break; 115 } 116 } 117 if (ret_code) { 118 hw->nvm.hw_semaphore_timeout = 0; 119 i40e_debug(hw, I40E_DEBUG_NVM, 120 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", 121 time_left, ret_code, hw->aq.asq_last_status); 122 } 123 } 124 125 i40e_i40e_acquire_nvm_exit: 126 return ret_code; 127 } 128 129 /** 130 * i40e_release_nvm - Generic request for releasing the NVM ownership 131 * @hw: pointer to the HW structure 132 * 133 * This function will release NVM resource via the proper Admin Command. 
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	/* Nothing was acquired in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		/* retry roughly once per millisecond until the AQ accepts
		 * the release or the command timeout budget is spent
		 */
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit until it is set or
 * I40E_SRRD_SRCTL_ATTEMPTS polls (5us apart) have elapsed.
 *
 * Returns 0 when the done bit is observed, I40E_ERR_TIMEOUT otherwise.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 * @data is only written on success.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	/* Reject offsets past the Shadow RAM size discovered at init */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			/* Extract the 16-bit read-data field */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer that receives the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				    u32 offset, u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* ask the AQ layer to save the writeback descriptor for later
	 * retrieval (see i40e_nvmupd_get_aq_result)
	 */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	/* NOTE(review): the debug messages below say "write" although this
	 * is the read helper — they look copy-pasted from i40e_write_nvm_aq;
	 * consider rewording (runtime strings left untouched here).
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		/* word offsets/counts converted to byte units for the AQ */
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 * The word arrives little-endian and is converted to CPU order in place.
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, dispatching to the AQ or
 * SRCTL access method depending on the device capability flag.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
					u16 offset, u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, taking and releasing the NVM
 * ownership around the read when the device requires it.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	i40e_status ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time via
 * i40e_read_nvm_word_srctl(). No NVM ownership is taken or released here;
 * any required locking is the caller's responsibility. On error, *words
 * reports how many words were read before the failure.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop thru the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_aq(),
 * splitting the request so that no single AQ read exceeds one 4KB sector
 * or crosses a sector boundary. No NVM ownership is taken or released
 * here; any required locking is the caller's responsibility.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* unaligned start: read only up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* convert the whole buffer from little-endian to CPU order */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR, dispatching to the AQ or
 * SRCTL access method depending on the device capability flag. The caller
 * must already hold the NVM ownership where the hardware requires it.
 **/
static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
					  u16 offset, u16 *words,
					  u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * A single write may not exceed one 4KB sector nor cross a sector boundary.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* ask the AQ layer to save the writeback descriptor for later
	 * retrieval (see i40e_nvmupd_get_aq_result)
	 */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		/* word offsets/counts converted to byte units for the AQ */
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 *
 * The Shadow RAM is read one 4KB sector at a time into a scratch buffer
 * to avoid a word-by-word register access for every location. The caller
 * must already hold the NVM lock where the hardware requires it.
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	/* scratch buffer sized for one full sector of SR words */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 checksum;
	__le16 le_sum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		/* the checksum word is stored little-endian in the SR */
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	/* best-effort read of the stored checksum; a stale value simply
	 * makes the comparison below fail
	 */
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}

/* Forward declarations for the NVM-update state machine below. */
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
/* NOTE(review): this prototype names the last parameter 'errno', shadowing
 * the errno macro; the definition uses 'perrno' — consider renaming.
 */
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);

/* Extract the module pointer field from a config word. */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

/* Extract the transaction type field from a config word. */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

/* Debug names for enum i40e_nvmupd_cmd; must stay in the same order. */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* invalid commands fall through to the state handlers, which
	 * reject them via their default cases; only log and set errno here
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state; bytes 2-3 (if room): opcode we are
		 * waiting on
		 */
		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			status = 0;
			goto exit;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
exit:
	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* stand-alone read: acquire, read, release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-part read: hold the NVM across commands */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase completes asynchronously: release on ARQ event */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* stand-alone write: release on ARQ completion event */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-part write: keep ownership, wait state */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* stand-alone checksum update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		/* continue reading; ownership is kept for further chunks */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		/* last command block of the read session: do the read,
		 * then release NVM ownership and return to the idle state
		 */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			/* wait for the nvm_update completion event before
			 * accepting the next write chunk
			 */
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			/* prefer the AQ error translated to a posix code,
			 * fall back to -EIO if the AQ reported nothing
			 */
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			/* last block: release ownership once the
			 * completion event arrives
			 */
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				/* couldn't reacquire: report the original
				 * failure, not the reacquire failure
				 */
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 *
 * Clears the wait state when the admin queue event being waited on arrives:
 * releases NVM ownership if that was requested, then moves the update state
 * machine out of its *_WAIT state (or to ERROR if the receive queue
 * reported an error).
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
	if (opcode == hw->nvm_wait_opcode) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
		if (hw->nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->nvm_release_on_done = false;
		}
		hw->nvm_wait_opcode = 0;

		/* an ARQ error while waiting aborts the update */
		if (hw->aq.arq_last_status) {
			hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
			return;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to
return error code 1161 * 1162 * Return one of the valid command types or I40E_NVMUPD_INVALID 1163 **/ 1164 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, 1165 struct i40e_nvm_access *cmd, 1166 int *perrno) 1167 { 1168 enum i40e_nvmupd_cmd upd_cmd; 1169 u8 module, transaction; 1170 1171 /* anything that doesn't match a recognized case is an error */ 1172 upd_cmd = I40E_NVMUPD_INVALID; 1173 1174 transaction = i40e_nvmupd_get_transaction(cmd->config); 1175 module = i40e_nvmupd_get_module(cmd->config); 1176 1177 /* limits on data size */ 1178 if ((cmd->data_size < 1) || 1179 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { 1180 i40e_debug(hw, I40E_DEBUG_NVM, 1181 "i40e_nvmupd_validate_command data_size %d\n", 1182 cmd->data_size); 1183 *perrno = -EFAULT; 1184 return I40E_NVMUPD_INVALID; 1185 } 1186 1187 switch (cmd->command) { 1188 case I40E_NVM_READ: 1189 switch (transaction) { 1190 case I40E_NVM_CON: 1191 upd_cmd = I40E_NVMUPD_READ_CON; 1192 break; 1193 case I40E_NVM_SNT: 1194 upd_cmd = I40E_NVMUPD_READ_SNT; 1195 break; 1196 case I40E_NVM_LCB: 1197 upd_cmd = I40E_NVMUPD_READ_LCB; 1198 break; 1199 case I40E_NVM_SA: 1200 upd_cmd = I40E_NVMUPD_READ_SA; 1201 break; 1202 case I40E_NVM_EXEC: 1203 if (module == 0xf) 1204 upd_cmd = I40E_NVMUPD_STATUS; 1205 else if (module == 0) 1206 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; 1207 break; 1208 } 1209 break; 1210 1211 case I40E_NVM_WRITE: 1212 switch (transaction) { 1213 case I40E_NVM_CON: 1214 upd_cmd = I40E_NVMUPD_WRITE_CON; 1215 break; 1216 case I40E_NVM_SNT: 1217 upd_cmd = I40E_NVMUPD_WRITE_SNT; 1218 break; 1219 case I40E_NVM_LCB: 1220 upd_cmd = I40E_NVMUPD_WRITE_LCB; 1221 break; 1222 case I40E_NVM_SA: 1223 upd_cmd = I40E_NVMUPD_WRITE_SA; 1224 break; 1225 case I40E_NVM_ERA: 1226 upd_cmd = I40E_NVMUPD_WRITE_ERA; 1227 break; 1228 case I40E_NVM_CSUM: 1229 upd_cmd = I40E_NVMUPD_CSUM_CON; 1230 break; 1231 case (I40E_NVM_CSUM|I40E_NVM_SA): 1232 upd_cmd = I40E_NVMUPD_CSUM_SA; 1233 break; 1234 case 
(I40E_NVM_CSUM|I40E_NVM_LCB): 1235 upd_cmd = I40E_NVMUPD_CSUM_LCB; 1236 break; 1237 case I40E_NVM_EXEC: 1238 if (module == 0) 1239 upd_cmd = I40E_NVMUPD_EXEC_AQ; 1240 break; 1241 } 1242 break; 1243 } 1244 1245 return upd_cmd; 1246 } 1247 1248 /** 1249 * i40e_nvmupd_exec_aq - Run an AQ command 1250 * @hw: pointer to hardware structure 1251 * @cmd: pointer to nvm update command buffer 1252 * @bytes: pointer to the data buffer 1253 * @perrno: pointer to return error code 1254 * 1255 * cmd structure contains identifiers and data buffer 1256 **/ 1257 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, 1258 struct i40e_nvm_access *cmd, 1259 u8 *bytes, int *perrno) 1260 { 1261 struct i40e_asq_cmd_details cmd_details; 1262 i40e_status status; 1263 struct i40e_aq_desc *aq_desc; 1264 u32 buff_size = 0; 1265 u8 *buff = NULL; 1266 u32 aq_desc_len; 1267 u32 aq_data_len; 1268 1269 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); 1270 memset(&cmd_details, 0, sizeof(cmd_details)); 1271 cmd_details.wb_desc = &hw->nvm_wb_desc; 1272 1273 aq_desc_len = sizeof(struct i40e_aq_desc); 1274 memset(&hw->nvm_wb_desc, 0, aq_desc_len); 1275 1276 /* get the aq descriptor */ 1277 if (cmd->data_size < aq_desc_len) { 1278 i40e_debug(hw, I40E_DEBUG_NVM, 1279 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", 1280 cmd->data_size, aq_desc_len); 1281 *perrno = -EINVAL; 1282 return I40E_ERR_PARAM; 1283 } 1284 aq_desc = (struct i40e_aq_desc *)bytes; 1285 1286 /* if data buffer needed, make sure it's ready */ 1287 aq_data_len = cmd->data_size - aq_desc_len; 1288 buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); 1289 if (buff_size) { 1290 if (!hw->nvm_buff.va) { 1291 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, 1292 hw->aq.asq_buf_size); 1293 if (status) 1294 i40e_debug(hw, I40E_DEBUG_NVM, 1295 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", 1296 status); 1297 } 1298 1299 if (hw->nvm_buff.va) { 1300 buff = hw->nvm_buff.va; 1301 memcpy(buff, 
			       &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event?
	 * cmd->offset carries the opcode of the event to wait for
	 */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	/* the result consists of the writeback descriptor followed by
	 * its data buffer; reads may straddle the two
	 */
	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* copy starts inside the descriptor; take what is needed
		 * from it, then continue from the start of the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* copy starts inside the data buffer */
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	/* LCB and SA both mark the final chunk of a read session */
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); 1429 } 1430 1431 return status; 1432 } 1433 1434 /** 1435 * i40e_nvmupd_nvm_erase - Erase an NVM module 1436 * @hw: pointer to hardware structure 1437 * @cmd: pointer to nvm update command buffer 1438 * @perrno: pointer to return error code 1439 * 1440 * module, offset, data_size and data are in cmd structure 1441 **/ 1442 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, 1443 struct i40e_nvm_access *cmd, 1444 int *perrno) 1445 { 1446 i40e_status status = 0; 1447 struct i40e_asq_cmd_details cmd_details; 1448 u8 module, transaction; 1449 bool last; 1450 1451 transaction = i40e_nvmupd_get_transaction(cmd->config); 1452 module = i40e_nvmupd_get_module(cmd->config); 1453 last = (transaction & I40E_NVM_LCB); 1454 1455 memset(&cmd_details, 0, sizeof(cmd_details)); 1456 cmd_details.wb_desc = &hw->nvm_wb_desc; 1457 1458 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, 1459 last, &cmd_details); 1460 if (status) { 1461 i40e_debug(hw, I40E_DEBUG_NVM, 1462 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", 1463 module, cmd->offset, cmd->data_size); 1464 i40e_debug(hw, I40E_DEBUG_NVM, 1465 "i40e_nvmupd_nvm_erase status %d aq %d\n", 1466 status, hw->aq.asq_last_status); 1467 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); 1468 } 1469 1470 return status; 1471 } 1472 1473 /** 1474 * i40e_nvmupd_nvm_write - Write NVM 1475 * @hw: pointer to hardware structure 1476 * @cmd: pointer to nvm update command buffer 1477 * @bytes: pointer to the data buffer 1478 * @perrno: pointer to return error code 1479 * 1480 * module, offset, data_size and data are in cmd structure 1481 **/ 1482 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, 1483 struct i40e_nvm_access *cmd, 1484 u8 *bytes, int *perrno) 1485 { 1486 i40e_status status = 0; 1487 struct i40e_asq_cmd_details cmd_details; 1488 u8 module, transaction; 1489 bool last; 1490 1491 transaction = 
i40e_nvmupd_get_transaction(cmd->config); 1492 module = i40e_nvmupd_get_module(cmd->config); 1493 last = (transaction & I40E_NVM_LCB); 1494 1495 memset(&cmd_details, 0, sizeof(cmd_details)); 1496 cmd_details.wb_desc = &hw->nvm_wb_desc; 1497 1498 status = i40e_aq_update_nvm(hw, module, cmd->offset, 1499 (u16)cmd->data_size, bytes, last, 1500 &cmd_details); 1501 if (status) { 1502 i40e_debug(hw, I40E_DEBUG_NVM, 1503 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", 1504 module, cmd->offset, cmd->data_size); 1505 i40e_debug(hw, I40E_DEBUG_NVM, 1506 "i40e_nvmupd_nvm_write status %d aq %d\n", 1507 status, hw->aq.asq_last_status); 1508 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); 1509 } 1510 1511 return status; 1512 } 1513