// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 **/
int i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	int ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = -EIO;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership via the proper Admin Command.
 **/
int i40e_acquire_nvm(struct i40e_hw *hw,
		     enum i40e_aq_resource_access_type access)
{
	u64 gtime, timeout;
	u64 time_left = 0;
	int ret_code = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
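
/* Usage sketch (illustrative only, not part of the driver): callers pair
 * i40e_acquire_nvm() with i40e_release_nvm() around Shadow RAM accesses,
 * mirroring what i40e_read_nvm_word() does below:
 *
 *	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (!ret) {
 *		ret = __i40e_read_nvm_word(hw, offset, &word);
 *		i40e_release_nvm(hw);
 *	}
 *
 * The hw_semaphore_timeout bookkeeping above converts the firmware-reported
 * time_left (in ms) into GLVFGEN_TIMER ticks via I40E_MS_TO_GTIME(), so the
 * write path can later tell whether the semaphore has already expired.
 */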

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	u32 total_delay = 0;
	int ret_code = 0;

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == -EIO) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	int ret_code = -EIO;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == -EIO)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
				    u16 *data)
{
	int ret_code = -EIO;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = -EINVAL;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
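
/* Timing note (sketch): a SRCTL word read is bounded by two polling loops
 * of at most I40E_SRRD_SRCTL_ATTEMPTS iterations with udelay(5) each, so a
 * stuck DONE bit fails the read after a bounded busy-wait instead of
 * hanging the caller:
 *
 *	u16 word;
 *
 *	if (!i40e_read_nvm_word_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &word))
 *		... word now holds the checksum stored in the Shadow RAM ...
 */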

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static int i40e_read_nvm_aq(struct i40e_hw *hw,
			    u8 module_pointer, u32 offset,
			    u16 words, void *data,
			    bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;
	int ret_code = -EIO;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 **/
static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
				 u16 *data)
{
	int ret_code = -EIO;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static int __i40e_read_nvm_word(struct i40e_hw *hw,
				u16 offset, u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads nvm word and acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 **/
int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
		       u16 *data)
{
	int ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret_code;
}

/**
 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 * @hw: Pointer to the HW structure
 * @module_ptr: Pointer to module in words with respect to NVM beginning
 * @module_offset: Offset in words from module start
 * @data_offset: Offset in words from reading data area start
 * @words_data_size: Words to read from NVM
 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 **/
int i40e_read_nvm_module_data(struct i40e_hw *hw,
			      u8 module_ptr,
			      u16 module_offset,
			      u16 data_offset,
			      u16 words_data_size,
			      u16 *data_ptr)
{
	u16 specific_ptr = 0;
	u16 ptr_value = 0;
	u32 offset = 0;
	int status;

	if (module_ptr != 0) {
		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed. Error code: %d.\n",
				   status);
			return -EIO;
		}
	}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
#define I40E_NVM_INVALID_VAL 0xFFFF

	/* Pointer not initialized */
	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
	    ptr_value == I40E_NVM_INVALID_VAL) {
		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
		return -EINVAL;
	}

	/* Check whether the module is in the SR mapped area or outside */
	if (ptr_value & I40E_PTR_TYPE) {
		/* Pointer points outside of the Shadow RAM mapped area */
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm data failed. Pointer points outside of the Shadow RAM mapped area.\n");

		return -EINVAL;
	} else {
		/* Read from the Shadow RAM */

		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
					    &specific_ptr);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed. Error code: %d.\n",
				   status);
			return -EIO;
		}

		offset = ptr_value + module_offset + specific_ptr +
			data_offset;

		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
					      data_ptr);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm buffer failed. Error code: %d.\n",
				   status);
		}
	}

	return status;
}
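
/* Worked example (hypothetical values): with module_ptr = 0x48, suppose
 * the word at 0x48 reads back ptr_value = 0x1200. The word at
 * ptr_value + module_offset then yields specific_ptr, and the final read
 * starts at
 *
 *	offset = ptr_value + module_offset + specific_ptr + data_offset;
 *
 * i.e. two levels of indirection are resolved before i40e_read_nvm_buffer()
 * is called on the data area itself.
 */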

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR, one word at a time, using
 * the i40e_read_nvm_word_srctl() method.
 **/
static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
				      u16 *words, u16 *data)
{
	int ret_code = 0;
	u16 index, word;

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The caller must have acquired the NVM ownership beforehand.
 **/
static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
				   u16 *words, u16 *data)
{
	bool last_cmd = false;
	u16 words_read = 0;
	u16 read_size;
	int ret_code;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the AQ or SRCTL
 * method; the caller is responsible for any required locking.
 **/
static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
				  u16 offset, u16 *words,
				  u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
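
/* Worked example (hypothetical numbers): for offset = 0x7F0 and
 * *words = 0x30, with a sector size of 0x800 words,
 * i40e_read_nvm_buffer_aq() issues:
 *
 *	read 1: offset 0x7F0, 0x010 words (up to the sector boundary)
 *	read 2: offset 0x800, 0x020 words (last_cmd = true)
 *
 * so no single AQ read crosses a 4KB sector boundary.
 */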

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR. For the AQ-based path, the
 * buffer read is preceded by the NVM ownership take and followed by the
 * release.
 **/
int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
			 u16 *words, u16 *data)
{
	int ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
							   data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
	}

	return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
			     u32 offset, u16 words, void *data,
			     bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;
	int ret_code = -EIO;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
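
/* Unit note (illustrative): the Shadow RAM API in this file is word-based,
 * while i40e_aq_read_nvm()/i40e_aq_update_nvm() take byte offsets and byte
 * counts, hence the "2 * offset" / "2 * words" conversions above. The same
 * sector rules apply to reads and writes alike: at most one 4KB sector
 * (I40E_SR_SECTOR_SIZE_IN_WORDS words) per command, and no command may
 * straddle a sector boundary.
 */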

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips the
 * maximum possible size of VPD (1kB).
 **/
static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
				  u16 *checksum)
{
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	int ret_code;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = -EIO;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	__le16 le_sum;
	int ret_code;
	u16 checksum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}
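
/* Checksum invariant (sketch): i40e_calc_nvm_checksum() returns
 *
 *	checksum = I40E_SR_SW_CHECKSUM_BASE - sum(covered words)
 *
 * so once i40e_update_nvm_checksum() stores that value at
 * I40E_SR_SW_CHECKSUM_WORD, re-summing the covered words plus the stored
 * word yields I40E_SR_SW_CHECKSUM_BASE (mod 2^16). Validation below
 * instead recomputes the sum and compares it with the stored word.
 */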

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
int i40e_validate_nvm_checksum(struct i40e_hw *hw,
			       u16 *checksum)
{
	u16 checksum_local = 0;
	u16 checksum_sr = 0;
	int ret_code = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = -EIO;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}

static int i40e_nvmupd_state_init(struct i40e_hw *hw,
				  struct i40e_nvm_access *cmd,
				  u8 *bytes, int *perrno);
static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno);
static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 int *perrno);
static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 u8 *bytes, int *perrno);
static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno);
static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
			       struct i40e_nvm_access *cmd,
			       u8 *bytes, int *perrno);
static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno);
static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
				    struct i40e_nvm_access *cmd,
				    u8 *bytes, int *perrno);

static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
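
/* cmd->config packing (sketch; the masks are defined alongside the NVM
 * access types): the low bits carry the module pointer, I40E_NVM_TRANS_MASK
 * the transaction type and I40E_NVM_PRESERVATION_FLAGS_MASK the
 * preservation policy, decoded with the helpers above:
 *
 *	transaction = i40e_nvmupd_get_transaction(cmd->config);
 *	module = i40e_nvmupd_get_module(cmd->config);
 *
 * A config of (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT), for instance, decodes
 * to a "single read with acquire/release" request on module 0.
 */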

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
int i40e_nvmupd_command(struct i40e_hw *hw,
			struct i40e_nvm_access *cmd,
			u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return -EINVAL;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear the status even if it has not been read, and log it */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = 0;
			break;
		}

		status = -EBUSY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}

	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
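
/* State machine overview (summary of the handlers below):
 *
 *	INIT    --READ_SA----------------------------> INIT
 *	INIT    --READ_SNT---------------------------> READING
 *	INIT    --WRITE_SNT--------------------------> WRITE_WAIT
 *	INIT    --WRITE_SA/WRITE_ERA/CSUM_SA---------> INIT_WAIT
 *	READING --READ_LCB---------------------------> INIT
 *	WRITING --WRITE_CON/CSUM_CON-----------------> WRITE_WAIT
 *	WRITING --WRITE_LCB/CSUM_LCB-----------------> INIT_WAIT
 *
 * The WAIT states are left via i40e_nvmupd_clear_wait_state(), either on
 * the awaited AdminQ completion event or on an explicit 0xffff offset sent
 * by the update tool; INIT_WAIT returns to INIT and WRITE_WAIT to WRITING.
 */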

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static int i40e_nvmupd_state_init(struct i40e_hw *hw,
				  struct i40e_nvm_access *cmd,
				  u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EIO;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		u32 old_asq_status = hw->aq.asq_last_status;
		int old_status = status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_clear_wait_state - clear wait state on hw
 * @hw: pointer to the hardware structure
 **/
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVMUPD: clearing wait on opcode 0x%04x\n",
		   hw->nvm_wait_opcode);

	if (hw->nvm_release_on_done) {
		i40e_release_nvm(hw);
		hw->nvm_release_on_done = false;
	}
	hw->nvm_wait_opcode = 0;

	if (hw->aq.arq_last_status) {
		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
		return;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_STATE_WRITE_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		break;

	default:
		break;
	}
}

/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 * @desc: AdminQ descriptor
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
				  struct i40e_aq_desc *desc)
{
	u32 aq_desc_len = sizeof(struct i40e_aq_desc);

	if (opcode == hw->nvm_wait_opcode) {
		memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
		i40e_nvmupd_clear_wait_state(hw);
	}
}
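
/* Caller-side flow (sketch, based on how the service task consumes ARQ
 * events): the AdminQ receive loop forwards each completed opcode here,
 * e.g.:
 *
 *	i40e_nvmupd_check_wait_event(hw, opcode, &event.desc);
 *
 * When the opcode matches hw->nvm_wait_opcode, the descriptor is cached in
 * hw->nvm_aq_event_desc for a later I40E_NVMUPD_GET_AQ_EVENT query and the
 * wait state is cleared.
 */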

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}
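
/* Example decode (illustrative): an I40E_NVM_WRITE command whose
 * transaction bits equal (I40E_NVM_CSUM | I40E_NVM_LCB) maps to
 * I40E_NVMUPD_CSUM_LCB, i.e. "update the checksum, and this is the last
 * command in the batch". Anything matching no case above falls through as
 * I40E_NVMUPD_INVALID and is rejected by the dispatcher.
 */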

/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
			       struct i40e_nvm_access *cmd,
			       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;
	int status;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	if (cmd->offset == 0xffff)
		return 0;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return -EINVAL;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s err %pe aq_err %s\n",
			   __func__, ERR_PTR(status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return -EINVAL;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
				    struct i40e_nvm_access *cmd,
				    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);

	/* check copylength range */
	if (cmd->data_size > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, aq_total_len);
		cmd->data_size = aq_total_len;
	}

	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);

	return 0;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	int status;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	int status = 0;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	u8 preservation_flags;
	int status = 0;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    preservation_flags, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}