/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Sets up the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside i40e_init_shared_code().
 * Note that the term NVM is used here (and in all functions in this file)
 * as an equivalent of the FLASH part mapped into the Shadow RAM (SR); the
 * FLASH is always accessed through the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode,
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}
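/* Illustrative sketch, not part of the driver: the SR size math above in
 * numbers, assuming the GLNVM_GENS SR_SIZE field holds log2 of the Shadow
 * RAM size in KB and I40E_SR_WORDS_IN_1KB is 512 (1KB of 16-bit words):
 *
 *	sr_size = 4;                                    // hypothetical field value
 *	sr_words = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; // 16 * 512 = 8192
 *
 * i.e. a 16KB part gives 8192 words, matching the 0x0000-0x1FFF word
 * offsets quoted in the kernel-doc comments throughout this file.
 */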
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading or writing
 * via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_acquire_nvm_exit:
	return ret_code;
}

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}
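/* Illustrative sketch, not part of the driver: the ownership pattern the
 * two functions above implement. A hypothetical caller wanting exclusive
 * NVM access for a write session would bracket its work like this:
 *
 *	i40e_status status;
 *
 *	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 *	if (status)
 *		return status;	// another PF owns it; retry later
 *	// ... issue AQ write/erase commands here ...
 *	i40e_release_nvm(hw);	// always release, even on command failure
 */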
/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
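/* Illustrative sketch, not part of the driver: reading a single well-known
 * Shadow RAM word through the register-based path above, assuming any
 * required locking is handled by the caller:
 *
 *	u16 csum_word;
 *	i40e_status status;
 *
 *	status = i40e_read_nvm_word_srctl(hw, I40E_SR_SW_CHECKSUM_WORD,
 *					  &csum_word);
 *	// on success, csum_word holds the stored software checksum word
 */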
/**
 * i40e_read_nvm_aq - Read Shadow RAM via the AdminQ
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer for the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the AdminQ command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
				    u8 module_pointer, u32 offset,
				    u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
					u16 offset, u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads nvm word and acquires the lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	i40e_status ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret_code;
}
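/* Illustrative sketch, not part of the driver: for a one-off word read the
 * usual entry point is i40e_read_nvm_word() above, which takes and drops
 * the NVM lock itself when the hardware flags require it:
 *
 *	u16 vpd_ptr;
 *
 *	if (!i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_ptr))
 *		; // vpd_ptr now holds the word offset of the VPD module
 */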
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using
 * i40e_read_nvm_word_srctl(). The caller must hold the NVM ownership for
 * the duration of the read.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_aq().
 * The caller must hold the NVM ownership for the duration of the read.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate the number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is the last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR via the AdminQ or the SRCTL
 * register, depending on hardware capability.
 **/
static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
					  u16 offset, u16 *words,
					  u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
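/* Illustrative sketch, not part of the driver: the sector constraint that
 * i40e_read_nvm_aq() above and i40e_write_nvm_aq() below enforce, and that
 * i40e_read_nvm_buffer_aq() works around. With I40E_SR_SECTOR_SIZE_IN_WORDS
 * being 2048 (4KB), a single transfer with offset = 2040 and words = 16 is
 * rejected, because:
 *
 *	(2040 + 15) / 2048 == 1	// last word lands in sector 1
 *	 2040       / 2048 == 0	// first word lands in sector 0
 *
 * i.e. one AQ command would have to span two sectors, so the buffer read
 * path above splits such a request into per-sector chunks instead.
 */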
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD module are customer specific and unknown, so this function
 * skips the maximum possible VPD size (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller.
 * This function will commit the SR to the NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 checksum;
	__le16 le_sum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}
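/* Illustrative sketch, not part of the driver: the checksum relation used
 * by i40e_calc_nvm_checksum() and i40e_update_nvm_checksum() above. The
 * stored word is defined so that the covered words plus the checksum sum
 * back to the base constant (16-bit wraparound arithmetic):
 *
 *	checksum = I40E_SR_SW_CHECKSUM_BASE - sum(covered SR words);
 *	// hence, for an intact image:
 *	// sum(covered SR words) + checksum == I40E_SR_SW_CHECKSUM_BASE
 *
 * where the VPD and PCIe ALT modules and the checksum word itself are
 * excluded from the sum.
 */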
/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, @checksum can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data, potentially indicating
	 * that the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify the checksum read from the EEPROM is the same as the
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
					    struct i40e_nvm_access *cmd,
					    u8 *bytes, int *perrno);

static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
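/* Illustrative sketch, not part of the driver: how the i40e_nvmupd_get_*
 * helpers above carve up the user-supplied cmd->config word, assuming the
 * usual field layout (module pointer in the low byte, transaction type in
 * the bits above it). For a hypothetical config of 0x0000010f:
 *
 *	module      = i40e_nvmupd_get_module(0x0000010f);	// 0x0f
 *	transaction = i40e_nvmupd_get_transaction(0x0000010f);	// 0x01
 */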
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear the error status even if it was not read, and log it */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that the lock could be held for
	 * ~5ms for most commands. However, the lock is held for ~60ms for
	 * the NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = 0;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
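/* Illustrative sketch, not part of the driver: the layout a user-space tool
 * would parse back from the I40E_NVMUPD_STATUS fast path above, given a
 * buffer of at least 4 bytes:
 *
 *	bytes[0]		-> current hw->nvmupd_state
 *	bytes[1]		-> reserved, always 0
 *	*(u16 *)&bytes[2]	-> AQ opcode the state machine is waiting on
 */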
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set the
 * next state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_clear_wait_state - clear wait state on hw
 * @hw: pointer to the hardware structure
 **/
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVMUPD: clearing wait on opcode 0x%04x\n",
		   hw->nvm_wait_opcode);

	if (hw->nvm_release_on_done) {
		i40e_release_nvm(hw);
		hw->nvm_release_on_done = false;
	}
	hw->nvm_wait_opcode = 0;

	if (hw->aq.arq_last_status) {
		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
		return;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_STATE_WRITE_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		break;

	default:
		break;
	}
}

/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 * @desc: pointer to the AdminQ descriptor of the received event
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
				  struct i40e_aq_desc *desc)
{
	u32 aq_desc_len = sizeof(struct i40e_aq_desc);

	if (opcode == hw->nvm_wait_opcode) {
		memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
		i40e_nvmupd_clear_wait_state(hw);
	}
}
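/* Illustrative sketch, not part of the driver: how the two functions above
 * are expected to be driven. When the AdminQ receive path (e.g. the
 * driver's adminq service task) pulls a completion descriptor, it hands the
 * opcode and descriptor to the state machine; event_desc here is a
 * hypothetical pointer to the received descriptor:
 *
 *	u16 opcode = le16_to_cpu(event_desc->opcode);
 *
 *	i40e_nvmupd_check_wait_event(hw, opcode, event_desc);
 *	// if opcode matches hw->nvm_wait_opcode, the descriptor is saved
 *	// for I40E_NVMUPD_GET_AQ_EVENT and the wait state is cleared
 */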
/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	if (cmd->offset == 0xffff)
		return 0;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
					    struct i40e_nvm_access *cmd,
					    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);

	/* check copylength range */
	if (cmd->data_size > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, aq_total_len);
		cmd->data_size = aq_total_len;
	}

	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);

	return 0;
}
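/* Illustrative sketch, not part of the driver: the buffer convention shared
 * by i40e_nvmupd_exec_aq() and the two result/event getters above. The user
 * buffer carries a raw AQ descriptor followed by its payload:
 *
 *	bytes[0 .. sizeof(struct i40e_aq_desc) - 1]	raw descriptor
 *	bytes[sizeof(struct i40e_aq_desc) .. ]		command data, if any
 *
 * so cmd->data_size must be at least sizeof(struct i40e_aq_desc), and for
 * exec_aq a nonzero cmd->offset names the follow-up AQ opcode to wait for.
 */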
/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}
/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	u8 preservation_flags;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    preservation_flags, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}