// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	cmd = &desc.params.nvm;

	/* The offset is encoded below as a 16-bit low word plus an 8-bit
	 * high byte, so it must fit in 24 bits.
	 */
	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	/* A flat (non Shadow RAM) read of the default module is directed at
	 * the flash itself rather than the Shadow RAM copy.
	 */
	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = cpu_to_le16(module_typeid);
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = cpu_to_le16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4Kb read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	/* Report zero bytes read unless/until chunk reads succeed below. */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: requested offset is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4Kb at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4Kb.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		/* This chunk is the last one exactly when it completes the
		 * requested length; the flag is also the loop terminator.
		 */
		last_cmd = !(bytes_read + read_size < inlen);

		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	*length = bytes_read;
	return status;
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	enum ice_status status;
	__le16 data_local;

	/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
	 * Shadow RAM sector restrictions necessary when reading from the NVM.
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (u8 *)&data_local, true);
	if (status)
		return status;

	/* Shadow RAM words are stored little-endian; convert for the host. */
	*data = le16_to_cpu(data_local);
	return 0;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	/* No ownership arbitration is done in blank NVM programming mode;
	 * treat acquisition as an immediate success.
	 */
	if (hw->nvm.blank_nvm_mode)
		return 0;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
158 */ 159 void ice_release_nvm(struct ice_hw *hw) 160 { 161 if (hw->nvm.blank_nvm_mode) 162 return; 163 164 ice_release_res(hw, ICE_NVM_RES_ID); 165 } 166 167 /** 168 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary 169 * @hw: pointer to the HW structure 170 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 171 * @data: word read from the Shadow RAM 172 * 173 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 174 */ 175 enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) 176 { 177 enum ice_status status; 178 179 status = ice_acquire_nvm(hw, ICE_RES_READ); 180 if (!status) { 181 status = ice_read_sr_word_aq(hw, offset, data); 182 ice_release_nvm(hw); 183 } 184 185 return status; 186 } 187 188 /** 189 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA 190 * @hw: pointer to hardware structure 191 * @module_tlv: pointer to module TLV to return 192 * @module_tlv_len: pointer to module TLV length to return 193 * @module_type: module type requested 194 * 195 * Finds the requested sub module TLV type from the Preserved Field 196 * Area (PFA) and returns the TLV pointer and length. The caller can 197 * use these to read the variable length TLV value. 198 */ 199 enum ice_status 200 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, 201 u16 module_type) 202 { 203 enum ice_status status; 204 u16 pfa_len, pfa_ptr; 205 u16 next_tlv; 206 207 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); 208 if (status) { 209 ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); 210 return status; 211 } 212 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); 213 if (status) { 214 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); 215 return status; 216 } 217 /* Starting with first TLV after PFA length, iterate through the list 218 * of TLVs to find the requested one. 
219 */ 220 next_tlv = pfa_ptr + 1; 221 while (next_tlv < pfa_ptr + pfa_len) { 222 u16 tlv_sub_module_type; 223 u16 tlv_len; 224 225 /* Read TLV type */ 226 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); 227 if (status) { 228 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); 229 break; 230 } 231 /* Read TLV length */ 232 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); 233 if (status) { 234 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); 235 break; 236 } 237 if (tlv_sub_module_type == module_type) { 238 if (tlv_len) { 239 *module_tlv = next_tlv; 240 *module_tlv_len = tlv_len; 241 return 0; 242 } 243 return ICE_ERR_INVAL_SIZE; 244 } 245 /* Check next TLV, i.e. current TLV pointer + length + 2 words 246 * (for current TLV's type and length) 247 */ 248 next_tlv = next_tlv + tlv_len + 2; 249 } 250 /* Module does not exist */ 251 return ICE_ERR_DOES_NOT_EXIST; 252 } 253 254 /** 255 * ice_read_pba_string - Reads part number string from NVM 256 * @hw: pointer to hardware structure 257 * @pba_num: stores the part number string from the NVM 258 * @pba_num_size: part number string buffer length 259 * 260 * Reads the part number string from the NVM. 
261 */ 262 enum ice_status 263 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) 264 { 265 u16 pba_tlv, pba_tlv_len; 266 enum ice_status status; 267 u16 pba_word, pba_size; 268 u16 i; 269 270 status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, 271 ICE_SR_PBA_BLOCK_PTR); 272 if (status) { 273 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); 274 return status; 275 } 276 277 /* pba_size is the next word */ 278 status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); 279 if (status) { 280 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); 281 return status; 282 } 283 284 if (pba_tlv_len < pba_size) { 285 ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); 286 return ICE_ERR_INVAL_SIZE; 287 } 288 289 /* Subtract one to get PBA word count (PBA Size word is included in 290 * total size) 291 */ 292 pba_size--; 293 if (pba_num_size < (((u32)pba_size * 2) + 1)) { 294 ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); 295 return ICE_ERR_PARAM; 296 } 297 298 for (i = 0; i < pba_size; i++) { 299 status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); 300 if (status) { 301 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); 302 return status; 303 } 304 305 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; 306 pba_num[(i * 2) + 1] = pba_word & 0xFF; 307 } 308 pba_num[(pba_size * 2)] = '\0'; 309 310 return status; 311 } 312 313 /** 314 * ice_get_orom_ver_info - Read Option ROM version information 315 * @hw: pointer to the HW struct 316 * 317 * Read the Combo Image version data from the Boot Configuration TLV and fill 318 * in the option ROM version data. 
 */
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
	u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
	struct ice_orom_info *orom = &hw->nvm.orom;
	enum ice_status status;
	u32 combo_ver;

	status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
					ICE_SR_BOOT_CFG_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read Boot Configuration Block TLV.\n");
		return status;
	}

	/* Boot Configuration Block must have length at least 2 words
	 * (Combo Image Version High and Combo Image Version Low)
	 */
	if (boot_cfg_tlv_len < 2) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Invalid Boot Configuration Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
				  &combo_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
		return status;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
				  &combo_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
		return status;
	}

	/* The combo version is stored as two consecutive Shadow RAM words. */
	combo_ver = ((u32)combo_hi << 16) | combo_lo;

	/* Unpack major/build/patch fields from the 32-bit combo version. */
	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
			   ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
			    ICE_OROM_VER_BUILD_SHIFT);

	return 0;
}

/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 *
 * Get the netlist version information
 */
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
		       GFP_KERNEL);
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read module length */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = le16_to_cpu(raw_data);
	/* exit if length is = 0; ret is still 0 here, so a missing netlist
	 * module is treated as success with no version data filled in
	 */
	if (!data)
		goto exit_error;

	/* read node count */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

	/* netlist ID block starts from offset 4 + node count * 2 */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire netlist ID block (offsets above are in words,
	 * the AQ command takes bytes, hence the * 2)
	 */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	/* convert the ID block to host byte order in place */
	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

	/* each 32-bit version field is split across two 16-bit words */
	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		   buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	kfree(buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}

/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	/* Binary search: probe one byte at the midpoint; EINVAL from the
	 * firmware means the offset is past the end of flash.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM,
		  "Predicted flash size is %u bytes\n", max_size);

	hw->nvm.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}

/**
 * ice_init_nvm - initializes NVM setting
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * max_timeout, and blank_nvm_mode
 */
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
	struct ice_nvm_info *nvm = &hw->nvm;
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		nvm->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read DEV starter version.\n");
		return status;
	}
	nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: failed to discover flash size.\n");
		return status;
	}

	switch (hw->device_id) {
	/* the following devices do not have boot_cfg_tlv yet */
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
		/* status is 0 here, so these devices return early success
		 * and skip the OROM/netlist version reads below
		 */
		return status;
	default:
		break;
	}

	status = ice_get_orom_ver_info(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
		return status;
	}

	/* read the netlist version information; a failure here is logged
	 * but deliberately not treated as fatal
	 */
	status = ice_get_netlist_ver_info(hw);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}

/**
 * ice_nvm_validate_checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity (0x0706)
 */
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	ice_release_nvm(hw);

	/* NOTE(review): cmd->checksum is read back out of the same
	 * descriptor after the command completes, i.e. it reflects the
	 * firmware's response, not the value written above
	 */
	if (!status)
		if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
			status = ICE_ERR_NVM_CHECKSUM;

	return status;
}