// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	cmd = &desc.params.nvm;

	/* The AQ descriptor only carries a 24-bit offset (16-bit low word
	 * plus an 8-bit high byte below), so reject anything beyond the
	 * addressable range up front.
	 */
	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	/* Direct flash access (bypassing Shadow RAM) is only requested for
	 * flat reads from the start point module.
	 */
	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = cpu_to_le16(module_typeid);
	/* Split the 24-bit offset into a little-endian low word and a high
	 * byte, matching the descriptor layout.
	 */
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = cpu_to_le16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4Kb read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	/* Report zero bytes read until we know how much actually succeeded */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: requested offset is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4Kb at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4Kb.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		/* This chunk is the last one exactly when it reaches inlen */
		last_cmd = !(bytes_read + read_size < inlen);

		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* May be less than inlen if a chunked read failed part-way */
	*length = bytes_read;
	return status;
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	enum ice_status status;
	__le16 data_local;

	/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
	 * Shadow RAM sector restrictions necessary when reading from the NVM.
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (u8 *)&data_local, true);
	if (status)
		return status;

	/* NVM data is stored little-endian; convert for the host */
	*data = le16_to_cpu(data_local);
	return 0;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	/* No ownership arbitration is needed in blank NVM mode */
	if (hw->nvm.blank_nvm_mode)
		return 0;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
 */
void ice_release_nvm(struct ice_hw *hw)
{
	/* Nothing was acquired in blank NVM mode, so nothing to release */
	if (hw->nvm.blank_nvm_mode)
		return;

	ice_release_res(hw, ICE_NVM_RES_ID);
}

/**
 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
 */
static enum ice_status
ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
	enum ice_status status;

	/* Acquire/release brackets the read so firmware ownership rules
	 * are respected even for a single-word access.
	 */
	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_word_aq(hw, offset, data);
		ice_release_nvm(hw);
	}

	return status;
}

/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
static enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
		return status;
	}
	/* The first word of the PFA holds its own length */
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}
	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			/* A matching TLV with zero length is treated as
			 * malformed rather than as a successful lookup.
			 */
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return 0;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_read_pba_string - Reads part number string from NVM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the NVM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the NVM.
 */
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
	u16 pba_tlv, pba_tlv_len;
	enum ice_status status;
	u16 pba_word, pba_size;
	u16 i;

	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
					ICE_SR_PBA_BLOCK_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
		return status;
	}

	/* pba_size is the next word */
	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
		return status;
	}

	/* The PBA section size must fit within the TLV that contains it */
	if (pba_tlv_len < pba_size) {
		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	/* Each word yields two characters, plus one byte for the NUL */
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
		return ICE_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		/* Words start after the TLV header (2 words) and size word */
		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		/* Unpack the word high-byte-first into two string bytes */
		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 *
 * Read the Combo Image version data from the Boot Configuration TLV and fill
 * in the option ROM version data.
 */
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
	u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
	struct ice_orom_info *orom = &hw->nvm.orom;
	enum ice_status status;
	u32 combo_ver;

	status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
					ICE_SR_BOOT_CFG_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read Boot Configuration Block TLV.\n");
		return status;
	}

	/* Boot Configuration Block must have length at least 2 words
	 * (Combo Image Version High and Combo Image Version Low)
	 */
	if (boot_cfg_tlv_len < 2) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Invalid Boot Configuration Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
				  &combo_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
		return status;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
				  &combo_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
		return status;
	}

	/* Combine the two Shadow RAM words into the 32-bit combo version */
	combo_ver = ((u32)combo_hi << 16) | combo_lo;

	/* Extract major/build/patch fields using the OROM version masks */
	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
			   ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
			    ICE_OROM_VER_BUILD_SHIFT);

	return 0;
}

/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 *
 * Get the netlist version information
 */
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
		       GFP_KERNEL);
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read module length */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = le16_to_cpu(raw_data);
	/* exit if length is = 0 */
	if (!data)
		goto exit_error;

	/* read node count */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

	/* netlist ID block starts from offset 4 + node count * 2 */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire netlist ID block */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	/* Convert the ID block in place from little-endian to host order */
	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

	/* Each version field is split across a high and a low 16-bit word */
	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	kfree(buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}

/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	/* Binary search: probe one byte at the midpoint; an EINVAL from
	 * firmware means the offset is past the end of flash, while a
	 * successful read means the flash extends at least that far.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM,
		  "Predicted flash size is %u bytes\n", max_size);

	hw->nvm.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}

/**
 * ice_init_nvm - initializes NVM setting
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * max_timeout, and blank_nvm_mode
 */
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
	struct ice_nvm_info *nvm = &hw->nvm;
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		nvm->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read DEV starter version.\n");
		return status;
	}
	/* Major and minor version live in separate bit fields of one word */
	nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: failed to discover flash size.\n");
		return status;
	}

	switch (hw->device_id) {
	/* the following devices do not have boot_cfg_tlv yet */
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
		/* status is 0 here; these devices skip OROM/netlist reads */
		return status;
	default:
		break;
	}

	status = ice_get_orom_ver_info(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
		return status;
	}

	/* read the netlist version information */
	status = ice_get_netlist_ver_info(hw);
	if (status)
		/* Netlist info is non-fatal: log and continue with success */
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}

/**
 * ice_nvm_validate_checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity (0x0706)
 */
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	ice_release_nvm(hw);

	/* Firmware writes the computed checksum result back into the
	 * descriptor; anything other than the "correct" sentinel means
	 * the PFA checksum failed verification.
	 */
	if (!status)
		if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
			status = ICE_ERR_NVM_CHECKSUM;

	return status;
}