/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_MCP_H
#define _QED_MCP_H

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
#include "qed_dev_api.h"

struct qed_mcp_link_speed_params {
	bool autoneg;

	u32 advertised_speeds;
#define QED_EXT_SPEED_MASK_RES		0x1
#define QED_EXT_SPEED_MASK_1G		0x2
#define QED_EXT_SPEED_MASK_10G		0x4
#define QED_EXT_SPEED_MASK_20G		0x8
#define QED_EXT_SPEED_MASK_25G		0x10
#define QED_EXT_SPEED_MASK_40G		0x20
#define QED_EXT_SPEED_MASK_50G_R	0x40
#define QED_EXT_SPEED_MASK_50G_R2	0x80
#define QED_EXT_SPEED_MASK_100G_R2	0x100
#define QED_EXT_SPEED_MASK_100G_R4	0x200
#define QED_EXT_SPEED_MASK_100G_P4	0x400

	u32 forced_speed;	/* In Mb/s */
#define QED_EXT_SPEED_1G		0x1
#define QED_EXT_SPEED_10G		0x2
#define QED_EXT_SPEED_20G		0x4
#define QED_EXT_SPEED_25G		0x8
#define QED_EXT_SPEED_40G		0x10
#define QED_EXT_SPEED_50G_R		0x20
#define QED_EXT_SPEED_50G_R2		0x40
#define QED_EXT_SPEED_100G_R2		0x80
#define QED_EXT_SPEED_100G_R4		0x100
#define QED_EXT_SPEED_100G_P4		0x200
};

struct qed_mcp_link_pause_params {
	bool autoneg;
	bool forced_rx;
	bool forced_tx;
};

enum qed_mcp_eee_mode {
	QED_MCP_EEE_DISABLED,
	QED_MCP_EEE_ENABLED,
	QED_MCP_EEE_UNSUPPORTED
};

struct qed_mcp_link_params {
	struct qed_mcp_link_speed_params speed;
	struct qed_mcp_link_pause_params pause;
	u32 loopback_mode;
	struct qed_link_eee_params eee;
	u32 fec;

	struct qed_mcp_link_speed_params ext_speed;
	u32 ext_fec_mode;
};

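/*
 * Illustrative sketch only (not part of this API): when the MFW reports
 * extended-speed support (see qed_mcp_is_ext_speed_supported() further
 * below), a caller could request 25G/100G-R2 autonegotiation roughly as
 * follows before raising the link with qed_mcp_set_link():
 *
 *	struct qed_mcp_link_params *params;
 *
 *	params = qed_mcp_get_link_params(p_hwfn);
 *	params->ext_speed.autoneg = true;
 *	params->ext_speed.advertised_speeds = QED_EXT_SPEED_MASK_25G |
 *					      QED_EXT_SPEED_MASK_100G_R2;
 *	params->ext_speed.forced_speed = 0;
 *	rc = qed_mcp_set_link(p_hwfn, p_ptt, true);
 */
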
struct qed_mcp_link_capabilities {
	u32 speed_capabilities;
	bool default_speed_autoneg;
	u32 fec_default;
	enum qed_mcp_eee_mode default_eee;
	u32 eee_lpi_timer;
	u8 eee_speed_caps;

	u32 default_ext_speed_caps;
	u32 default_ext_autoneg;
	u32 default_ext_speed;
	u32 default_ext_fec;
};

struct qed_mcp_link_state {
	bool link_up;
	u32 min_pf_rate;

	/* Actual link speed in Mb/s */
	u32 line_speed;

	/* PF max speed in Mb/s, deduced from line_speed
	 * according to PF max bandwidth configuration.
	 */
	u32 speed;

	bool full_duplex;
	bool an;
	bool an_complete;
	bool parallel_detection;
	bool pfc_enabled;

	u32 partner_adv_speed;
#define QED_LINK_PARTNER_SPEED_1G_HD	BIT(0)
#define QED_LINK_PARTNER_SPEED_1G_FD	BIT(1)
#define QED_LINK_PARTNER_SPEED_10G	BIT(2)
#define QED_LINK_PARTNER_SPEED_20G	BIT(3)
#define QED_LINK_PARTNER_SPEED_25G	BIT(4)
#define QED_LINK_PARTNER_SPEED_40G	BIT(5)
#define QED_LINK_PARTNER_SPEED_50G	BIT(6)
#define QED_LINK_PARTNER_SPEED_100G	BIT(7)

	bool partner_tx_flow_ctrl_en;
	bool partner_rx_flow_ctrl_en;

	u8 partner_adv_pause;
#define QED_LINK_PARTNER_SYMMETRIC_PAUSE	0x1
#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE	0x2
#define QED_LINK_PARTNER_BOTH_PAUSE		0x3

	bool sfp_tx_fault;
	bool eee_active;
	u8 eee_adv_caps;
	u8 eee_lp_adv_caps;

	u32 fec_active;
};

struct qed_mcp_function_info {
	u8 pause_on_host;

	enum qed_pci_personality protocol;

	u8 bandwidth_min;
	u8 bandwidth_max;

	u8 mac[ETH_ALEN];

	u64 wwn_port;
	u64 wwn_node;

#define QED_MCP_VLAN_UNSET	(0xffff)
	u16 ovlan;

	u16 mtu;
};

struct qed_mcp_nvm_common {
	u32 offset;
	u32 param;
	u32 resp;
	u32 cmd;
};

struct qed_mcp_drv_version {
	u32 version;
	u8 name[MCP_DRV_VER_STR_SIZE - 4];
};

struct qed_mcp_lan_stats {
	u64 ucast_rx_pkts;
	u64 ucast_tx_pkts;
	u32 fcs_err;
};

struct qed_mcp_fcoe_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	u32 fcs_err;
	u32 login_failure;
};

struct qed_mcp_iscsi_stats {
	u64 rx_pdus;
	u64 tx_pdus;
	u64 rx_bytes;
	u64 tx_bytes;
};

struct qed_mcp_rdma_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	u64 rx_bytes;
	u64 tx_byts;
};

enum qed_mcp_protocol_type {
	QED_MCP_LAN_STATS,
	QED_MCP_FCOE_STATS,
	QED_MCP_ISCSI_STATS,
	QED_MCP_RDMA_STATS
};

union qed_mcp_protocol_stats {
	struct qed_mcp_lan_stats lan_stats;
	struct qed_mcp_fcoe_stats fcoe_stats;
	struct qed_mcp_iscsi_stats iscsi_stats;
	struct qed_mcp_rdma_stats rdma_stats;
};

enum qed_ov_eswitch {
	QED_OV_ESWITCH_NONE,
	QED_OV_ESWITCH_VEB,
	QED_OV_ESWITCH_VEPA
};

enum qed_ov_client {
	QED_OV_CLIENT_DRV,
	QED_OV_CLIENT_USER,
	QED_OV_CLIENT_VENDOR_SPEC
};

enum qed_ov_driver_state {
	QED_OV_DRIVER_STATE_NOT_LOADED,
	QED_OV_DRIVER_STATE_DISABLED,
	QED_OV_DRIVER_STATE_ACTIVE
};

enum qed_ov_wol {
	QED_OV_WOL_DEFAULT,
	QED_OV_WOL_DISABLED,
	QED_OV_WOL_ENABLED
};

enum qed_mfw_tlv_type {
	QED_MFW_TLV_GENERIC = 0x1,	/* Core driver TLVs */
	QED_MFW_TLV_ETH = 0x2,		/* L2 driver TLVs */
	QED_MFW_TLV_FCOE = 0x4,		/* FCoE protocol TLVs */
	QED_MFW_TLV_ISCSI = 0x8,	/* iSCSI protocol TLVs */
	QED_MFW_TLV_MAX = 0x16,
};

struct qed_mfw_tlv_generic {
#define QED_MFW_TLV_FLAGS_SIZE	2
	struct {
		u8 ipv4_csum_offload;
		u8 lso_supported;
		bool b_set;
	} flags;

#define QED_MFW_TLV_MAC_COUNT 3
	/* First entry for the primary MAC, 2 secondary MACs possible */
	u8 mac[QED_MFW_TLV_MAC_COUNT][6];
	bool mac_set[QED_MFW_TLV_MAC_COUNT];

	u64 rx_frames;
	bool rx_frames_set;
	u64 rx_bytes;
	bool rx_bytes_set;
	u64 tx_frames;
	bool tx_frames_set;
	u64 tx_bytes;
	bool tx_bytes_set;
};

union qed_mfw_tlv_data {
	struct qed_mfw_tlv_generic generic;
	struct qed_mfw_tlv_eth eth;
	struct qed_mfw_tlv_fcoe fcoe;
	struct qed_mfw_tlv_iscsi iscsi;
};

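/*
 * Illustrative sketch only: when qed_mfw_process_tlv_req() (declared
 * further below) asks the qed client for QED_MFW_TLV_GENERIC data, the
 * client fills the fields it knows and marks them valid through the
 * matching *_set flags; fields left unset are not reported to the MFW.
 * A hypothetical fill could look like:
 *
 *	union qed_mfw_tlv_data *tlv;
 *
 *	tlv->generic.flags.lso_supported = 1;
 *	tlv->generic.flags.ipv4_csum_offload = 1;
 *	tlv->generic.flags.b_set = true;
 *	tlv->generic.rx_frames = rx_frame_count;
 *	tlv->generic.rx_frames_set = true;
 */
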
#define QED_NVM_CFG_OPTION_ALL		BIT(0)
#define QED_NVM_CFG_OPTION_INIT		BIT(1)
#define QED_NVM_CFG_OPTION_COMMIT	BIT(2)
#define QED_NVM_CFG_OPTION_FREE		BIT(3)
#define QED_NVM_CFG_OPTION_ENTITY_SEL	BIT(4)

/**
 * qed_mcp_get_link_params(): Returns the link params of the hw function.
 *
 * @p_hwfn: HW device data.
 *
 * Returns: Pointer to link params.
 */
struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_get_link_state(): Return the link state of the hw function.
 *
 * @p_hwfn: HW device data.
 *
 * Returns: Pointer to link state.
 */
struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_get_link_capabilities(): Return the link capabilities of the
 *                                  hw function.
 *
 * @p_hwfn: HW device data.
 *
 * Returns: Pointer to link capabilities.
 */
struct qed_mcp_link_capabilities
	*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_set_link(): Request the MFW to set the link according
 *                     to 'link_input'.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @b_up: Raise link if `true'. Reset link if `false'.
 *
 * Return: Int.
 */
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     bool b_up);

/**
 * qed_mcp_get_mfw_ver(): Get the management firmware version value.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_mfw_ver: MFW version value.
 * @p_running_bundle_id: Image id in nvram; Optional.
 *
 * Return: Int - 0 - operation was successful.
 */
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id);

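/*
 * Illustrative sketch only: the version word returned in @p_mfw_ver is a
 * packed value. Assuming the usual one-byte-per-component layout (most
 * significant byte first), it can be rendered for display roughly as:
 *
 *	u32 mfw_ver;
 *
 *	rc = qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, NULL);
 *	if (!rc)
 *		snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
 *			 (mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *			 (mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
 */
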
/**
 * qed_mcp_get_mbi_ver(): Get the MBI version value.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
 *
 * Return: Int - 0 - operation was successful.
 */
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver);

/**
 * qed_mcp_get_media_type(): Get media type value of the port.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @media_type: Media type value.
 *
 * Return: Int - 0 - Operation was successful.
 *         -EBUSY - Operation failed.
 */
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *media_type);

/**
 * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_transceiver_state: Transceiver state.
 * @p_tranceiver_type: Media type value.
 *
 * Return: Int - 0 - Operation was successful.
 *         -EBUSY - Operation failed.
 */
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_tranceiver_type);

/**
 * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_speed_mask: Bit mask of all supported speeds.
 *
 * Return: Int - 0 - Operation was successful.
 *         -EBUSY - Operation failed.
 */
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_speed_mask);

/**
 * qed_mcp_get_board_config(): Get board configuration.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_board_config: Board config.
 *
 * Return: Int - 0 - Operation was successful.
 *         -EBUSY - Operation failed.
 */
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_board_config);

/**
 * qed_mcp_cmd(): General function for sending commands to the MCP
 *                mailbox. It acquires a mutex lock for the entire
 *                operation, from sending the request until the MCP
 *                response is received. The response is polled every
 *                5 ms for up to 5 seconds.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @cmd: Command to be sent to the MCP.
 * @param: Optional param.
 * @o_mcp_resp: The MCP response code (excluding sequence).
 * @o_mcp_param: Optional parameter provided by the MCP response.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param);

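/*
 * Illustrative sketch only: a typical mailbox exchange acquires a PTT
 * window, issues one of the DRV_MSG_CODE_* commands defined in qed_hsi.h
 * and inspects the MCP response, e.g.:
 *
 *	u32 resp = 0, param = 0;
 *	struct qed_ptt *p_ptt;
 *
 *	p_ptt = qed_ptt_acquire(p_hwfn);
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &resp, &param);
 *	qed_ptt_release(p_hwfn, p_ptt);
 */
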
/**
 * qed_mcp_drain(): Drains the NIG, allowing completions to pass in
 *                  case of pauses.
 *                  (Should be called only from a sleepable context.)
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int.
 */
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);

/**
 * qed_mcp_get_flash_size(): Get the flash size value.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @p_flash_size: Flash size in bytes to be filled.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size);

/**
 * qed_mcp_send_drv_version(): Send driver version to MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @p_ver: Version value.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver);

/**
 * qed_get_process_kill_counter(): Read the MFW process kill counter.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: u32.
 */
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt);

/**
 * qed_start_recovery_process(): Trigger a recovery process.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int.
 */
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_recovery_prolog(): A recovery handler must call this function
 *                        as its first step.
 *                        It is assumed that the handler is not run from
 *                        an interrupt context.
 *
 * @cdev: Qed dev pointer.
 *
 * Return: int.
 */
int qed_recovery_prolog(struct qed_dev *cdev);

/**
 * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
 *                                     device properties.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @client: Qed client type.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client);

/**
 * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @drv_state: Driver state.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state);

/**
 * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @mtu: MTU size.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu);

/**
 * qed_mcp_ov_update_mac(): Send MAC address to MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @mac: MAC address.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, const u8 *mac);

/**
 * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @wol: WOL mode.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_ov_wol wol);

/**
 * qed_mcp_set_led(): Set LED status.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @mode: LED mode.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    enum qed_led_mode mode);

/**
 * qed_mcp_nvm_read(): Read from NVM.
 *
 * @cdev: Qed dev pointer.
 * @addr: NVM offset.
 * @p_buf: NVM read buffer.
 * @len: Buffer len.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);

/**
 * qed_mcp_nvm_write(): Write to NVM.
 *
 * @cdev: Qed dev pointer.
 * @cmd: NVM command.
 * @addr: NVM offset.
 * @p_buf: NVM write buffer.
 * @len: Buffer len.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len);

/**
 * qed_mcp_nvm_resp(): Check latest response.
 *
 * @cdev: Qed dev pointer.
 * @p_buf: Buffer into which the latest NVM response is copied.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);

struct qed_nvm_image_att {
	u32 start_addr;
	u32 length;
};

/**
 * qed_mcp_get_nvm_image_att(): Get the attributes (address and length)
 *                              of an nvram image.
 *
 * @p_hwfn: HW device data.
 * @image_id: Image to get attributes for.
 * @p_image_att: Image attributes structure into which to fill data.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att);

/**
 * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
 *
 * @p_hwfn: HW device data.
 * @image_id: image requested for reading.
 * @p_buffer: allocated buffer into which to fill data.
 * @buffer_len: length of the allocated buffer.
 *
 * Return: 0 if p_buffer now contains the nvram image.
 */
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len);

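/*
 * Illustrative sketch only: reading a whole NVM image is normally a
 * two-step operation - query the image attributes to learn its length,
 * then read it into a buffer of at least that size (image id chosen for
 * illustration):
 *
 *	struct qed_nvm_image_att att;
 *	u8 *buf;
 *
 *	rc = qed_mcp_get_nvm_image_att(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &att);
 *	if (rc)
 *		return rc;
 *	buf = kzalloc(att.length, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	rc = qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, buf,
 *				   att.length);
 */
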
/**
 * qed_mcp_bist_register_test(): Bist register test.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt);

/**
 * qed_mcp_bist_clock_test(): Bist clock test.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt);

/**
 * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @num_images: number of images if operation was successful. 0 if not.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images);

/**
 * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
 *                                   by index.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @p_image_att: Attributes of image.
 * @image_index: Index of image to get information for.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index);

/**
 * qed_mfw_process_tlv_req(): Processes the TLV request from the MFW,
 *                            i.e. gets the required TLV info from the
 *                            qed client and sends it to the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 upon success.
 */
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_buf: raw debug data buffer.
 * @size: Buffer size.
 *
 * Return: Int.
 */
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size);

/* Using the hwfn number (and not pf_num) is required since in CMT mode,
 * the same pf_num may be used by two different hwfns.
 * TODO - this shouldn't really be in a .h file, but until all fields
 * required during hw-init are placed in their correct place in shmem
 * we need it in qed_dev.c [for reading the nvram reflection in shmem].
 */
#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ?	       \
					    ((rel_pfid) |		       \
					     ((p_hwfn)->abs_pf_id & 1) << 3) : \
					    rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)

struct qed_mcp_info {
	/* List for mailbox commands which were sent and are waiting for
	 * a response.
	 */
	struct list_head cmd_list;

	/* Spinlock used for protecting the access to the mailbox commands
	 * list and the sending of the commands.
	 */
	spinlock_t cmd_lock;

	/* Flag to indicate whether sending an MFW mailbox command is blocked */
	bool b_block_cmd;

	/* Spinlock used for syncing SW link-changes and link-changes
	 * originating from attention context.
	 */
	spinlock_t link_lock;

	u32 public_base;
	u32 drv_mb_addr;
	u32 mfw_mb_addr;
	u32 port_addr;
	u16 drv_mb_seq;
	u16 drv_pulse_seq;
	struct qed_mcp_link_params link_input;
	struct qed_mcp_link_state link_output;
	struct qed_mcp_link_capabilities link_capabilities;
	struct qed_mcp_function_info func_info;
	u8 *mfw_mb_cur;
	u8 *mfw_mb_shadow;
	u16 mfw_mb_length;
	u32 mcp_hist;

	/* Capabilities negotiated with the MFW */
	u32 capabilities;

	/* S/N for debug data mailbox commands */
	atomic_t dbg_data_seq;
};

struct qed_mcp_mb_params {
	u32 cmd;
	u32 param;
	void *p_data_src;
	void *p_data_dst;
	u8 data_src_size;
	u8 data_dst_size;
	u32 mcp_resp;
	u32 mcp_param;
	u32 flags;
#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
#define QED_MB_FLAGS_IS_SET(params, flag) \
	({ typeof(params) __params = (params); \
	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};

struct qed_drv_tlv_hdr {
	u8 tlv_type;
	u8 tlv_length;	/* In dwords - not including this header */
	u8 tlv_reserved;
#define QED_DRV_TLV_FLAGS_CHANGED 0x01
	u8 tlv_flags;
};

/**
 * qed_mcp_is_ext_speed_supported() - Check if management firmware supports
 *                                    extended speeds.
 * @p_hwfn: HW device data.
 *
 * Return: true if supported, false otherwise.
 */
static inline bool
qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
}

/**
 * qed_mcp_cmd_init(): Initialize the interface with the MCP.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int.
 */
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Void.
 *
 * Can only be called after `num_ports_in_engines' is set.
 */
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt);

/**
 * qed_mcp_free(): Releases resources allocated during the init process.
 *
 * @p_hwfn: HW function.
 *
 * Return: Int.
 */
int qed_mcp_free(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_handle_events(): This function is called from the DPC context.
 *                          After pointing PTT to the mfw mb, check for events
 *                          sent by the MCP to the driver and ack them. In case
 *                          a critical event is detected, it will be handled
 *                          here; otherwise the work will be queued to a
 *                          sleepable work-queue.
 *
 * @p_hwfn: HW function.
 * @p_ptt: PTT required for register access.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt);

enum qed_drv_role {
	QED_DRV_ROLE_OS,
	QED_DRV_ROLE_KDUMP,
};

struct qed_load_req_params {
	/* Input params */
	enum qed_drv_role drv_role;
	u8 timeout_val;
	bool avoid_eng_reset;
	enum qed_override_force_load override_force_load;

	/* Output params */
	u32 load_code;
};

/**
 * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
 *                     operation succeeds, returns whether this PF is
 *                     the first on the engine/port or function.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_params: Params.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params);

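/*
 * Illustrative sketch only: the load handshake fills the request
 * parameters, sends LOAD_REQ and branches on the load_code returned by
 * the MFW (first on engine/port vs. function only); LOAD_DONE is sent
 * once initialization has finished:
 *
 *	struct qed_load_req_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.drv_role = QED_DRV_ROLE_OS;
 *	rc = qed_mcp_load_req(p_hwfn, p_ptt, &params);
 *	if (rc)
 *		return rc;
 *	switch (params.load_code) {
 *	case FW_MSG_CODE_DRV_LOAD_ENGINE:
 *		...
 *	}
 *	...
 *	rc = qed_mcp_load_done(p_hwfn, p_ptt);
 */
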
/**
 * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_unload_req(): Sends an UNLOAD_REQ message to the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_unload_done(): Sends an UNLOAD_DONE message to the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Void.
 */
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver finished the FLR
 *                       process for VFs.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack);

/**
 * qed_mcp_fill_shmem_func_info(): Called during init to read
 *                                 function-related info from shmem.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 upon success.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt);

/**
 * qed_mcp_reset(): Reset the MCP using mailbox command.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 upon success.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);

/**
 * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
 *                       a buffer.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
 *       DRV_MSG_CODE_NVM_READ_NVRAM commands.
 * @param: [0:23] - Offset [24:31] - Size.
 * @o_mcp_resp: MCP response.
 * @o_mcp_param: MCP response param.
 * @o_txn_size: Buffer size output.
 * @o_buf: Pointer to the buffer returned by the MFW.
 * @b_can_sleep: Can sleep.
 *
 * Return: 0 upon success.
 */
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param,
		       u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);

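/*
 * Illustrative sketch only: per the description above, @param packs the
 * NVM offset in bits [0:23] and the transaction size in bits [24:31], so
 * a raw read of len bytes at offset addr could be issued roughly as
 * (staging buffer sized arbitrarily here):
 *
 *	u32 resp, resp_param, read_len;
 *	u32 buf[32];
 *
 *	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
 *				addr | (len << 24), &resp, &resp_param,
 *				&read_len, buf, false);
 */
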
/**
 * qed_mcp_phy_sfp_read(): Read from sfp.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT required for register access.
 * @port: transceiver port.
 * @addr: I2C address.
 * @offset: offset in sfp.
 * @len: buffer length.
 * @p_buf: buffer to read into.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);

/**
 * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
 *                    are accessible.
 *
 * @p_hwfn: HW device data.
 *
 * Return: true if MFW is running and mcp_info is initialized.
 */
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @vf_id: absolute inside engine.
 * @num: number of entries to request.
 *
 * Return: Int.
 */
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num);

/**
 * qed_mcp_halt(): Halt the MCP.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 upon success.
 */
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_resume(): Wake up the MCP.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 upon success.
 */
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw);
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw);

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities);

/**
 * qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_mdump_retain: mdump retain.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct mdump_retain_data_stc *p_mdump_retain);

/**
 * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @res_id: Resource ID.
 * @resc_max_val: Resource max value.
 * @p_mcp_resp: MCP response.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp);

/**
 * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
 *                          resource.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @res_id: Resource ID.
 * @p_mcp_resp: MCP response.
 * @p_resc_num: Number of allocated resources.
 * @p_resc_start: Start offset of the allocated resources.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);

/**
 * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @eswitch: eswitch mode.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch);

#define QED_MCP_RESC_LOCK_MIN_VAL	RESOURCE_DUMP
#define QED_MCP_RESC_LOCK_MAX_VAL	31

enum qed_resc_lock {
	QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
	QED_RESC_LOCK_PTP_PORT0,
	QED_RESC_LOCK_PTP_PORT1,
	QED_RESC_LOCK_PTP_PORT2,
	QED_RESC_LOCK_PTP_PORT3,
	QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
	QED_RESC_LOCK_RESC_INVALID
};

/**
 * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int - 0 - Operation was successful.
 */
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

struct qed_resc_lock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Lock timeout value in seconds [default, none or 1..254] */
	u8 timeout;
#define QED_MCP_RESC_LOCK_TO_DEFAULT	0
#define QED_MCP_RESC_LOCK_TO_NONE	255

	/* Number of times to retry locking */
	u8 retry_num;
#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT	10

	/* The interval in usec between retries */
	u16 retry_interval;
#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT	10000

	/* Use sleep or delay between retries */
	bool sleep_b4_retry;

	/* Will be set as true if the resource is free and granted */
	bool b_granted;

	/* Will be filled with the resource owner.
	 * [0..15 = PF0-15, 16 = MFW]
	 */
	u8 owner;
};

/**
 * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_params: Params.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);

struct qed_resc_unlock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Allow releasing the resource even if it belongs to another PF */
	bool b_force;

	/* Will be set as true if the resource is released */
	bool b_released;
};

/**
 * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @p_params: Params.
 *
 * Return: Int - 0 - Operation was successful.
 */
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params);

/**
 * qed_mcp_resc_lock_default_init(): Default initialization for
 *                                   lock/unlock resource structs.
 *
 * @p_lock: lock params struct to be initialized; Can be NULL.
 * @p_unlock: unlock params struct to be initialized; Can be NULL.
 * @resource: the requested resource.
 * @b_is_permanent: disable retries & aging when set.
 *
 * Return: Void.
 */
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock resource,
				    bool b_is_permanent);

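/*
 * Illustrative sketch only: the usual pattern initializes both parameter
 * structs with qed_mcp_resc_lock_default_init(), takes the lock, checks
 * that it was actually granted and releases it when done:
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_DBG_DUMP, false);
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock);
 *	if (rc || !lock.b_granted)
 *		return rc ? rc : -EBUSY;
 *	...critical section...
 *	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 */
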
/**
 * qed_mcp_is_smart_an_supported(): Return whether the management firmware
 *                                  supports smart AN.
 *
 * @p_hwfn: HW device data.
 *
 * Return: bool true if feature is supported.
 */
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_get_capabilities(): Learn of supported MFW features;
 *                             to be done during early init.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_set_capabilities(): Inform MFW of the set of features supported
 *                             by the driver. Should be done as part of the
 *                             LOAD_REQ flow.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Void.
 */
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
 *                              hardware function.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
 *                          hardware function.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);

/**
 * qed_mcp_get_engine_config(): Get the engine affinity configuration.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @option_id: Option ID.
 * @entity_id: Entity ID.
 * @flags: Flags.
 * @p_buf: Buffer into which the attribute value is read.
 * @p_len: Length of the data read.
 *
 * Return: Int.
 */
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len);

/**
 * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @option_id: Option ID.
 * @entity_id: Entity ID.
 * @flags: Flags.
 * @p_buf: Buffer holding the attribute value to set.
 * @len: Buffer length.
 *
 * Return: Int.
 */
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len);

#endif