1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #ifndef _QED_VF_H 34 #define _QED_VF_H 35 36 #include "qed_l2.h" 37 #include "qed_mcp.h" 38 39 #define T_ETH_INDIRECTION_TABLE_SIZE 128 40 #define T_ETH_RSS_KEY_SIZE 10 41 42 struct vf_pf_resc_request { 43 u8 num_rxqs; 44 u8 num_txqs; 45 u8 num_sbs; 46 u8 num_mac_filters; 47 u8 num_vlan_filters; 48 u8 num_mc_filters; 49 u16 padding; 50 }; 51 52 struct hw_sb_info { 53 u16 hw_sb_id; 54 u8 sb_qid; 55 u8 padding[5]; 56 }; 57 58 #define TLV_BUFFER_SIZE 1024 59 60 enum { 61 PFVF_STATUS_WAITING, 62 PFVF_STATUS_SUCCESS, 63 PFVF_STATUS_FAILURE, 64 PFVF_STATUS_NOT_SUPPORTED, 65 PFVF_STATUS_NO_RESOURCE, 66 PFVF_STATUS_FORCED, 67 PFVF_STATUS_MALICIOUS, 68 }; 69 70 /* vf pf channel tlvs */ 71 /* general tlv header (used for both vf->pf request and pf->vf response) */ 72 struct channel_tlv { 73 u16 type; 74 u16 length; 75 }; 76 77 /* header of first vf->pf tlv carries the offset used to calculate reponse 78 * buffer address 79 */ 80 struct vfpf_first_tlv { 81 struct channel_tlv tl; 82 u32 padding; 83 u64 reply_address; 84 }; 85 86 /* header of pf->vf tlvs, carries the status of handling the request */ 87 struct pfvf_tlv { 88 struct channel_tlv tl; 89 u8 status; 90 u8 padding[3]; 91 }; 92 93 /* response tlv used for most tlvs */ 94 struct pfvf_def_resp_tlv { 95 struct pfvf_tlv hdr; 96 }; 97 98 /* used to terminate and pad a tlv list */ 99 struct channel_list_end_tlv { 100 struct channel_tlv tl; 101 u8 padding[4]; 102 }; 103 104 #define VFPF_ACQUIRE_OS_LINUX (0) 105 #define VFPF_ACQUIRE_OS_WINDOWS (1) 106 #define VFPF_ACQUIRE_OS_ESX (2) 107 #define VFPF_ACQUIRE_OS_SOLARIS (3) 108 #define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) 109 110 struct vfpf_acquire_tlv { 111 struct vfpf_first_tlv first_tlv; 112 113 struct vf_pf_vfdev_info { 114 #define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ 115 #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ 116 u64 capabilities; 117 u8 fw_major; 118 u8 fw_minor; 119 u8 fw_revision; 120 u8 fw_engineering; 121 u32 driver_version; 122 u16 opaque_fid; /* ME register value */ 123 u8 os_type; /* VFPF_ACQUIRE_OS_* value */ 124 u8 eth_fp_hsi_major; 125 u8 eth_fp_hsi_minor; 126 u8 padding[3]; 127 } vfdev_info; 128 129 struct vf_pf_resc_request resc_request; 130 131 u64 bulletin_addr; 132 u32 bulletin_size; 133 u32 padding; 134 }; 135 136 /* receive side scaling tlv */ 137 struct vfpf_vport_update_rss_tlv { 138 struct channel_tlv tl; 139 140 u8 update_rss_flags; 141 #define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0) 142 #define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1) 143 #define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2) 144 #define VFPF_UPDATE_RSS_KEY_FLAG BIT(3) 145 146 u8 rss_enable; 147 u8 rss_caps; 148 u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ 149 u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; 150 u32 rss_key[T_ETH_RSS_KEY_SIZE]; 151 }; 152 153 struct pfvf_storm_stats { 154 u32 address; 155 u32 len; 156 }; 157 158 struct pfvf_stats_info { 159 struct pfvf_storm_stats mstats; 160 struct pfvf_storm_stats pstats; 161 struct pfvf_storm_stats tstats; 162 struct pfvf_storm_stats ustats; 163 }; 164 165 struct pfvf_acquire_resp_tlv { 166 struct pfvf_tlv hdr; 167 168 struct pf_vf_pfdev_info { 169 u32 chip_num; 170 u32 mfw_ver; 171 172 u16 fw_major; 173 u16 fw_minor; 174 u16 fw_rev; 175 u16 fw_eng; 176 177 u64 capabilities; 178 #define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0) 179 #define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */ 180 /* There are old PF versions where the PF might mistakenly override the sanity 181 * mechanism [version-based] and allow a VF that can't be supported to pass 182 * the acquisition phase. 183 * To overcome this, PFs now indicate that they're past that point and the new 184 * VFs would fail probe on the older PFs that fail to do so. 185 */ 186 #define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) 187 188 u16 db_size; 189 u8 indices_per_sb; 190 u8 os_type; 191 192 /* These should match the PF's qed_dev values */ 193 u16 chip_rev; 194 u8 dev_type; 195 196 u8 padding; 197 198 struct pfvf_stats_info stats_info; 199 200 u8 port_mac[ETH_ALEN]; 201 202 /* It's possible PF had to configure an older fastpath HSI 203 * [in case VF is newer than PF]. This is communicated back 204 * to the VF. It can also be used in case of error due to 205 * non-matching versions to shed light in VF about failure. 206 */ 207 u8 major_fp_hsi; 208 u8 minor_fp_hsi; 209 } pfdev_info; 210 211 struct pf_vf_resc { 212 #define PFVF_MAX_QUEUES_PER_VF 16 213 #define PFVF_MAX_SBS_PER_VF 16 214 struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; 215 u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; 216 u8 cid[PFVF_MAX_QUEUES_PER_VF]; 217 218 u8 num_rxqs; 219 u8 num_txqs; 220 u8 num_sbs; 221 u8 num_mac_filters; 222 u8 num_vlan_filters; 223 u8 num_mc_filters; 224 u8 padding[2]; 225 } resc; 226 227 u32 bulletin_size; 228 u32 padding; 229 }; 230 231 struct pfvf_start_queue_resp_tlv { 232 struct pfvf_tlv hdr; 233 u32 offset; /* offset to consumer/producer of queue */ 234 u8 padding[4]; 235 }; 236 237 /* Setup Queue */ 238 struct vfpf_start_rxq_tlv { 239 struct vfpf_first_tlv first_tlv; 240 241 /* physical addresses */ 242 u64 rxq_addr; 243 u64 deprecated_sge_addr; 244 u64 cqe_pbl_addr; 245 246 u16 cqe_pbl_size; 247 u16 hw_sb; 248 u16 rx_qid; 249 u16 hc_rate; /* desired interrupts per sec. */ 250 251 u16 bd_max_bytes; 252 u16 stat_id; 253 u8 sb_index; 254 u8 padding[3]; 255 }; 256 257 struct vfpf_start_txq_tlv { 258 struct vfpf_first_tlv first_tlv; 259 260 /* physical addresses */ 261 u64 pbl_addr; 262 u16 pbl_size; 263 u16 stat_id; 264 u16 tx_qid; 265 u16 hw_sb; 266 267 u32 flags; /* VFPF_QUEUE_FLG_X flags */ 268 u16 hc_rate; /* desired interrupts per sec. */ 269 u8 sb_index; 270 u8 padding[3]; 271 }; 272 273 /* Stop RX Queue */ 274 struct vfpf_stop_rxqs_tlv { 275 struct vfpf_first_tlv first_tlv; 276 277 u16 rx_qid; 278 279 /* this field is deprecated and should *always* be set to '1' */ 280 u8 num_rxqs; 281 u8 cqe_completion; 282 u8 padding[4]; 283 }; 284 285 /* Stop TX Queues */ 286 struct vfpf_stop_txqs_tlv { 287 struct vfpf_first_tlv first_tlv; 288 289 u16 tx_qid; 290 291 /* this field is deprecated and should *always* be set to '1' */ 292 u8 num_txqs; 293 u8 padding[5]; 294 }; 295 296 struct vfpf_update_rxq_tlv { 297 struct vfpf_first_tlv first_tlv; 298 299 u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; 300 301 u16 rx_qid; 302 u8 num_rxqs; 303 u8 flags; 304 #define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0) 305 #define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1) 306 #define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2) 307 308 u8 padding[4]; 309 }; 310 311 /* Set Queue Filters */ 312 struct vfpf_q_mac_vlan_filter { 313 u32 flags; 314 #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 315 #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 316 #define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ 317 318 u8 mac[ETH_ALEN]; 319 u16 vlan_tag; 320 321 u8 padding[4]; 322 }; 323 324 /* Start a vport */ 325 struct vfpf_vport_start_tlv { 326 struct vfpf_first_tlv first_tlv; 327 328 u64 sb_addr[PFVF_MAX_SBS_PER_VF]; 329 330 u32 tpa_mode; 331 u16 dep1; 332 u16 mtu; 333 334 u8 vport_id; 335 u8 inner_vlan_removal; 336 337 u8 only_untagged; 338 u8 max_buffers_per_cqe; 339 340 u8 padding[4]; 341 }; 342 343 /* Extended tlvs - need to add rss, mcast, accept mode tlvs */ 344 struct vfpf_vport_update_activate_tlv { 345 struct channel_tlv tl; 346 u8 update_rx; 347 u8 update_tx; 348 u8 active_rx; 349 u8 active_tx; 350 }; 351 352 struct vfpf_vport_update_tx_switch_tlv { 353 struct channel_tlv tl; 354 u8 tx_switching; 355 u8 padding[3]; 356 }; 357 358 struct vfpf_vport_update_vlan_strip_tlv { 359 struct channel_tlv tl; 360 u8 remove_vlan; 361 u8 padding[3]; 362 }; 363 364 struct vfpf_vport_update_mcast_bin_tlv { 365 struct channel_tlv tl; 366 u8 padding[4]; 367 368 u64 bins[8]; 369 }; 370 371 struct vfpf_vport_update_accept_param_tlv { 372 struct channel_tlv tl; 373 u8 update_rx_mode; 374 u8 update_tx_mode; 375 u8 rx_accept_filter; 376 u8 tx_accept_filter; 377 }; 378 379 struct vfpf_vport_update_accept_any_vlan_tlv { 380 struct channel_tlv tl; 381 u8 update_accept_any_vlan_flg; 382 u8 accept_any_vlan; 383 384 u8 padding[2]; 385 }; 386 387 struct vfpf_vport_update_sge_tpa_tlv { 388 struct channel_tlv tl; 389 390 u16 sge_tpa_flags; 391 #define VFPF_TPA_IPV4_EN_FLAG BIT(0) 392 #define VFPF_TPA_IPV6_EN_FLAG BIT(1) 393 #define VFPF_TPA_PKT_SPLIT_FLAG BIT(2) 394 #define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3) 395 #define VFPF_TPA_GRO_CONSIST_FLAG BIT(4) 396 397 u8 update_sge_tpa_flags; 398 #define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0) 399 #define VFPF_UPDATE_TPA_EN_FLAG BIT(1) 400 #define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2) 401 402 u8 max_buffers_per_cqe; 403 404 u16 deprecated_sge_buff_size; 405 u16 tpa_max_size; 406 u16 tpa_min_size_to_start; 407 u16 tpa_min_size_to_cont; 408 409 u8 tpa_max_aggs_num; 410 u8 padding[7]; 411 }; 412 413 /* Primary tlv as a header for various extended tlvs for 414 * various functionalities in vport update ramrod. 415 */ 416 struct vfpf_vport_update_tlv { 417 struct vfpf_first_tlv first_tlv; 418 }; 419 420 struct vfpf_ucast_filter_tlv { 421 struct vfpf_first_tlv first_tlv; 422 423 u8 opcode; 424 u8 type; 425 426 u8 mac[ETH_ALEN]; 427 428 u16 vlan; 429 u16 padding[3]; 430 }; 431 432 struct tlv_buffer_size { 433 u8 tlv_buffer[TLV_BUFFER_SIZE]; 434 }; 435 436 union vfpf_tlvs { 437 struct vfpf_first_tlv first_tlv; 438 struct vfpf_acquire_tlv acquire; 439 struct vfpf_start_rxq_tlv start_rxq; 440 struct vfpf_start_txq_tlv start_txq; 441 struct vfpf_stop_rxqs_tlv stop_rxqs; 442 struct vfpf_stop_txqs_tlv stop_txqs; 443 struct vfpf_update_rxq_tlv update_rxq; 444 struct vfpf_vport_start_tlv start_vport; 445 struct vfpf_vport_update_tlv vport_update; 446 struct vfpf_ucast_filter_tlv ucast_filter; 447 struct channel_list_end_tlv list_end; 448 struct tlv_buffer_size tlv_buf_size; 449 }; 450 451 union pfvf_tlvs { 452 struct pfvf_def_resp_tlv default_resp; 453 struct pfvf_acquire_resp_tlv acquire_resp; 454 struct tlv_buffer_size tlv_buf_size; 455 struct pfvf_start_queue_resp_tlv queue_start; 456 }; 457 458 enum qed_bulletin_bit { 459 /* Alert the VF that a forced MAC was set by the PF */ 460 MAC_ADDR_FORCED = 0, 461 /* Alert the VF that a forced VLAN was set by the PF */ 462 VLAN_ADDR_FORCED = 2, 463 464 /* Indicate that `default_only_untagged' contains actual data */ 465 VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, 466 VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, 467 468 /* Alert the VF that suggested mac was sent by the PF. 469 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set. 470 */ 471 VFPF_BULLETIN_MAC_ADDR = 5 472 }; 473 474 struct qed_bulletin_content { 475 /* crc of structure to ensure is not in mid-update */ 476 u32 crc; 477 478 u32 version; 479 480 /* bitmap indicating which fields hold valid values */ 481 u64 valid_bitmap; 482 483 /* used for MAC_ADDR or MAC_ADDR_FORCED */ 484 u8 mac[ETH_ALEN]; 485 486 /* If valid, 1 => only untagged Rx if no vlan is configured */ 487 u8 default_only_untagged; 488 u8 padding; 489 490 /* The following is a 'copy' of qed_mcp_link_state, 491 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's 492 * possible the structs will increase further along the road we cannot 493 * have it here; Instead we need to have all of its fields. 494 */ 495 u8 req_autoneg; 496 u8 req_autoneg_pause; 497 u8 req_forced_rx; 498 u8 req_forced_tx; 499 u8 padding2[4]; 500 501 u32 req_adv_speed; 502 u32 req_forced_speed; 503 u32 req_loopback; 504 u32 padding3; 505 506 u8 link_up; 507 u8 full_duplex; 508 u8 autoneg; 509 u8 autoneg_complete; 510 u8 parallel_detection; 511 u8 pfc_enabled; 512 u8 partner_tx_flow_ctrl_en; 513 u8 partner_rx_flow_ctrl_en; 514 u8 partner_adv_pause; 515 u8 sfp_tx_fault; 516 u8 padding4[6]; 517 518 u32 speed; 519 u32 partner_adv_speed; 520 521 u32 capability_speed; 522 523 /* Forced vlan */ 524 u16 pvid; 525 u16 padding5; 526 }; 527 528 struct qed_bulletin { 529 dma_addr_t phys; 530 struct qed_bulletin_content *p_virt; 531 u32 size; 532 }; 533 534 enum { 535 CHANNEL_TLV_NONE, /* ends tlv sequence */ 536 CHANNEL_TLV_ACQUIRE, 537 CHANNEL_TLV_VPORT_START, 538 CHANNEL_TLV_VPORT_UPDATE, 539 CHANNEL_TLV_VPORT_TEARDOWN, 540 CHANNEL_TLV_START_RXQ, 541 CHANNEL_TLV_START_TXQ, 542 CHANNEL_TLV_STOP_RXQS, 543 CHANNEL_TLV_STOP_TXQS, 544 CHANNEL_TLV_UPDATE_RXQ, 545 CHANNEL_TLV_INT_CLEANUP, 546 CHANNEL_TLV_CLOSE, 547 CHANNEL_TLV_RELEASE, 548 CHANNEL_TLV_LIST_END, 549 CHANNEL_TLV_UCAST_FILTER, 550 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 551 CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, 552 CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, 553 CHANNEL_TLV_VPORT_UPDATE_MCAST, 554 CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, 555 CHANNEL_TLV_VPORT_UPDATE_RSS, 556 CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, 557 CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, 558 CHANNEL_TLV_MAX, 559 560 /* Required for iterating over vport-update tlvs. 561 * Will break in case non-sequential vport-update tlvs. 562 */ 563 CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, 564 }; 565 566 /* This data is held in the qed_hwfn structure for VFs only. */ 567 struct qed_vf_iov { 568 union vfpf_tlvs *vf2pf_request; 569 dma_addr_t vf2pf_request_phys; 570 union pfvf_tlvs *pf2vf_reply; 571 dma_addr_t pf2vf_reply_phys; 572 573 /* Should be taken whenever the mailbox buffers are accessed */ 574 struct mutex mutex; 575 u8 *offset; 576 577 /* Bulletin Board */ 578 struct qed_bulletin bulletin; 579 struct qed_bulletin_content bulletin_shadow; 580 581 /* we set aside a copy of the acquire response */ 582 struct pfvf_acquire_resp_tlv acquire_resp; 583 584 /* In case PF originates prior to the fp-hsi version comparison, 585 * this has to be propagated as it affects the fastpath. 586 */ 587 bool b_pre_fp_hsi; 588 }; 589 590 #ifdef CONFIG_QED_SRIOV 591 /** 592 * @brief Read the VF bulletin and act on it if needed 593 * 594 * @param p_hwfn 595 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. 596 * 597 * @return enum _qed_status 598 */ 599 int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); 600 601 /** 602 * @brief Get link paramters for VF from qed 603 * 604 * @param p_hwfn 605 * @param params - the link params structure to be filled for the VF 606 */ 607 void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, 608 struct qed_mcp_link_params *params); 609 610 /** 611 * @brief Get link state for VF from qed 612 * 613 * @param p_hwfn 614 * @param link - the link state structure to be filled for the VF 615 */ 616 void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, 617 struct qed_mcp_link_state *link); 618 619 /** 620 * @brief Get link capabilities for VF from qed 621 * 622 * @param p_hwfn 623 * @param p_link_caps - the link capabilities structure to be filled for the VF 624 */ 625 void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, 626 struct qed_mcp_link_capabilities *p_link_caps); 627 628 /** 629 * @brief Get number of Rx queues allocated for VF by qed 630 * 631 * @param p_hwfn 632 * @param num_rxqs - allocated RX queues 633 */ 634 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); 635 636 /** 637 * @brief Get port mac address for VF 638 * 639 * @param p_hwfn 640 * @param port_mac - destination location for port mac 641 */ 642 void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); 643 644 /** 645 * @brief Get number of VLAN filters allocated for VF by qed 646 * 647 * @param p_hwfn 648 * @param num_rxqs - allocated VLAN filters 649 */ 650 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, 651 u8 *num_vlan_filters); 652 653 /** 654 * @brief Get number of MAC filters allocated for VF by qed 655 * 656 * @param p_hwfn 657 * @param num_rxqs - allocated MAC filters 658 */ 659 void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); 660 661 /** 662 * @brief Check if VF can set a MAC address 663 * 664 * @param p_hwfn 665 * @param mac 666 * 667 * @return bool 668 */ 669 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); 670 671 /** 672 * @brief Set firmware version information in dev_info from VFs acquire response tlv 673 * 674 * @param p_hwfn 675 * @param fw_major 676 * @param fw_minor 677 * @param fw_rev 678 * @param fw_eng 679 */ 680 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, 681 u16 *fw_major, u16 *fw_minor, 682 u16 *fw_rev, u16 *fw_eng); 683 684 /** 685 * @brief hw preparation for VF 686 * sends ACQUIRE message 687 * 688 * @param p_hwfn 689 * 690 * @return int 691 */ 692 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); 693 694 /** 695 * @brief VF - start the RX Queue by sending a message to the PF 696 * @param p_hwfn 697 * @param p_cid - Only relative fields are relevant 698 * @param bd_max_bytes - maximum number of bytes per bd 699 * @param bd_chain_phys_addr - physical address of bd chain 700 * @param cqe_pbl_addr - physical address of pbl 701 * @param cqe_pbl_size - pbl size 702 * @param pp_prod - pointer to the producer to be 703 * used in fastpath 704 * 705 * @return int 706 */ 707 int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, 708 struct qed_queue_cid *p_cid, 709 u16 bd_max_bytes, 710 dma_addr_t bd_chain_phys_addr, 711 dma_addr_t cqe_pbl_addr, 712 u16 cqe_pbl_size, void __iomem **pp_prod); 713 714 /** 715 * @brief VF - start the TX queue by sending a message to the 716 * PF. 717 * 718 * @param p_hwfn 719 * @param tx_queue_id - zero based within the VF 720 * @param sb - status block for this queue 721 * @param sb_index - index within the status block 722 * @param bd_chain_phys_addr - physical address of tx chain 723 * @param pp_doorbell - pointer to address to which to 724 * write the doorbell too.. 725 * 726 * @return int 727 */ 728 int 729 qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, 730 struct qed_queue_cid *p_cid, 731 dma_addr_t pbl_addr, 732 u16 pbl_size, void __iomem **pp_doorbell); 733 734 /** 735 * @brief VF - stop the RX queue by sending a message to the PF 736 * 737 * @param p_hwfn 738 * @param p_cid 739 * @param cqe_completion 740 * 741 * @return int 742 */ 743 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, 744 struct qed_queue_cid *p_cid, bool cqe_completion); 745 746 /** 747 * @brief VF - stop the TX queue by sending a message to the PF 748 * 749 * @param p_hwfn 750 * @param tx_qid 751 * 752 * @return int 753 */ 754 int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); 755 756 /** 757 * @brief VF - send a vport update command 758 * 759 * @param p_hwfn 760 * @param params 761 * 762 * @return int 763 */ 764 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, 765 struct qed_sp_vport_update_params *p_params); 766 767 /** 768 * 769 * @brief VF - send a close message to PF 770 * 771 * @param p_hwfn 772 * 773 * @return enum _qed_status 774 */ 775 int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); 776 777 /** 778 * @brief VF - free vf`s memories 779 * 780 * @param p_hwfn 781 * 782 * @return enum _qed_status 783 */ 784 int qed_vf_pf_release(struct qed_hwfn *p_hwfn); 785 786 /** 787 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given 788 * sb_id. For VFs igu sbs don't have to be contiguous 789 * 790 * @param p_hwfn 791 * @param sb_id 792 * 793 * @return INLINE u16 794 */ 795 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); 796 797 /** 798 * @brief qed_vf_pf_vport_start - perform vport start for VF. 799 * 800 * @param p_hwfn 801 * @param vport_id 802 * @param mtu 803 * @param inner_vlan_removal 804 * @param tpa_mode 805 * @param max_buffers_per_cqe, 806 * @param only_untagged - default behavior regarding vlan acceptance 807 * 808 * @return enum _qed_status 809 */ 810 int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, 811 u8 vport_id, 812 u16 mtu, 813 u8 inner_vlan_removal, 814 enum qed_tpa_mode tpa_mode, 815 u8 max_buffers_per_cqe, u8 only_untagged); 816 817 /** 818 * @brief qed_vf_pf_vport_stop - stop the VF's vport 819 * 820 * @param p_hwfn 821 * 822 * @return enum _qed_status 823 */ 824 int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); 825 826 int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, 827 struct qed_filter_ucast *p_param); 828 829 void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, 830 struct qed_filter_mcast *p_filter_cmd); 831 832 /** 833 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF 834 * 835 * @param p_hwfn 836 * 837 * @return enum _qed_status 838 */ 839 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); 840 841 /** 842 * @brief - return the link params in a given bulletin board 843 * 844 * @param p_hwfn 845 * @param p_params - pointer to a struct to fill with link params 846 * @param p_bulletin 847 */ 848 void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, 849 struct qed_mcp_link_params *p_params, 850 struct qed_bulletin_content *p_bulletin); 851 852 /** 853 * @brief - return the link state in a given bulletin board 854 * 855 * @param p_hwfn 856 * @param p_link - pointer to a struct to fill with link state 857 * @param p_bulletin 858 */ 859 void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, 860 struct qed_mcp_link_state *p_link, 861 struct qed_bulletin_content *p_bulletin); 862 863 /** 864 * @brief - return the link capabilities in a given bulletin board 865 * 866 * @param p_hwfn 867 * @param p_link - pointer to a struct to fill with link capabilities 868 * @param p_bulletin 869 */ 870 void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, 871 struct qed_mcp_link_capabilities *p_link_caps, 872 struct qed_bulletin_content *p_bulletin); 873 874 void qed_iov_vf_task(struct work_struct *work); 875 #else 876 static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, 877 struct qed_mcp_link_params *params) 878 { 879 } 880 881 static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, 882 struct qed_mcp_link_state *link) 883 { 884 } 885 886 static inline void 887 qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, 888 struct qed_mcp_link_capabilities *p_link_caps) 889 { 890 } 891 892 static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) 893 { 894 } 895 896 static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) 897 { 898 } 899 900 static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, 901 u8 *num_vlan_filters) 902 { 903 } 904 905 static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, 906 u8 *num_mac_filters) 907 { 908 } 909 910 static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) 911 { 912 return false; 913 } 914 915 static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, 916 u16 *fw_major, u16 *fw_minor, 917 u16 *fw_rev, u16 *fw_eng) 918 { 919 } 920 921 static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) 922 { 923 return -EINVAL; 924 } 925 926 static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, 927 struct qed_queue_cid *p_cid, 928 u16 bd_max_bytes, 929 dma_addr_t bd_chain_phys_adr, 930 dma_addr_t cqe_pbl_addr, 931 u16 cqe_pbl_size, void __iomem **pp_prod) 932 { 933 return -EINVAL; 934 } 935 936 static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, 937 struct qed_queue_cid *p_cid, 938 dma_addr_t pbl_addr, 939 u16 pbl_size, void __iomem **pp_doorbell) 940 { 941 return -EINVAL; 942 } 943 944 static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, 945 struct qed_queue_cid *p_cid, 946 bool cqe_completion) 947 { 948 return -EINVAL; 949 } 950 951 static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, 952 struct qed_queue_cid *p_cid) 953 { 954 return -EINVAL; 955 } 956 957 static inline int 958 qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, 959 struct qed_sp_vport_update_params *p_params) 960 { 961 return -EINVAL; 962 } 963 964 static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) 965 { 966 return -EINVAL; 967 } 968 969 static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn) 970 { 971 return -EINVAL; 972 } 973 974 static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 975 { 976 return 0; 977 } 978 979 static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, 980 u8 vport_id, 981 u16 mtu, 982 u8 inner_vlan_removal, 983 enum qed_tpa_mode tpa_mode, 984 u8 max_buffers_per_cqe, 985 u8 only_untagged) 986 { 987 return -EINVAL; 988 } 989 990 static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) 991 { 992 return -EINVAL; 993 } 994 995 static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, 996 struct qed_filter_ucast *p_param) 997 { 998 return -EINVAL; 999 } 1000 1001 static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, 1002 struct qed_filter_mcast *p_filter_cmd) 1003 { 1004 } 1005 1006 static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) 1007 { 1008 return -EINVAL; 1009 } 1010 1011 static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, 1012 struct qed_mcp_link_params 1013 *p_params, 1014 struct qed_bulletin_content 1015 *p_bulletin) 1016 { 1017 } 1018 1019 static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, 1020 struct qed_mcp_link_state *p_link, 1021 struct qed_bulletin_content 1022 *p_bulletin) 1023 { 1024 } 1025 1026 static inline void 1027 __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, 1028 struct qed_mcp_link_capabilities *p_link_caps, 1029 struct qed_bulletin_content *p_bulletin) 1030 { 1031 } 1032 1033 static inline void qed_iov_vf_task(struct work_struct *work) 1034 { 1035 } 1036 #endif 1037 1038 #endif 1039