/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

/* Resource counts the VF asks the PF for in the ACQUIRE request.
 * NOTE(review): these TLV structs form the VF<->PF channel message format;
 * field layout (including explicit padding) must not be changed.
 */
struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

/* Per-status-block allocation information passed from PF to VF */
struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE 1024

/* Status codes the PF returns in pfvf_tlv.status */
enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

/* VF->PF ACQUIRE request - carries the VF's device/driver info,
 * requested resources and the bulletin board address.
 */
struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG	BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG	BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG	BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG	BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

/* Address/length pair describing one storm's statistics region */
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

/* Locations of the per-storm statistics regions given to the VF */
struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

/* PF->VF ACQUIRE response - PF device info plus the resources actually
 * allocated to the VF.
 */
struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};

/* PF->VF response to a queue-start request */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;	/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;	/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

/* VF->PF request to start a Tx queue */
struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;	/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;	/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

/* VF->PF request to update the state of Rx queue(s) */
struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG	BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG		BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG	BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

/* Vport-update extended tlv: Tx-switching configuration */
struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

/* Vport-update extended tlv: inner-vlan stripping configuration */
struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

/* Vport-update extended tlv: multicast filter bins */
struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

/* Vport-update extended tlv: Rx/Tx accept-mode configuration */
struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

/* Vport-update extended tlv: accept-any-vlan configuration */
struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

/* Vport-update extended tlv: TPA/SGE configuration */
struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

/* VF->PF request to add/remove a unicast MAC/VLAN filter */
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

/* PF->VF response carrying the resulting tunnel configuration */
struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

/* Pads the channel messages to their maximal size */
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

/* Union of all possible VF->PF request messages */
union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

/* Union of all possible PF->VF response messages */
union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
};

/* Bit positions inside qed_bulletin_content.valid_bitmap */
enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

/* Contents of the bulletin board buffer the VF reads; validity of each
 * field is indicated by the qed_bulletin_bit flags in valid_bitmap.
 */
struct qed_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

/* DMA mapping of the bulletin board buffer */
struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

/* TLV type values used on the VF<->PF channel */
enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from VFs acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *	sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param p_cid - Only relative fields are relevant
 * @param bd_max_bytes - maximum number of bytes per bd
 * @param bd_chain_phys_addr - physical address of bd chain
 * @param cqe_pbl_addr - physical address of pbl
 * @param cqe_pbl_size - pbl size
 * @param pp_prod - pointer to the producer to be used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param p_cid - queue to start; only relative fields are relevant
 * @param pbl_addr - physical address of the tx pbl
 * @param pbl_size - pbl size
 * @param pp_doorbell - pointer to address to which to
 *        write the doorbell to.
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid - queue to stop
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 *
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs igu sbs don't have to be contiguous
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return INLINE u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe,
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_tunn);
#else
/* CONFIG_QED_SRIOV disabled: no-op/-EINVAL stubs so callers compile */
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}

static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}

static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}
#endif

#endif