132a47e72SYuval Mintz /* QLogic qed NIC Driver 232a47e72SYuval Mintz * Copyright (c) 2015 QLogic Corporation 332a47e72SYuval Mintz * 432a47e72SYuval Mintz * This software is available under the terms of the GNU General Public License 532a47e72SYuval Mintz * (GPL) Version 2, available from the file COPYING in the main directory of 632a47e72SYuval Mintz * this source tree. 732a47e72SYuval Mintz */ 832a47e72SYuval Mintz 9dacd88d6SYuval Mintz #include <linux/etherdevice.h> 1036558c3dSYuval Mintz #include <linux/crc32.h> 110b55e27dSYuval Mintz #include <linux/qed/qed_iov_if.h> 121408cc1fSYuval Mintz #include "qed_cxt.h" 131408cc1fSYuval Mintz #include "qed_hsi.h" 1432a47e72SYuval Mintz #include "qed_hw.h" 151408cc1fSYuval Mintz #include "qed_init_ops.h" 1632a47e72SYuval Mintz #include "qed_int.h" 171408cc1fSYuval Mintz #include "qed_mcp.h" 1832a47e72SYuval Mintz #include "qed_reg_addr.h" 191408cc1fSYuval Mintz #include "qed_sp.h" 2032a47e72SYuval Mintz #include "qed_sriov.h" 2132a47e72SYuval Mintz #include "qed_vf.h" 2232a47e72SYuval Mintz 231408cc1fSYuval Mintz /* IOV ramrods */ 241fe614d1SYuval Mintz static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) 251408cc1fSYuval Mintz { 261408cc1fSYuval Mintz struct vf_start_ramrod_data *p_ramrod = NULL; 271408cc1fSYuval Mintz struct qed_spq_entry *p_ent = NULL; 281408cc1fSYuval Mintz struct qed_sp_init_data init_data; 291408cc1fSYuval Mintz int rc = -EINVAL; 301fe614d1SYuval Mintz u8 fp_minor; 311408cc1fSYuval Mintz 321408cc1fSYuval Mintz /* Get SPQ entry */ 331408cc1fSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 341408cc1fSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 351fe614d1SYuval Mintz init_data.opaque_fid = p_vf->opaque_fid; 361408cc1fSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 371408cc1fSYuval Mintz 381408cc1fSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 391408cc1fSYuval Mintz COMMON_RAMROD_VF_START, 401408cc1fSYuval Mintz PROTOCOLID_COMMON, &init_data); 
411408cc1fSYuval Mintz if (rc) 421408cc1fSYuval Mintz return rc; 431408cc1fSYuval Mintz 441408cc1fSYuval Mintz p_ramrod = &p_ent->ramrod.vf_start; 451408cc1fSYuval Mintz 461fe614d1SYuval Mintz p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); 471fe614d1SYuval Mintz p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); 481408cc1fSYuval Mintz 491fe614d1SYuval Mintz switch (p_hwfn->hw_info.personality) { 501fe614d1SYuval Mintz case QED_PCI_ETH: 511408cc1fSYuval Mintz p_ramrod->personality = PERSONALITY_ETH; 521fe614d1SYuval Mintz break; 531fe614d1SYuval Mintz case QED_PCI_ETH_ROCE: 541fe614d1SYuval Mintz p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; 551fe614d1SYuval Mintz break; 561fe614d1SYuval Mintz default: 571fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 581fe614d1SYuval Mintz p_hwfn->hw_info.personality); 591fe614d1SYuval Mintz return -EINVAL; 601fe614d1SYuval Mintz } 611fe614d1SYuval Mintz 621fe614d1SYuval Mintz fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; 631fe614d1SYuval Mintz if (fp_minor > ETH_HSI_VER_MINOR) { 641fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, 651fe614d1SYuval Mintz QED_MSG_IOV, 661fe614d1SYuval Mintz "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", 671fe614d1SYuval Mintz p_vf->abs_vf_id, 681fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, 691fe614d1SYuval Mintz fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 701fe614d1SYuval Mintz fp_minor = ETH_HSI_VER_MINOR; 711fe614d1SYuval Mintz } 721fe614d1SYuval Mintz 73351a4dedSYuval Mintz p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 741fe614d1SYuval Mintz p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; 751fe614d1SYuval Mintz 761fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 771fe614d1SYuval Mintz "VF[%d] - Starting using HSI %02x.%02x\n", 781fe614d1SYuval Mintz p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); 791408cc1fSYuval Mintz 801408cc1fSYuval Mintz return 
qed_spq_post(p_hwfn, p_ent, NULL); 811408cc1fSYuval Mintz } 821408cc1fSYuval Mintz 830b55e27dSYuval Mintz static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, 840b55e27dSYuval Mintz u32 concrete_vfid, u16 opaque_vfid) 850b55e27dSYuval Mintz { 860b55e27dSYuval Mintz struct vf_stop_ramrod_data *p_ramrod = NULL; 870b55e27dSYuval Mintz struct qed_spq_entry *p_ent = NULL; 880b55e27dSYuval Mintz struct qed_sp_init_data init_data; 890b55e27dSYuval Mintz int rc = -EINVAL; 900b55e27dSYuval Mintz 910b55e27dSYuval Mintz /* Get SPQ entry */ 920b55e27dSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 930b55e27dSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 940b55e27dSYuval Mintz init_data.opaque_fid = opaque_vfid; 950b55e27dSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 960b55e27dSYuval Mintz 970b55e27dSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 980b55e27dSYuval Mintz COMMON_RAMROD_VF_STOP, 990b55e27dSYuval Mintz PROTOCOLID_COMMON, &init_data); 1000b55e27dSYuval Mintz if (rc) 1010b55e27dSYuval Mintz return rc; 1020b55e27dSYuval Mintz 1030b55e27dSYuval Mintz p_ramrod = &p_ent->ramrod.vf_stop; 1040b55e27dSYuval Mintz 1050b55e27dSYuval Mintz p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 1060b55e27dSYuval Mintz 1070b55e27dSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1080b55e27dSYuval Mintz } 1090b55e27dSYuval Mintz 11032a47e72SYuval Mintz bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, 11132a47e72SYuval Mintz int rel_vf_id, bool b_enabled_only) 11232a47e72SYuval Mintz { 11332a47e72SYuval Mintz if (!p_hwfn->pf_iov_info) { 11432a47e72SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 11532a47e72SYuval Mintz return false; 11632a47e72SYuval Mintz } 11732a47e72SYuval Mintz 11832a47e72SYuval Mintz if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || 11932a47e72SYuval Mintz (rel_vf_id < 0)) 12032a47e72SYuval Mintz return false; 12132a47e72SYuval Mintz 12232a47e72SYuval Mintz if 
((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && 12332a47e72SYuval Mintz b_enabled_only) 12432a47e72SYuval Mintz return false; 12532a47e72SYuval Mintz 12632a47e72SYuval Mintz return true; 12732a47e72SYuval Mintz } 12832a47e72SYuval Mintz 12937bff2b9SYuval Mintz static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, 13037bff2b9SYuval Mintz u16 relative_vf_id, 13137bff2b9SYuval Mintz bool b_enabled_only) 13237bff2b9SYuval Mintz { 13337bff2b9SYuval Mintz struct qed_vf_info *vf = NULL; 13437bff2b9SYuval Mintz 13537bff2b9SYuval Mintz if (!p_hwfn->pf_iov_info) { 13637bff2b9SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 13737bff2b9SYuval Mintz return NULL; 13837bff2b9SYuval Mintz } 13937bff2b9SYuval Mintz 14037bff2b9SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) 14137bff2b9SYuval Mintz vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 14237bff2b9SYuval Mintz else 14337bff2b9SYuval Mintz DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", 14437bff2b9SYuval Mintz relative_vf_id); 14537bff2b9SYuval Mintz 14637bff2b9SYuval Mintz return vf; 14737bff2b9SYuval Mintz } 14837bff2b9SYuval Mintz 14936558c3dSYuval Mintz int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, 15036558c3dSYuval Mintz int vfid, struct qed_ptt *p_ptt) 15136558c3dSYuval Mintz { 15236558c3dSYuval Mintz struct qed_bulletin_content *p_bulletin; 15336558c3dSYuval Mintz int crc_size = sizeof(p_bulletin->crc); 15436558c3dSYuval Mintz struct qed_dmae_params params; 15536558c3dSYuval Mintz struct qed_vf_info *p_vf; 15636558c3dSYuval Mintz 15736558c3dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 15836558c3dSYuval Mintz if (!p_vf) 15936558c3dSYuval Mintz return -EINVAL; 16036558c3dSYuval Mintz 16136558c3dSYuval Mintz if (!p_vf->vf_bulletin) 16236558c3dSYuval Mintz return -EINVAL; 16336558c3dSYuval Mintz 16436558c3dSYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 16536558c3dSYuval Mintz 16636558c3dSYuval Mintz /* 
Increment bulletin board version and compute crc */ 16736558c3dSYuval Mintz p_bulletin->version++; 16836558c3dSYuval Mintz p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, 16936558c3dSYuval Mintz p_vf->bulletin.size - crc_size); 17036558c3dSYuval Mintz 17136558c3dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17236558c3dSYuval Mintz "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", 17336558c3dSYuval Mintz p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); 17436558c3dSYuval Mintz 17536558c3dSYuval Mintz /* propagate bulletin board via dmae to vm memory */ 17636558c3dSYuval Mintz memset(¶ms, 0, sizeof(params)); 17736558c3dSYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 17836558c3dSYuval Mintz params.dst_vfid = p_vf->abs_vf_id; 17936558c3dSYuval Mintz return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, 18036558c3dSYuval Mintz p_vf->vf_bulletin, p_vf->bulletin.size / 4, 18136558c3dSYuval Mintz ¶ms); 18236558c3dSYuval Mintz } 18336558c3dSYuval Mintz 18432a47e72SYuval Mintz static int qed_iov_pci_cfg_info(struct qed_dev *cdev) 18532a47e72SYuval Mintz { 18632a47e72SYuval Mintz struct qed_hw_sriov_info *iov = cdev->p_iov_info; 18732a47e72SYuval Mintz int pos = iov->pos; 18832a47e72SYuval Mintz 18932a47e72SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); 19032a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 19132a47e72SYuval Mintz 19232a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 19332a47e72SYuval Mintz pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); 19432a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 19532a47e72SYuval Mintz pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); 19632a47e72SYuval Mintz 19732a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); 19832a47e72SYuval Mintz if (iov->num_vfs) { 19932a47e72SYuval Mintz DP_VERBOSE(cdev, 20032a47e72SYuval Mintz QED_MSG_IOV, 20132a47e72SYuval Mintz "Number of VFs are already set to non-zero 
value. Ignoring PCI configuration value\n"); 20232a47e72SYuval Mintz iov->num_vfs = 0; 20332a47e72SYuval Mintz } 20432a47e72SYuval Mintz 20532a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 20632a47e72SYuval Mintz pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 20732a47e72SYuval Mintz 20832a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 20932a47e72SYuval Mintz pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 21032a47e72SYuval Mintz 21132a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 21232a47e72SYuval Mintz pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); 21332a47e72SYuval Mintz 21432a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, 21532a47e72SYuval Mintz pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 21632a47e72SYuval Mintz 21732a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); 21832a47e72SYuval Mintz 21932a47e72SYuval Mintz pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 22032a47e72SYuval Mintz 22132a47e72SYuval Mintz DP_VERBOSE(cdev, 22232a47e72SYuval Mintz QED_MSG_IOV, 22332a47e72SYuval Mintz "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 22432a47e72SYuval Mintz iov->nres, 22532a47e72SYuval Mintz iov->cap, 22632a47e72SYuval Mintz iov->ctrl, 22732a47e72SYuval Mintz iov->total_vfs, 22832a47e72SYuval Mintz iov->initial_vfs, 22932a47e72SYuval Mintz iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 23032a47e72SYuval Mintz 23132a47e72SYuval Mintz /* Some sanity checks */ 23232a47e72SYuval Mintz if (iov->num_vfs > NUM_OF_VFS(cdev) || 23332a47e72SYuval Mintz iov->total_vfs > NUM_OF_VFS(cdev)) { 23432a47e72SYuval Mintz /* This can happen only due to a bug. 
In this case we set 23532a47e72SYuval Mintz * num_vfs to zero to avoid memory corruption in the code that 23632a47e72SYuval Mintz * assumes max number of vfs 23732a47e72SYuval Mintz */ 23832a47e72SYuval Mintz DP_NOTICE(cdev, 23932a47e72SYuval Mintz "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", 24032a47e72SYuval Mintz iov->num_vfs); 24132a47e72SYuval Mintz 24232a47e72SYuval Mintz iov->num_vfs = 0; 24332a47e72SYuval Mintz iov->total_vfs = 0; 24432a47e72SYuval Mintz } 24532a47e72SYuval Mintz 24632a47e72SYuval Mintz return 0; 24732a47e72SYuval Mintz } 24832a47e72SYuval Mintz 24932a47e72SYuval Mintz static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, 25032a47e72SYuval Mintz struct qed_ptt *p_ptt) 25132a47e72SYuval Mintz { 25232a47e72SYuval Mintz struct qed_igu_block *p_sb; 25332a47e72SYuval Mintz u16 sb_id; 25432a47e72SYuval Mintz u32 val; 25532a47e72SYuval Mintz 25632a47e72SYuval Mintz if (!p_hwfn->hw_info.p_igu_info) { 25732a47e72SYuval Mintz DP_ERR(p_hwfn, 25832a47e72SYuval Mintz "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); 25932a47e72SYuval Mintz return; 26032a47e72SYuval Mintz } 26132a47e72SYuval Mintz 26232a47e72SYuval Mintz for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 26332a47e72SYuval Mintz sb_id++) { 26432a47e72SYuval Mintz p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; 26532a47e72SYuval Mintz if ((p_sb->status & QED_IGU_STATUS_FREE) && 26632a47e72SYuval Mintz !(p_sb->status & QED_IGU_STATUS_PF)) { 26732a47e72SYuval Mintz val = qed_rd(p_hwfn, p_ptt, 26832a47e72SYuval Mintz IGU_REG_MAPPING_MEMORY + sb_id * 4); 26932a47e72SYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 27032a47e72SYuval Mintz qed_wr(p_hwfn, p_ptt, 27132a47e72SYuval Mintz IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); 27232a47e72SYuval Mintz } 27332a47e72SYuval Mintz } 27432a47e72SYuval Mintz } 27532a47e72SYuval Mintz 27632a47e72SYuval Mintz static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) 27732a47e72SYuval 
Mintz { 27832a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 27932a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 28032a47e72SYuval Mintz struct qed_bulletin_content *p_bulletin_virt; 28132a47e72SYuval Mintz dma_addr_t req_p, rply_p, bulletin_p; 28232a47e72SYuval Mintz union pfvf_tlvs *p_reply_virt_addr; 28332a47e72SYuval Mintz union vfpf_tlvs *p_req_virt_addr; 28432a47e72SYuval Mintz u8 idx = 0; 28532a47e72SYuval Mintz 28632a47e72SYuval Mintz memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); 28732a47e72SYuval Mintz 28832a47e72SYuval Mintz p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; 28932a47e72SYuval Mintz req_p = p_iov_info->mbx_msg_phys_addr; 29032a47e72SYuval Mintz p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; 29132a47e72SYuval Mintz rply_p = p_iov_info->mbx_reply_phys_addr; 29232a47e72SYuval Mintz p_bulletin_virt = p_iov_info->p_bulletins; 29332a47e72SYuval Mintz bulletin_p = p_iov_info->bulletins_phys; 29432a47e72SYuval Mintz if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 29532a47e72SYuval Mintz DP_ERR(p_hwfn, 29632a47e72SYuval Mintz "qed_iov_setup_vfdb called without allocating mem first\n"); 29732a47e72SYuval Mintz return; 29832a47e72SYuval Mintz } 29932a47e72SYuval Mintz 30032a47e72SYuval Mintz for (idx = 0; idx < p_iov->total_vfs; idx++) { 30132a47e72SYuval Mintz struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; 30232a47e72SYuval Mintz u32 concrete; 30332a47e72SYuval Mintz 30432a47e72SYuval Mintz vf->vf_mbx.req_virt = p_req_virt_addr + idx; 30532a47e72SYuval Mintz vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); 30632a47e72SYuval Mintz vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; 30732a47e72SYuval Mintz vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); 30832a47e72SYuval Mintz 30932a47e72SYuval Mintz vf->state = VF_STOPPED; 31032a47e72SYuval Mintz vf->b_init = false; 31132a47e72SYuval Mintz 31232a47e72SYuval Mintz 
vf->bulletin.phys = idx * 31332a47e72SYuval Mintz sizeof(struct qed_bulletin_content) + 31432a47e72SYuval Mintz bulletin_p; 31532a47e72SYuval Mintz vf->bulletin.p_virt = p_bulletin_virt + idx; 31632a47e72SYuval Mintz vf->bulletin.size = sizeof(struct qed_bulletin_content); 31732a47e72SYuval Mintz 31832a47e72SYuval Mintz vf->relative_vf_id = idx; 31932a47e72SYuval Mintz vf->abs_vf_id = idx + p_iov->first_vf_in_pf; 32032a47e72SYuval Mintz concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); 32132a47e72SYuval Mintz vf->concrete_fid = concrete; 32232a47e72SYuval Mintz vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 32332a47e72SYuval Mintz (vf->abs_vf_id << 8); 32432a47e72SYuval Mintz vf->vport_id = idx + 1; 32532a47e72SYuval Mintz } 32632a47e72SYuval Mintz } 32732a47e72SYuval Mintz 32832a47e72SYuval Mintz static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) 32932a47e72SYuval Mintz { 33032a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 33132a47e72SYuval Mintz void **p_v_addr; 33232a47e72SYuval Mintz u16 num_vfs = 0; 33332a47e72SYuval Mintz 33432a47e72SYuval Mintz num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; 33532a47e72SYuval Mintz 33632a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 33732a47e72SYuval Mintz "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); 33832a47e72SYuval Mintz 33932a47e72SYuval Mintz /* Allocate PF Mailbox buffer (per-VF) */ 34032a47e72SYuval Mintz p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; 34132a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_msg_virt_addr; 34232a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 34332a47e72SYuval Mintz p_iov_info->mbx_msg_size, 34432a47e72SYuval Mintz &p_iov_info->mbx_msg_phys_addr, 34532a47e72SYuval Mintz GFP_KERNEL); 34632a47e72SYuval Mintz if (!*p_v_addr) 34732a47e72SYuval Mintz return -ENOMEM; 34832a47e72SYuval Mintz 34932a47e72SYuval Mintz /* Allocate PF Mailbox Reply buffer (per-VF) */ 35032a47e72SYuval Mintz 
p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; 35132a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_reply_virt_addr; 35232a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 35332a47e72SYuval Mintz p_iov_info->mbx_reply_size, 35432a47e72SYuval Mintz &p_iov_info->mbx_reply_phys_addr, 35532a47e72SYuval Mintz GFP_KERNEL); 35632a47e72SYuval Mintz if (!*p_v_addr) 35732a47e72SYuval Mintz return -ENOMEM; 35832a47e72SYuval Mintz 35932a47e72SYuval Mintz p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * 36032a47e72SYuval Mintz num_vfs; 36132a47e72SYuval Mintz p_v_addr = &p_iov_info->p_bulletins; 36232a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 36332a47e72SYuval Mintz p_iov_info->bulletins_size, 36432a47e72SYuval Mintz &p_iov_info->bulletins_phys, 36532a47e72SYuval Mintz GFP_KERNEL); 36632a47e72SYuval Mintz if (!*p_v_addr) 36732a47e72SYuval Mintz return -ENOMEM; 36832a47e72SYuval Mintz 36932a47e72SYuval Mintz DP_VERBOSE(p_hwfn, 37032a47e72SYuval Mintz QED_MSG_IOV, 37132a47e72SYuval Mintz "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 37232a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 37332a47e72SYuval Mintz (u64) p_iov_info->mbx_msg_phys_addr, 37432a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 37532a47e72SYuval Mintz (u64) p_iov_info->mbx_reply_phys_addr, 37632a47e72SYuval Mintz p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); 37732a47e72SYuval Mintz 37832a47e72SYuval Mintz return 0; 37932a47e72SYuval Mintz } 38032a47e72SYuval Mintz 38132a47e72SYuval Mintz static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) 38232a47e72SYuval Mintz { 38332a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 38432a47e72SYuval Mintz 38532a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) 38632a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 38732a47e72SYuval Mintz 
p_iov_info->mbx_msg_size, 38832a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 38932a47e72SYuval Mintz p_iov_info->mbx_msg_phys_addr); 39032a47e72SYuval Mintz 39132a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) 39232a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 39332a47e72SYuval Mintz p_iov_info->mbx_reply_size, 39432a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 39532a47e72SYuval Mintz p_iov_info->mbx_reply_phys_addr); 39632a47e72SYuval Mintz 39732a47e72SYuval Mintz if (p_iov_info->p_bulletins) 39832a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 39932a47e72SYuval Mintz p_iov_info->bulletins_size, 40032a47e72SYuval Mintz p_iov_info->p_bulletins, 40132a47e72SYuval Mintz p_iov_info->bulletins_phys); 40232a47e72SYuval Mintz } 40332a47e72SYuval Mintz 40432a47e72SYuval Mintz int qed_iov_alloc(struct qed_hwfn *p_hwfn) 40532a47e72SYuval Mintz { 40632a47e72SYuval Mintz struct qed_pf_iov *p_sriov; 40732a47e72SYuval Mintz 40832a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn)) { 40932a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 41032a47e72SYuval Mintz "No SR-IOV - no need for IOV db\n"); 41132a47e72SYuval Mintz return 0; 41232a47e72SYuval Mintz } 41332a47e72SYuval Mintz 41432a47e72SYuval Mintz p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); 41532a47e72SYuval Mintz if (!p_sriov) { 41632a47e72SYuval Mintz DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); 41732a47e72SYuval Mintz return -ENOMEM; 41832a47e72SYuval Mintz } 41932a47e72SYuval Mintz 42032a47e72SYuval Mintz p_hwfn->pf_iov_info = p_sriov; 42132a47e72SYuval Mintz 42232a47e72SYuval Mintz return qed_iov_allocate_vfdb(p_hwfn); 42332a47e72SYuval Mintz } 42432a47e72SYuval Mintz 42532a47e72SYuval Mintz void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 42632a47e72SYuval Mintz { 42732a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) 42832a47e72SYuval Mintz return; 42932a47e72SYuval Mintz 43032a47e72SYuval Mintz 
qed_iov_setup_vfdb(p_hwfn); 43132a47e72SYuval Mintz qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); 43232a47e72SYuval Mintz } 43332a47e72SYuval Mintz 43432a47e72SYuval Mintz void qed_iov_free(struct qed_hwfn *p_hwfn) 43532a47e72SYuval Mintz { 43632a47e72SYuval Mintz if (IS_PF_SRIOV_ALLOC(p_hwfn)) { 43732a47e72SYuval Mintz qed_iov_free_vfdb(p_hwfn); 43832a47e72SYuval Mintz kfree(p_hwfn->pf_iov_info); 43932a47e72SYuval Mintz } 44032a47e72SYuval Mintz } 44132a47e72SYuval Mintz 44232a47e72SYuval Mintz void qed_iov_free_hw_info(struct qed_dev *cdev) 44332a47e72SYuval Mintz { 44432a47e72SYuval Mintz kfree(cdev->p_iov_info); 44532a47e72SYuval Mintz cdev->p_iov_info = NULL; 44632a47e72SYuval Mintz } 44732a47e72SYuval Mintz 44832a47e72SYuval Mintz int qed_iov_hw_info(struct qed_hwfn *p_hwfn) 44932a47e72SYuval Mintz { 45032a47e72SYuval Mintz struct qed_dev *cdev = p_hwfn->cdev; 45132a47e72SYuval Mintz int pos; 45232a47e72SYuval Mintz int rc; 45332a47e72SYuval Mintz 4541408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 4551408cc1fSYuval Mintz return 0; 4561408cc1fSYuval Mintz 45732a47e72SYuval Mintz /* Learn the PCI configuration */ 45832a47e72SYuval Mintz pos = pci_find_ext_capability(p_hwfn->cdev->pdev, 45932a47e72SYuval Mintz PCI_EXT_CAP_ID_SRIOV); 46032a47e72SYuval Mintz if (!pos) { 46132a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); 46232a47e72SYuval Mintz return 0; 46332a47e72SYuval Mintz } 46432a47e72SYuval Mintz 46532a47e72SYuval Mintz /* Allocate a new struct for IOV information */ 46632a47e72SYuval Mintz cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); 46732a47e72SYuval Mintz if (!cdev->p_iov_info) { 46832a47e72SYuval Mintz DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n"); 46932a47e72SYuval Mintz return -ENOMEM; 47032a47e72SYuval Mintz } 47132a47e72SYuval Mintz cdev->p_iov_info->pos = pos; 47232a47e72SYuval Mintz 47332a47e72SYuval Mintz rc = qed_iov_pci_cfg_info(cdev); 47432a47e72SYuval Mintz if (rc) 
47532a47e72SYuval Mintz return rc; 47632a47e72SYuval Mintz 47732a47e72SYuval Mintz /* We want PF IOV to be synonemous with the existance of p_iov_info; 47832a47e72SYuval Mintz * In case the capability is published but there are no VFs, simply 47932a47e72SYuval Mintz * de-allocate the struct. 48032a47e72SYuval Mintz */ 48132a47e72SYuval Mintz if (!cdev->p_iov_info->total_vfs) { 48232a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 48332a47e72SYuval Mintz "IOV capabilities, but no VFs are published\n"); 48432a47e72SYuval Mintz kfree(cdev->p_iov_info); 48532a47e72SYuval Mintz cdev->p_iov_info = NULL; 48632a47e72SYuval Mintz return 0; 48732a47e72SYuval Mintz } 48832a47e72SYuval Mintz 48932a47e72SYuval Mintz /* Calculate the first VF index - this is a bit tricky; Basically, 49032a47e72SYuval Mintz * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin 49132a47e72SYuval Mintz * after the first engine's VFs. 49232a47e72SYuval Mintz */ 49332a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + 49432a47e72SYuval Mintz p_hwfn->abs_pf_id - 16; 49532a47e72SYuval Mintz if (QED_PATH_ID(p_hwfn)) 49632a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; 49732a47e72SYuval Mintz 49832a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 49932a47e72SYuval Mintz "First VF in hwfn 0x%08x\n", 50032a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf); 50132a47e72SYuval Mintz 50232a47e72SYuval Mintz return 0; 50332a47e72SYuval Mintz } 50432a47e72SYuval Mintz 50537bff2b9SYuval Mintz static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) 50637bff2b9SYuval Mintz { 50737bff2b9SYuval Mintz /* Check PF supports sriov */ 508b0409fa0SYuval Mintz if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || 509b0409fa0SYuval Mintz !IS_PF_SRIOV_ALLOC(p_hwfn)) 51037bff2b9SYuval Mintz return false; 51137bff2b9SYuval Mintz 51237bff2b9SYuval Mintz /* Check VF validity */ 513b0409fa0SYuval Mintz if 
(!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) 51437bff2b9SYuval Mintz return false; 51537bff2b9SYuval Mintz 51637bff2b9SYuval Mintz return true; 51737bff2b9SYuval Mintz } 51837bff2b9SYuval Mintz 5190b55e27dSYuval Mintz static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, 5200b55e27dSYuval Mintz u16 rel_vf_id, u8 to_disable) 5210b55e27dSYuval Mintz { 5220b55e27dSYuval Mintz struct qed_vf_info *vf; 5230b55e27dSYuval Mintz int i; 5240b55e27dSYuval Mintz 5250b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 5260b55e27dSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5270b55e27dSYuval Mintz 5280b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 5290b55e27dSYuval Mintz if (!vf) 5300b55e27dSYuval Mintz continue; 5310b55e27dSYuval Mintz 5320b55e27dSYuval Mintz vf->to_disable = to_disable; 5330b55e27dSYuval Mintz } 5340b55e27dSYuval Mintz } 5350b55e27dSYuval Mintz 5360b55e27dSYuval Mintz void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) 5370b55e27dSYuval Mintz { 5380b55e27dSYuval Mintz u16 i; 5390b55e27dSYuval Mintz 5400b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) 5410b55e27dSYuval Mintz return; 5420b55e27dSYuval Mintz 5430b55e27dSYuval Mintz for (i = 0; i < cdev->p_iov_info->total_vfs; i++) 5440b55e27dSYuval Mintz qed_iov_set_vf_to_disable(cdev, i, to_disable); 5450b55e27dSYuval Mintz } 5460b55e27dSYuval Mintz 5471408cc1fSYuval Mintz static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, 5481408cc1fSYuval Mintz struct qed_ptt *p_ptt, u8 abs_vfid) 5491408cc1fSYuval Mintz { 5501408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 5511408cc1fSYuval Mintz PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, 5521408cc1fSYuval Mintz 1 << (abs_vfid & 0x1f)); 5531408cc1fSYuval Mintz } 5541408cc1fSYuval Mintz 555dacd88d6SYuval Mintz static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, 556dacd88d6SYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 557dacd88d6SYuval Mintz { 558dacd88d6SYuval Mintz int i; 559dacd88d6SYuval 
Mintz 560dacd88d6SYuval Mintz /* Set VF masks and configuration - pretend */ 561dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 562dacd88d6SYuval Mintz 563dacd88d6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 564dacd88d6SYuval Mintz 565dacd88d6SYuval Mintz /* unpretend */ 566dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 567dacd88d6SYuval Mintz 568dacd88d6SYuval Mintz /* iterate over all queues, clear sb consumer */ 569b2b897ebSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 570b2b897ebSYuval Mintz qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 571b2b897ebSYuval Mintz vf->igu_sbs[i], 572b2b897ebSYuval Mintz vf->opaque_fid, true); 573dacd88d6SYuval Mintz } 574dacd88d6SYuval Mintz 5750b55e27dSYuval Mintz static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, 5760b55e27dSYuval Mintz struct qed_ptt *p_ptt, 5770b55e27dSYuval Mintz struct qed_vf_info *vf, bool enable) 5780b55e27dSYuval Mintz { 5790b55e27dSYuval Mintz u32 igu_vf_conf; 5800b55e27dSYuval Mintz 5810b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 5820b55e27dSYuval Mintz 5830b55e27dSYuval Mintz igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); 5840b55e27dSYuval Mintz 5850b55e27dSYuval Mintz if (enable) 5860b55e27dSYuval Mintz igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; 5870b55e27dSYuval Mintz else 5880b55e27dSYuval Mintz igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; 5890b55e27dSYuval Mintz 5900b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); 5910b55e27dSYuval Mintz 5920b55e27dSYuval Mintz /* unpretend */ 5930b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 5940b55e27dSYuval Mintz } 5950b55e27dSYuval Mintz 5961408cc1fSYuval Mintz static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, 5971408cc1fSYuval Mintz struct qed_ptt *p_ptt, 5981408cc1fSYuval Mintz struct qed_vf_info *vf) 5991408cc1fSYuval Mintz { 
6001408cc1fSYuval Mintz u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; 6011408cc1fSYuval Mintz int rc; 6021408cc1fSYuval Mintz 6030b55e27dSYuval Mintz if (vf->to_disable) 6040b55e27dSYuval Mintz return 0; 6050b55e27dSYuval Mintz 6061408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 6071408cc1fSYuval Mintz QED_MSG_IOV, 6081408cc1fSYuval Mintz "Enable internal access for vf %x [abs %x]\n", 6091408cc1fSYuval Mintz vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); 6101408cc1fSYuval Mintz 6111408cc1fSYuval Mintz qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); 6121408cc1fSYuval Mintz 613b2b897ebSYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 614b2b897ebSYuval Mintz 6151408cc1fSYuval Mintz rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); 6161408cc1fSYuval Mintz if (rc) 6171408cc1fSYuval Mintz return rc; 6181408cc1fSYuval Mintz 6191408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 6201408cc1fSYuval Mintz 6211408cc1fSYuval Mintz SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); 6221408cc1fSYuval Mintz STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); 6231408cc1fSYuval Mintz 6241408cc1fSYuval Mintz qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, 6251408cc1fSYuval Mintz p_hwfn->hw_info.hw_mode); 6261408cc1fSYuval Mintz 6271408cc1fSYuval Mintz /* unpretend */ 6281408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 6291408cc1fSYuval Mintz 6301408cc1fSYuval Mintz vf->state = VF_FREE; 6311408cc1fSYuval Mintz 6321408cc1fSYuval Mintz return rc; 6331408cc1fSYuval Mintz } 6341408cc1fSYuval Mintz 6350b55e27dSYuval Mintz /** 6360b55e27dSYuval Mintz * @brief qed_iov_config_perm_table - configure the permission 6370b55e27dSYuval Mintz * zone table. 6380b55e27dSYuval Mintz * In E4, queue zone permission table size is 320x9. 
There 6390b55e27dSYuval Mintz * are 320 VF queues for single engine device (256 for dual 6400b55e27dSYuval Mintz * engine device), and each entry has the following format: 6410b55e27dSYuval Mintz * {Valid, VF[7:0]} 6420b55e27dSYuval Mintz * @param p_hwfn 6430b55e27dSYuval Mintz * @param p_ptt 6440b55e27dSYuval Mintz * @param vf 6450b55e27dSYuval Mintz * @param enable 6460b55e27dSYuval Mintz */ 6470b55e27dSYuval Mintz static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, 6480b55e27dSYuval Mintz struct qed_ptt *p_ptt, 6490b55e27dSYuval Mintz struct qed_vf_info *vf, u8 enable) 6500b55e27dSYuval Mintz { 6510b55e27dSYuval Mintz u32 reg_addr, val; 6520b55e27dSYuval Mintz u16 qzone_id = 0; 6530b55e27dSYuval Mintz int qid; 6540b55e27dSYuval Mintz 6550b55e27dSYuval Mintz for (qid = 0; qid < vf->num_rxqs; qid++) { 6560b55e27dSYuval Mintz qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, 6570b55e27dSYuval Mintz &qzone_id); 6580b55e27dSYuval Mintz 6590b55e27dSYuval Mintz reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; 6600b55e27dSYuval Mintz val = enable ? 
(vf->abs_vf_id | (1 << 8)) : 0; 6610b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, reg_addr, val); 6620b55e27dSYuval Mintz } 6630b55e27dSYuval Mintz } 6640b55e27dSYuval Mintz 665dacd88d6SYuval Mintz static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, 666dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 667dacd88d6SYuval Mintz struct qed_vf_info *vf) 668dacd88d6SYuval Mintz { 669dacd88d6SYuval Mintz /* Reset vf in IGU - interrupts are still disabled */ 670dacd88d6SYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 671dacd88d6SYuval Mintz 672dacd88d6SYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); 673dacd88d6SYuval Mintz 674dacd88d6SYuval Mintz /* Permission Table */ 675dacd88d6SYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); 676dacd88d6SYuval Mintz } 677dacd88d6SYuval Mintz 6781408cc1fSYuval Mintz static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, 6791408cc1fSYuval Mintz struct qed_ptt *p_ptt, 6801408cc1fSYuval Mintz struct qed_vf_info *vf, u16 num_rx_queues) 6811408cc1fSYuval Mintz { 6821408cc1fSYuval Mintz struct qed_igu_block *igu_blocks; 6831408cc1fSYuval Mintz int qid = 0, igu_id = 0; 6841408cc1fSYuval Mintz u32 val = 0; 6851408cc1fSYuval Mintz 6861408cc1fSYuval Mintz igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; 6871408cc1fSYuval Mintz 6881408cc1fSYuval Mintz if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) 6891408cc1fSYuval Mintz num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; 6901408cc1fSYuval Mintz p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; 6911408cc1fSYuval Mintz 6921408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); 6931408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); 6941408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); 6951408cc1fSYuval Mintz 6961408cc1fSYuval Mintz while ((qid < num_rx_queues) && 6971408cc1fSYuval Mintz (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { 6981408cc1fSYuval Mintz if 
(igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { 6991408cc1fSYuval Mintz struct cau_sb_entry sb_entry; 7001408cc1fSYuval Mintz 7011408cc1fSYuval Mintz vf->igu_sbs[qid] = (u16)igu_id; 7021408cc1fSYuval Mintz igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; 7031408cc1fSYuval Mintz 7041408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); 7051408cc1fSYuval Mintz 7061408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 7071408cc1fSYuval Mintz IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, 7081408cc1fSYuval Mintz val); 7091408cc1fSYuval Mintz 7101408cc1fSYuval Mintz /* Configure igu sb in CAU which were marked valid */ 7111408cc1fSYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry, 7121408cc1fSYuval Mintz p_hwfn->rel_pf_id, 7131408cc1fSYuval Mintz vf->abs_vf_id, 1); 7141408cc1fSYuval Mintz qed_dmae_host2grc(p_hwfn, p_ptt, 7151408cc1fSYuval Mintz (u64)(uintptr_t)&sb_entry, 7161408cc1fSYuval Mintz CAU_REG_SB_VAR_MEMORY + 7171408cc1fSYuval Mintz igu_id * sizeof(u64), 2, 0); 7181408cc1fSYuval Mintz qid++; 7191408cc1fSYuval Mintz } 7201408cc1fSYuval Mintz igu_id++; 7211408cc1fSYuval Mintz } 7221408cc1fSYuval Mintz 7231408cc1fSYuval Mintz vf->num_sbs = (u8) num_rx_queues; 7241408cc1fSYuval Mintz 7251408cc1fSYuval Mintz return vf->num_sbs; 7261408cc1fSYuval Mintz } 7271408cc1fSYuval Mintz 7280b55e27dSYuval Mintz static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, 7290b55e27dSYuval Mintz struct qed_ptt *p_ptt, 7300b55e27dSYuval Mintz struct qed_vf_info *vf) 7310b55e27dSYuval Mintz { 7320b55e27dSYuval Mintz struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 7330b55e27dSYuval Mintz int idx, igu_id; 7340b55e27dSYuval Mintz u32 addr, val; 7350b55e27dSYuval Mintz 7360b55e27dSYuval Mintz /* Invalidate igu CAM lines and mark them as free */ 7370b55e27dSYuval Mintz for (idx = 0; idx < vf->num_sbs; idx++) { 7380b55e27dSYuval Mintz igu_id = vf->igu_sbs[idx]; 7390b55e27dSYuval Mintz addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; 7400b55e27dSYuval Mintz 
7410b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, addr); 7420b55e27dSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 7430b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, addr, val); 7440b55e27dSYuval Mintz 7450b55e27dSYuval Mintz p_info->igu_map.igu_blocks[igu_id].status |= 7460b55e27dSYuval Mintz QED_IGU_STATUS_FREE; 7470b55e27dSYuval Mintz 7480b55e27dSYuval Mintz p_hwfn->hw_info.p_igu_info->free_blks++; 7490b55e27dSYuval Mintz } 7500b55e27dSYuval Mintz 7510b55e27dSYuval Mintz vf->num_sbs = 0; 7520b55e27dSYuval Mintz } 7530b55e27dSYuval Mintz 7541408cc1fSYuval Mintz static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, 7551408cc1fSYuval Mintz struct qed_ptt *p_ptt, 7561408cc1fSYuval Mintz u16 rel_vf_id, u16 num_rx_queues) 7571408cc1fSYuval Mintz { 7581408cc1fSYuval Mintz u8 num_of_vf_avaiable_chains = 0; 7591408cc1fSYuval Mintz struct qed_vf_info *vf = NULL; 7601408cc1fSYuval Mintz int rc = 0; 7611408cc1fSYuval Mintz u32 cids; 7621408cc1fSYuval Mintz u8 i; 7631408cc1fSYuval Mintz 7641408cc1fSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 7651408cc1fSYuval Mintz if (!vf) { 7661408cc1fSYuval Mintz DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); 7671408cc1fSYuval Mintz return -EINVAL; 7681408cc1fSYuval Mintz } 7691408cc1fSYuval Mintz 7701408cc1fSYuval Mintz if (vf->b_init) { 7711408cc1fSYuval Mintz DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); 7721408cc1fSYuval Mintz return -EINVAL; 7731408cc1fSYuval Mintz } 7741408cc1fSYuval Mintz 7751408cc1fSYuval Mintz /* Limit number of queues according to number of CIDs */ 7761408cc1fSYuval Mintz qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); 7771408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 7781408cc1fSYuval Mintz QED_MSG_IOV, 7791408cc1fSYuval Mintz "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", 7801408cc1fSYuval Mintz vf->relative_vf_id, num_rx_queues, (u16) cids); 7811408cc1fSYuval Mintz num_rx_queues = min_t(u16, num_rx_queues, ((u16) 
cids)); 7821408cc1fSYuval Mintz 7831408cc1fSYuval Mintz num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, 7841408cc1fSYuval Mintz p_ptt, 7851408cc1fSYuval Mintz vf, 7861408cc1fSYuval Mintz num_rx_queues); 7871408cc1fSYuval Mintz if (!num_of_vf_avaiable_chains) { 7881408cc1fSYuval Mintz DP_ERR(p_hwfn, "no available igu sbs\n"); 7891408cc1fSYuval Mintz return -ENOMEM; 7901408cc1fSYuval Mintz } 7911408cc1fSYuval Mintz 7921408cc1fSYuval Mintz /* Choose queue number and index ranges */ 7931408cc1fSYuval Mintz vf->num_rxqs = num_of_vf_avaiable_chains; 7941408cc1fSYuval Mintz vf->num_txqs = num_of_vf_avaiable_chains; 7951408cc1fSYuval Mintz 7961408cc1fSYuval Mintz for (i = 0; i < vf->num_rxqs; i++) { 7971408cc1fSYuval Mintz u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, 7981408cc1fSYuval Mintz vf->igu_sbs[i]); 7991408cc1fSYuval Mintz 8001408cc1fSYuval Mintz if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 8011408cc1fSYuval Mintz DP_NOTICE(p_hwfn, 8021408cc1fSYuval Mintz "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", 8031408cc1fSYuval Mintz vf->relative_vf_id, queue_id); 8041408cc1fSYuval Mintz return -EINVAL; 8051408cc1fSYuval Mintz } 8061408cc1fSYuval Mintz 8071408cc1fSYuval Mintz /* CIDs are per-VF, so no problem having them 0-based. 
*/ 8081408cc1fSYuval Mintz vf->vf_queues[i].fw_rx_qid = queue_id; 8091408cc1fSYuval Mintz vf->vf_queues[i].fw_tx_qid = queue_id; 8101408cc1fSYuval Mintz vf->vf_queues[i].fw_cid = i; 8111408cc1fSYuval Mintz 8121408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 8131408cc1fSYuval Mintz "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", 8141408cc1fSYuval Mintz vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); 8151408cc1fSYuval Mintz } 8161408cc1fSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); 8171408cc1fSYuval Mintz if (!rc) { 8181408cc1fSYuval Mintz vf->b_init = true; 8191408cc1fSYuval Mintz 8201408cc1fSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 8211408cc1fSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs++; 8221408cc1fSYuval Mintz } 8231408cc1fSYuval Mintz 8241408cc1fSYuval Mintz return rc; 8251408cc1fSYuval Mintz } 8261408cc1fSYuval Mintz 827079d20a6SManish Chopra static void qed_iov_set_link(struct qed_hwfn *p_hwfn, 828079d20a6SManish Chopra u16 vfid, 829079d20a6SManish Chopra struct qed_mcp_link_params *params, 830079d20a6SManish Chopra struct qed_mcp_link_state *link, 831079d20a6SManish Chopra struct qed_mcp_link_capabilities *p_caps) 832079d20a6SManish Chopra { 833079d20a6SManish Chopra struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 834079d20a6SManish Chopra vfid, 835079d20a6SManish Chopra false); 836079d20a6SManish Chopra struct qed_bulletin_content *p_bulletin; 837079d20a6SManish Chopra 838079d20a6SManish Chopra if (!p_vf) 839079d20a6SManish Chopra return; 840079d20a6SManish Chopra 841079d20a6SManish Chopra p_bulletin = p_vf->bulletin.p_virt; 842079d20a6SManish Chopra p_bulletin->req_autoneg = params->speed.autoneg; 843079d20a6SManish Chopra p_bulletin->req_adv_speed = params->speed.advertised_speeds; 844079d20a6SManish Chopra p_bulletin->req_forced_speed = params->speed.forced_speed; 845079d20a6SManish Chopra p_bulletin->req_autoneg_pause = params->pause.autoneg; 846079d20a6SManish Chopra p_bulletin->req_forced_rx = 
params->pause.forced_rx; 847079d20a6SManish Chopra p_bulletin->req_forced_tx = params->pause.forced_tx; 848079d20a6SManish Chopra p_bulletin->req_loopback = params->loopback_mode; 849079d20a6SManish Chopra 850079d20a6SManish Chopra p_bulletin->link_up = link->link_up; 851079d20a6SManish Chopra p_bulletin->speed = link->speed; 852079d20a6SManish Chopra p_bulletin->full_duplex = link->full_duplex; 853079d20a6SManish Chopra p_bulletin->autoneg = link->an; 854079d20a6SManish Chopra p_bulletin->autoneg_complete = link->an_complete; 855079d20a6SManish Chopra p_bulletin->parallel_detection = link->parallel_detection; 856079d20a6SManish Chopra p_bulletin->pfc_enabled = link->pfc_enabled; 857079d20a6SManish Chopra p_bulletin->partner_adv_speed = link->partner_adv_speed; 858079d20a6SManish Chopra p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 859079d20a6SManish Chopra p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 860079d20a6SManish Chopra p_bulletin->partner_adv_pause = link->partner_adv_pause; 861079d20a6SManish Chopra p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 862079d20a6SManish Chopra 863079d20a6SManish Chopra p_bulletin->capability_speed = p_caps->speed_capabilities; 864079d20a6SManish Chopra } 865079d20a6SManish Chopra 8660b55e27dSYuval Mintz static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, 8670b55e27dSYuval Mintz struct qed_ptt *p_ptt, u16 rel_vf_id) 8680b55e27dSYuval Mintz { 869079d20a6SManish Chopra struct qed_mcp_link_capabilities caps; 870079d20a6SManish Chopra struct qed_mcp_link_params params; 871079d20a6SManish Chopra struct qed_mcp_link_state link; 8720b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 8730b55e27dSYuval Mintz 8740b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 8750b55e27dSYuval Mintz if (!vf) { 8760b55e27dSYuval Mintz DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); 8770b55e27dSYuval Mintz return -EINVAL; 8780b55e27dSYuval Mintz } 8790b55e27dSYuval 
Mintz 88036558c3dSYuval Mintz if (vf->bulletin.p_virt) 88136558c3dSYuval Mintz memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); 88236558c3dSYuval Mintz 88336558c3dSYuval Mintz memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); 88436558c3dSYuval Mintz 885079d20a6SManish Chopra /* Get the link configuration back in bulletin so 886079d20a6SManish Chopra * that when VFs are re-enabled they get the actual 887079d20a6SManish Chopra * link configuration. 888079d20a6SManish Chopra */ 889079d20a6SManish Chopra memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); 890079d20a6SManish Chopra memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); 891079d20a6SManish Chopra memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 892079d20a6SManish Chopra qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); 893079d20a6SManish Chopra 8941fe614d1SYuval Mintz /* Forget the VF's acquisition message */ 8951fe614d1SYuval Mintz memset(&vf->acquire, 0, sizeof(vf->acquire)); 8960b55e27dSYuval Mintz 8970b55e27dSYuval Mintz /* disablng interrupts and resetting permission table was done during 8980b55e27dSYuval Mintz * vf-close, however, we could get here without going through vf_close 8990b55e27dSYuval Mintz */ 9000b55e27dSYuval Mintz /* Disable Interrupts for VF */ 9010b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 9020b55e27dSYuval Mintz 9030b55e27dSYuval Mintz /* Reset Permission table */ 9040b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 9050b55e27dSYuval Mintz 9060b55e27dSYuval Mintz vf->num_rxqs = 0; 9070b55e27dSYuval Mintz vf->num_txqs = 0; 9080b55e27dSYuval Mintz qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); 9090b55e27dSYuval Mintz 9100b55e27dSYuval Mintz if (vf->b_init) { 9110b55e27dSYuval Mintz vf->b_init = false; 9120b55e27dSYuval Mintz 9130b55e27dSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 9140b55e27dSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs--; 9150b55e27dSYuval Mintz } 9160b55e27dSYuval Mintz 
9170b55e27dSYuval Mintz return 0; 9180b55e27dSYuval Mintz } 9190b55e27dSYuval Mintz 92037bff2b9SYuval Mintz static bool qed_iov_tlv_supported(u16 tlvtype) 92137bff2b9SYuval Mintz { 92237bff2b9SYuval Mintz return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; 92337bff2b9SYuval Mintz } 92437bff2b9SYuval Mintz 92537bff2b9SYuval Mintz /* place a given tlv on the tlv buffer, continuing current tlv list */ 92637bff2b9SYuval Mintz void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) 92737bff2b9SYuval Mintz { 92837bff2b9SYuval Mintz struct channel_tlv *tl = (struct channel_tlv *)*offset; 92937bff2b9SYuval Mintz 93037bff2b9SYuval Mintz tl->type = type; 93137bff2b9SYuval Mintz tl->length = length; 93237bff2b9SYuval Mintz 93337bff2b9SYuval Mintz /* Offset should keep pointing to next TLV (the end of the last) */ 93437bff2b9SYuval Mintz *offset += length; 93537bff2b9SYuval Mintz 93637bff2b9SYuval Mintz /* Return a pointer to the start of the added tlv */ 93737bff2b9SYuval Mintz return *offset - length; 93837bff2b9SYuval Mintz } 93937bff2b9SYuval Mintz 94037bff2b9SYuval Mintz /* list the types and lengths of the tlvs on the buffer */ 94137bff2b9SYuval Mintz void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) 94237bff2b9SYuval Mintz { 94337bff2b9SYuval Mintz u16 i = 1, total_length = 0; 94437bff2b9SYuval Mintz struct channel_tlv *tlv; 94537bff2b9SYuval Mintz 94637bff2b9SYuval Mintz do { 94737bff2b9SYuval Mintz tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); 94837bff2b9SYuval Mintz 94937bff2b9SYuval Mintz /* output tlv */ 95037bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 95137bff2b9SYuval Mintz "TLV number %d: type %d, length %d\n", 95237bff2b9SYuval Mintz i, tlv->type, tlv->length); 95337bff2b9SYuval Mintz 95437bff2b9SYuval Mintz if (tlv->type == CHANNEL_TLV_LIST_END) 95537bff2b9SYuval Mintz return; 95637bff2b9SYuval Mintz 95737bff2b9SYuval Mintz /* Validate entry - protect against malicious VFs */ 
95837bff2b9SYuval Mintz if (!tlv->length) { 95937bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); 96037bff2b9SYuval Mintz return; 96137bff2b9SYuval Mintz } 96237bff2b9SYuval Mintz 96337bff2b9SYuval Mintz total_length += tlv->length; 96437bff2b9SYuval Mintz 96537bff2b9SYuval Mintz if (total_length >= sizeof(struct tlv_buffer_size)) { 96637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); 96737bff2b9SYuval Mintz return; 96837bff2b9SYuval Mintz } 96937bff2b9SYuval Mintz 97037bff2b9SYuval Mintz i++; 97137bff2b9SYuval Mintz } while (1); 97237bff2b9SYuval Mintz } 97337bff2b9SYuval Mintz 97437bff2b9SYuval Mintz static void qed_iov_send_response(struct qed_hwfn *p_hwfn, 97537bff2b9SYuval Mintz struct qed_ptt *p_ptt, 97637bff2b9SYuval Mintz struct qed_vf_info *p_vf, 97737bff2b9SYuval Mintz u16 length, u8 status) 97837bff2b9SYuval Mintz { 97937bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 98037bff2b9SYuval Mintz struct qed_dmae_params params; 98137bff2b9SYuval Mintz u8 eng_vf_id; 98237bff2b9SYuval Mintz 98337bff2b9SYuval Mintz mbx->reply_virt->default_resp.hdr.status = status; 98437bff2b9SYuval Mintz 98537bff2b9SYuval Mintz qed_dp_tlv_list(p_hwfn, mbx->reply_virt); 98637bff2b9SYuval Mintz 98737bff2b9SYuval Mintz eng_vf_id = p_vf->abs_vf_id; 98837bff2b9SYuval Mintz 98937bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 99037bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 99137bff2b9SYuval Mintz params.dst_vfid = eng_vf_id; 99237bff2b9SYuval Mintz 99337bff2b9SYuval Mintz qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), 99437bff2b9SYuval Mintz mbx->req_virt->first_tlv.reply_address + 99537bff2b9SYuval Mintz sizeof(u64), 99637bff2b9SYuval Mintz (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, 99737bff2b9SYuval Mintz ¶ms); 99837bff2b9SYuval Mintz 99937bff2b9SYuval Mintz qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, 100037bff2b9SYuval Mintz mbx->req_virt->first_tlv.reply_address, 
100137bff2b9SYuval Mintz sizeof(u64) / 4, ¶ms); 100237bff2b9SYuval Mintz 100337bff2b9SYuval Mintz REG_WR(p_hwfn, 100437bff2b9SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 100537bff2b9SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); 100637bff2b9SYuval Mintz } 100737bff2b9SYuval Mintz 1008dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, 1009dacd88d6SYuval Mintz enum qed_iov_vport_update_flag flag) 1010dacd88d6SYuval Mintz { 1011dacd88d6SYuval Mintz switch (flag) { 1012dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACTIVATE: 1013dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 101417b235c1SYuval Mintz case QED_IOV_VP_UPDATE_VLAN_STRIP: 101517b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 101617b235c1SYuval Mintz case QED_IOV_VP_UPDATE_TX_SWITCH: 101717b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1018dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_MCAST: 1019dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_MCAST; 1020dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_PARAM: 1021dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1022dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_RSS: 1023dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_RSS; 102417b235c1SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: 102517b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 102617b235c1SYuval Mintz case QED_IOV_VP_UPDATE_SGE_TPA: 102717b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 1028dacd88d6SYuval Mintz default: 1029dacd88d6SYuval Mintz return 0; 1030dacd88d6SYuval Mintz } 1031dacd88d6SYuval Mintz } 1032dacd88d6SYuval Mintz 1033dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, 1034dacd88d6SYuval Mintz struct qed_vf_info *p_vf, 1035dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, 1036dacd88d6SYuval Mintz u8 status, 1037dacd88d6SYuval Mintz u16 tlvs_mask, u16 tlvs_accepted) 1038dacd88d6SYuval Mintz { 
1039dacd88d6SYuval Mintz struct pfvf_def_resp_tlv *resp; 1040dacd88d6SYuval Mintz u16 size, total_len, i; 1041dacd88d6SYuval Mintz 1042dacd88d6SYuval Mintz memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); 1043dacd88d6SYuval Mintz p_mbx->offset = (u8 *)p_mbx->reply_virt; 1044dacd88d6SYuval Mintz size = sizeof(struct pfvf_def_resp_tlv); 1045dacd88d6SYuval Mintz total_len = size; 1046dacd88d6SYuval Mintz 1047dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); 1048dacd88d6SYuval Mintz 1049dacd88d6SYuval Mintz /* Prepare response for all extended tlvs if they are found by PF */ 1050dacd88d6SYuval Mintz for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { 1051dacd88d6SYuval Mintz if (!(tlvs_mask & (1 << i))) 1052dacd88d6SYuval Mintz continue; 1053dacd88d6SYuval Mintz 1054dacd88d6SYuval Mintz resp = qed_add_tlv(p_hwfn, &p_mbx->offset, 1055dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), size); 1056dacd88d6SYuval Mintz 1057dacd88d6SYuval Mintz if (tlvs_accepted & (1 << i)) 1058dacd88d6SYuval Mintz resp->hdr.status = status; 1059dacd88d6SYuval Mintz else 1060dacd88d6SYuval Mintz resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; 1061dacd88d6SYuval Mintz 1062dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 1063dacd88d6SYuval Mintz QED_MSG_IOV, 1064dacd88d6SYuval Mintz "VF[%d] - vport_update response: TLV %d, status %02x\n", 1065dacd88d6SYuval Mintz p_vf->relative_vf_id, 1066dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); 1067dacd88d6SYuval Mintz 1068dacd88d6SYuval Mintz total_len += size; 1069dacd88d6SYuval Mintz } 1070dacd88d6SYuval Mintz 1071dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, 1072dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1073dacd88d6SYuval Mintz 1074dacd88d6SYuval Mintz return total_len; 1075dacd88d6SYuval Mintz } 1076dacd88d6SYuval Mintz 107737bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, 107837bff2b9SYuval Mintz struct qed_ptt 
*p_ptt, 107937bff2b9SYuval Mintz struct qed_vf_info *vf_info, 108037bff2b9SYuval Mintz u16 type, u16 length, u8 status) 108137bff2b9SYuval Mintz { 108237bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; 108337bff2b9SYuval Mintz 108437bff2b9SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 108537bff2b9SYuval Mintz 108637bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, type, length); 108737bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 108837bff2b9SYuval Mintz sizeof(struct channel_list_end_tlv)); 108937bff2b9SYuval Mintz 109037bff2b9SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); 109137bff2b9SYuval Mintz } 109237bff2b9SYuval Mintz 10930b55e27dSYuval Mintz struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, 10940b55e27dSYuval Mintz u16 relative_vf_id, 10950b55e27dSYuval Mintz bool b_enabled_only) 10960b55e27dSYuval Mintz { 10970b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 10980b55e27dSYuval Mintz 10990b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); 11000b55e27dSYuval Mintz if (!vf) 11010b55e27dSYuval Mintz return NULL; 11020b55e27dSYuval Mintz 11030b55e27dSYuval Mintz return &vf->p_vf_info; 11040b55e27dSYuval Mintz } 11050b55e27dSYuval Mintz 11060b55e27dSYuval Mintz void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) 11070b55e27dSYuval Mintz { 11080b55e27dSYuval Mintz struct qed_public_vf_info *vf_info; 11090b55e27dSYuval Mintz 11100b55e27dSYuval Mintz vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); 11110b55e27dSYuval Mintz 11120b55e27dSYuval Mintz if (!vf_info) 11130b55e27dSYuval Mintz return; 11140b55e27dSYuval Mintz 11150b55e27dSYuval Mintz /* Clear the VF mac */ 11160b55e27dSYuval Mintz memset(vf_info->mac, 0, ETH_ALEN); 11170b55e27dSYuval Mintz } 11180b55e27dSYuval Mintz 11190b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, 11200b55e27dSYuval Mintz struct qed_vf_info *p_vf) 
11210b55e27dSYuval Mintz { 11220b55e27dSYuval Mintz u32 i; 11230b55e27dSYuval Mintz 11240b55e27dSYuval Mintz p_vf->vf_bulletin = 0; 1125dacd88d6SYuval Mintz p_vf->vport_instance = 0; 11260b55e27dSYuval Mintz p_vf->num_mac_filters = 0; 11270b55e27dSYuval Mintz p_vf->num_vlan_filters = 0; 112808feecd7SYuval Mintz p_vf->configured_features = 0; 11290b55e27dSYuval Mintz 11300b55e27dSYuval Mintz /* If VF previously requested less resources, go back to default */ 11310b55e27dSYuval Mintz p_vf->num_rxqs = p_vf->num_sbs; 11320b55e27dSYuval Mintz p_vf->num_txqs = p_vf->num_sbs; 11330b55e27dSYuval Mintz 1134dacd88d6SYuval Mintz p_vf->num_active_rxqs = 0; 1135dacd88d6SYuval Mintz 11360b55e27dSYuval Mintz for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) 11370b55e27dSYuval Mintz p_vf->vf_queues[i].rxq_active = 0; 11380b55e27dSYuval Mintz 113908feecd7SYuval Mintz memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 11401fe614d1SYuval Mintz memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); 11410b55e27dSYuval Mintz qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); 11420b55e27dSYuval Mintz } 11430b55e27dSYuval Mintz 11441408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, 114537bff2b9SYuval Mintz struct qed_ptt *p_ptt, 11461408cc1fSYuval Mintz struct qed_vf_info *vf) 114737bff2b9SYuval Mintz { 11481408cc1fSYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 11491408cc1fSYuval Mintz struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 11501408cc1fSYuval Mintz struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 11511408cc1fSYuval Mintz struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 11521fe614d1SYuval Mintz u8 i, vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 11531408cc1fSYuval Mintz struct pf_vf_resc *resc = &resp->resc; 11541fe614d1SYuval Mintz int rc; 11551fe614d1SYuval Mintz 11561fe614d1SYuval Mintz memset(resp, 0, sizeof(*resp)); 11571408cc1fSYuval Mintz 11581408cc1fSYuval Mintz /* Validate FW compatibility */ 
11591fe614d1SYuval Mintz if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { 11601408cc1fSYuval Mintz DP_INFO(p_hwfn, 11611fe614d1SYuval Mintz "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", 11621408cc1fSYuval Mintz vf->abs_vf_id, 11631fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_major, 11641fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor, 11651fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 11661fe614d1SYuval Mintz 11671fe614d1SYuval Mintz /* Write the PF version so that VF would know which version 11681fe614d1SYuval Mintz * is supported. 11691fe614d1SYuval Mintz */ 11701fe614d1SYuval Mintz pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; 11711fe614d1SYuval Mintz pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; 11721fe614d1SYuval Mintz 11731408cc1fSYuval Mintz goto out; 11741408cc1fSYuval Mintz } 11751408cc1fSYuval Mintz 11761408cc1fSYuval Mintz /* On 100g PFs, prevent old VFs from loading */ 11771408cc1fSYuval Mintz if ((p_hwfn->cdev->num_hwfns > 1) && 11781408cc1fSYuval Mintz !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { 11791408cc1fSYuval Mintz DP_INFO(p_hwfn, 11801408cc1fSYuval Mintz "VF[%d] is running an old driver that doesn't support 100g\n", 11811408cc1fSYuval Mintz vf->abs_vf_id); 11821408cc1fSYuval Mintz goto out; 11831408cc1fSYuval Mintz } 11841408cc1fSYuval Mintz 11851fe614d1SYuval Mintz /* Store the acquire message */ 11861fe614d1SYuval Mintz memcpy(&vf->acquire, req, sizeof(vf->acquire)); 11871408cc1fSYuval Mintz 11881408cc1fSYuval Mintz /* Fill in vf info stuff */ 11891408cc1fSYuval Mintz vf->opaque_fid = req->vfdev_info.opaque_fid; 11901408cc1fSYuval Mintz vf->num_mac_filters = 1; 11911408cc1fSYuval Mintz vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; 11921408cc1fSYuval Mintz 11931408cc1fSYuval Mintz vf->vf_bulletin = req->bulletin_addr; 11941408cc1fSYuval Mintz vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 
11951408cc1fSYuval Mintz vf->bulletin.size : req->bulletin_size; 11961408cc1fSYuval Mintz 11971408cc1fSYuval Mintz /* fill in pfdev info */ 11981408cc1fSYuval Mintz pfdev_info->chip_num = p_hwfn->cdev->chip_num; 11991408cc1fSYuval Mintz pfdev_info->db_size = 0; 12001408cc1fSYuval Mintz pfdev_info->indices_per_sb = PIS_PER_SB; 12011408cc1fSYuval Mintz 12021408cc1fSYuval Mintz pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | 12031408cc1fSYuval Mintz PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; 12041408cc1fSYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 12051408cc1fSYuval Mintz pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 12061408cc1fSYuval Mintz 12071408cc1fSYuval Mintz pfdev_info->stats_info.mstats.address = 12081408cc1fSYuval Mintz PXP_VF_BAR0_START_MSDM_ZONE_B + 12091408cc1fSYuval Mintz offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); 12101408cc1fSYuval Mintz pfdev_info->stats_info.mstats.len = 12111408cc1fSYuval Mintz sizeof(struct eth_mstorm_per_queue_stat); 12121408cc1fSYuval Mintz 12131408cc1fSYuval Mintz pfdev_info->stats_info.ustats.address = 12141408cc1fSYuval Mintz PXP_VF_BAR0_START_USDM_ZONE_B + 12151408cc1fSYuval Mintz offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); 12161408cc1fSYuval Mintz pfdev_info->stats_info.ustats.len = 12171408cc1fSYuval Mintz sizeof(struct eth_ustorm_per_queue_stat); 12181408cc1fSYuval Mintz 12191408cc1fSYuval Mintz pfdev_info->stats_info.pstats.address = 12201408cc1fSYuval Mintz PXP_VF_BAR0_START_PSDM_ZONE_B + 12211408cc1fSYuval Mintz offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); 12221408cc1fSYuval Mintz pfdev_info->stats_info.pstats.len = 12231408cc1fSYuval Mintz sizeof(struct eth_pstorm_per_queue_stat); 12241408cc1fSYuval Mintz 12251408cc1fSYuval Mintz pfdev_info->stats_info.tstats.address = 0; 12261408cc1fSYuval Mintz pfdev_info->stats_info.tstats.len = 0; 12271408cc1fSYuval Mintz 12281408cc1fSYuval Mintz memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 
12291408cc1fSYuval Mintz 12301408cc1fSYuval Mintz pfdev_info->fw_major = FW_MAJOR_VERSION; 12311408cc1fSYuval Mintz pfdev_info->fw_minor = FW_MINOR_VERSION; 12321408cc1fSYuval Mintz pfdev_info->fw_rev = FW_REVISION_VERSION; 12331408cc1fSYuval Mintz pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 12341fe614d1SYuval Mintz pfdev_info->minor_fp_hsi = min_t(u8, 12351fe614d1SYuval Mintz ETH_HSI_VER_MINOR, 12361fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor); 12371408cc1fSYuval Mintz pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; 12381408cc1fSYuval Mintz qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); 12391408cc1fSYuval Mintz 12401408cc1fSYuval Mintz pfdev_info->dev_type = p_hwfn->cdev->type; 12411408cc1fSYuval Mintz pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; 12421408cc1fSYuval Mintz 12431408cc1fSYuval Mintz resc->num_rxqs = vf->num_rxqs; 12441408cc1fSYuval Mintz resc->num_txqs = vf->num_txqs; 12451408cc1fSYuval Mintz resc->num_sbs = vf->num_sbs; 12461408cc1fSYuval Mintz for (i = 0; i < resc->num_sbs; i++) { 12471408cc1fSYuval Mintz resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; 12481408cc1fSYuval Mintz resc->hw_sbs[i].sb_qid = 0; 12491408cc1fSYuval Mintz } 12501408cc1fSYuval Mintz 12511408cc1fSYuval Mintz for (i = 0; i < resc->num_rxqs; i++) { 12521408cc1fSYuval Mintz qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, 12531408cc1fSYuval Mintz (u16 *)&resc->hw_qid[i]); 12541408cc1fSYuval Mintz resc->cid[i] = vf->vf_queues[i].fw_cid; 12551408cc1fSYuval Mintz } 12561408cc1fSYuval Mintz 12571408cc1fSYuval Mintz resc->num_mac_filters = min_t(u8, vf->num_mac_filters, 12581408cc1fSYuval Mintz req->resc_request.num_mac_filters); 12591408cc1fSYuval Mintz resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters, 12601408cc1fSYuval Mintz req->resc_request.num_vlan_filters); 12611408cc1fSYuval Mintz 12621408cc1fSYuval Mintz /* This isn't really required as VF isn't limited, but some VFs might 12631408cc1fSYuval Mintz * actually test this value, so need to provide 
it. 12641408cc1fSYuval Mintz */ 12651408cc1fSYuval Mintz resc->num_mc_filters = req->resc_request.num_mc_filters; 12661408cc1fSYuval Mintz 12671fe614d1SYuval Mintz /* Start the VF in FW */ 12681fe614d1SYuval Mintz rc = qed_sp_vf_start(p_hwfn, vf); 12691fe614d1SYuval Mintz if (rc) { 12701fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); 12711fe614d1SYuval Mintz vfpf_status = PFVF_STATUS_FAILURE; 12721fe614d1SYuval Mintz goto out; 12731fe614d1SYuval Mintz } 12741fe614d1SYuval Mintz 12751408cc1fSYuval Mintz /* Fill agreed size of bulletin board in response */ 12761408cc1fSYuval Mintz resp->bulletin_size = vf->bulletin.size; 127736558c3dSYuval Mintz qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 12781408cc1fSYuval Mintz 12791408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 12801408cc1fSYuval Mintz QED_MSG_IOV, 12811408cc1fSYuval Mintz "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" 12821408cc1fSYuval Mintz "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", 12831408cc1fSYuval Mintz vf->abs_vf_id, 12841408cc1fSYuval Mintz resp->pfdev_info.chip_num, 12851408cc1fSYuval Mintz resp->pfdev_info.db_size, 12861408cc1fSYuval Mintz resp->pfdev_info.indices_per_sb, 12871408cc1fSYuval Mintz resp->pfdev_info.capabilities, 12881408cc1fSYuval Mintz resc->num_rxqs, 12891408cc1fSYuval Mintz resc->num_txqs, 12901408cc1fSYuval Mintz resc->num_sbs, 12911408cc1fSYuval Mintz resc->num_mac_filters, 12921408cc1fSYuval Mintz resc->num_vlan_filters); 12931408cc1fSYuval Mintz vf->state = VF_ACQUIRED; 12941408cc1fSYuval Mintz 12951408cc1fSYuval Mintz /* Prepare Response */ 12961408cc1fSYuval Mintz out: 12971408cc1fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, 12981408cc1fSYuval Mintz sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); 129937bff2b9SYuval Mintz } 130037bff2b9SYuval Mintz 13016ddc7608SYuval Mintz static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, 
13026ddc7608SYuval Mintz struct qed_vf_info *p_vf, bool val) 13036ddc7608SYuval Mintz { 13046ddc7608SYuval Mintz struct qed_sp_vport_update_params params; 13056ddc7608SYuval Mintz int rc; 13066ddc7608SYuval Mintz 13076ddc7608SYuval Mintz if (val == p_vf->spoof_chk) { 13086ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 13096ddc7608SYuval Mintz "Spoofchk value[%d] is already configured\n", val); 13106ddc7608SYuval Mintz return 0; 13116ddc7608SYuval Mintz } 13126ddc7608SYuval Mintz 13136ddc7608SYuval Mintz memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); 13146ddc7608SYuval Mintz params.opaque_fid = p_vf->opaque_fid; 13156ddc7608SYuval Mintz params.vport_id = p_vf->vport_id; 13166ddc7608SYuval Mintz params.update_anti_spoofing_en_flg = 1; 13176ddc7608SYuval Mintz params.anti_spoofing_en = val; 13186ddc7608SYuval Mintz 13196ddc7608SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 13206ddc7608SYuval Mintz if (rc) { 13216ddc7608SYuval Mintz p_vf->spoof_chk = val; 13226ddc7608SYuval Mintz p_vf->req_spoofchk_val = p_vf->spoof_chk; 13236ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 13246ddc7608SYuval Mintz "Spoofchk val[%d] configured\n", val); 13256ddc7608SYuval Mintz } else { 13266ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 13276ddc7608SYuval Mintz "Spoofchk configuration[val:%d] failed for VF[%d]\n", 13286ddc7608SYuval Mintz val, p_vf->relative_vf_id); 13296ddc7608SYuval Mintz } 13306ddc7608SYuval Mintz 13316ddc7608SYuval Mintz return rc; 13326ddc7608SYuval Mintz } 13336ddc7608SYuval Mintz 133408feecd7SYuval Mintz static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, 133508feecd7SYuval Mintz struct qed_vf_info *p_vf) 133608feecd7SYuval Mintz { 133708feecd7SYuval Mintz struct qed_filter_ucast filter; 133808feecd7SYuval Mintz int rc = 0; 133908feecd7SYuval Mintz int i; 134008feecd7SYuval Mintz 134108feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 134208feecd7SYuval Mintz filter.is_rx_filter = 1; 
134308feecd7SYuval Mintz filter.is_tx_filter = 1; 134408feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 134508feecd7SYuval Mintz filter.opcode = QED_FILTER_ADD; 134608feecd7SYuval Mintz 134708feecd7SYuval Mintz /* Reconfigure vlans */ 134808feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 134908feecd7SYuval Mintz if (!p_vf->shadow_config.vlans[i].used) 135008feecd7SYuval Mintz continue; 135108feecd7SYuval Mintz 135208feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 135308feecd7SYuval Mintz filter.vlan = p_vf->shadow_config.vlans[i].vid; 135408feecd7SYuval Mintz DP_VERBOSE(p_hwfn, 135508feecd7SYuval Mintz QED_MSG_IOV, 135608feecd7SYuval Mintz "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", 135708feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 135808feecd7SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, 135908feecd7SYuval Mintz p_vf->opaque_fid, 136008feecd7SYuval Mintz &filter, 136108feecd7SYuval Mintz QED_SPQ_MODE_CB, NULL); 136208feecd7SYuval Mintz if (rc) { 136308feecd7SYuval Mintz DP_NOTICE(p_hwfn, 136408feecd7SYuval Mintz "Failed to configure VLAN [%04x] to VF [%04x]\n", 136508feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 136608feecd7SYuval Mintz break; 136708feecd7SYuval Mintz } 136808feecd7SYuval Mintz } 136908feecd7SYuval Mintz 137008feecd7SYuval Mintz return rc; 137108feecd7SYuval Mintz } 137208feecd7SYuval Mintz 137308feecd7SYuval Mintz static int 137408feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, 137508feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 137608feecd7SYuval Mintz { 137708feecd7SYuval Mintz int rc = 0; 137808feecd7SYuval Mintz 137908feecd7SYuval Mintz if ((events & (1 << VLAN_ADDR_FORCED)) && 138008feecd7SYuval Mintz !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) 138108feecd7SYuval Mintz rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); 138208feecd7SYuval Mintz 138308feecd7SYuval Mintz return rc; 138408feecd7SYuval Mintz } 
138508feecd7SYuval Mintz 138608feecd7SYuval Mintz static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, 138708feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 138808feecd7SYuval Mintz { 138908feecd7SYuval Mintz int rc = 0; 139008feecd7SYuval Mintz struct qed_filter_ucast filter; 139108feecd7SYuval Mintz 139208feecd7SYuval Mintz if (!p_vf->vport_instance) 139308feecd7SYuval Mintz return -EINVAL; 139408feecd7SYuval Mintz 1395eff16960SYuval Mintz if (events & (1 << MAC_ADDR_FORCED)) { 1396eff16960SYuval Mintz /* Since there's no way [currently] of removing the MAC, 1397eff16960SYuval Mintz * we can always assume this means we need to force it. 1398eff16960SYuval Mintz */ 1399eff16960SYuval Mintz memset(&filter, 0, sizeof(filter)); 1400eff16960SYuval Mintz filter.type = QED_FILTER_MAC; 1401eff16960SYuval Mintz filter.opcode = QED_FILTER_REPLACE; 1402eff16960SYuval Mintz filter.is_rx_filter = 1; 1403eff16960SYuval Mintz filter.is_tx_filter = 1; 1404eff16960SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 1405eff16960SYuval Mintz ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); 1406eff16960SYuval Mintz 1407eff16960SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 1408eff16960SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 1409eff16960SYuval Mintz if (rc) { 1410eff16960SYuval Mintz DP_NOTICE(p_hwfn, 1411eff16960SYuval Mintz "PF failed to configure MAC for VF\n"); 1412eff16960SYuval Mintz return rc; 1413eff16960SYuval Mintz } 1414eff16960SYuval Mintz 1415eff16960SYuval Mintz p_vf->configured_features |= 1 << MAC_ADDR_FORCED; 1416eff16960SYuval Mintz } 1417eff16960SYuval Mintz 141808feecd7SYuval Mintz if (events & (1 << VLAN_ADDR_FORCED)) { 141908feecd7SYuval Mintz struct qed_sp_vport_update_params vport_update; 142008feecd7SYuval Mintz u8 removal; 142108feecd7SYuval Mintz int i; 142208feecd7SYuval Mintz 142308feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 142408feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 
142508feecd7SYuval Mintz filter.is_rx_filter = 1; 142608feecd7SYuval Mintz filter.is_tx_filter = 1; 142708feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 142808feecd7SYuval Mintz filter.vlan = p_vf->bulletin.p_virt->pvid; 142908feecd7SYuval Mintz filter.opcode = filter.vlan ? QED_FILTER_REPLACE : 143008feecd7SYuval Mintz QED_FILTER_FLUSH; 143108feecd7SYuval Mintz 143208feecd7SYuval Mintz /* Send the ramrod */ 143308feecd7SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 143408feecd7SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 143508feecd7SYuval Mintz if (rc) { 143608feecd7SYuval Mintz DP_NOTICE(p_hwfn, 143708feecd7SYuval Mintz "PF failed to configure VLAN for VF\n"); 143808feecd7SYuval Mintz return rc; 143908feecd7SYuval Mintz } 144008feecd7SYuval Mintz 144108feecd7SYuval Mintz /* Update the default-vlan & silent vlan stripping */ 144208feecd7SYuval Mintz memset(&vport_update, 0, sizeof(vport_update)); 144308feecd7SYuval Mintz vport_update.opaque_fid = p_vf->opaque_fid; 144408feecd7SYuval Mintz vport_update.vport_id = p_vf->vport_id; 144508feecd7SYuval Mintz vport_update.update_default_vlan_enable_flg = 1; 144608feecd7SYuval Mintz vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; 144708feecd7SYuval Mintz vport_update.update_default_vlan_flg = 1; 144808feecd7SYuval Mintz vport_update.default_vlan = filter.vlan; 144908feecd7SYuval Mintz 145008feecd7SYuval Mintz vport_update.update_inner_vlan_removal_flg = 1; 145108feecd7SYuval Mintz removal = filter.vlan ? 1 145208feecd7SYuval Mintz : p_vf->shadow_config.inner_vlan_removal; 145308feecd7SYuval Mintz vport_update.inner_vlan_removal_flg = removal; 145408feecd7SYuval Mintz vport_update.silent_vlan_removal_flg = filter.vlan ? 
1 : 0; 145508feecd7SYuval Mintz rc = qed_sp_vport_update(p_hwfn, 145608feecd7SYuval Mintz &vport_update, 145708feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 145808feecd7SYuval Mintz if (rc) { 145908feecd7SYuval Mintz DP_NOTICE(p_hwfn, 146008feecd7SYuval Mintz "PF failed to configure VF vport for vlan\n"); 146108feecd7SYuval Mintz return rc; 146208feecd7SYuval Mintz } 146308feecd7SYuval Mintz 146408feecd7SYuval Mintz /* Update all the Rx queues */ 146508feecd7SYuval Mintz for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 146608feecd7SYuval Mintz u16 qid; 146708feecd7SYuval Mintz 146808feecd7SYuval Mintz if (!p_vf->vf_queues[i].rxq_active) 146908feecd7SYuval Mintz continue; 147008feecd7SYuval Mintz 147108feecd7SYuval Mintz qid = p_vf->vf_queues[i].fw_rx_qid; 147208feecd7SYuval Mintz 147308feecd7SYuval Mintz rc = qed_sp_eth_rx_queues_update(p_hwfn, qid, 147408feecd7SYuval Mintz 1, 0, 1, 147508feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, 147608feecd7SYuval Mintz NULL); 147708feecd7SYuval Mintz if (rc) { 147808feecd7SYuval Mintz DP_NOTICE(p_hwfn, 147908feecd7SYuval Mintz "Failed to send Rx update fo queue[0x%04x]\n", 148008feecd7SYuval Mintz qid); 148108feecd7SYuval Mintz return rc; 148208feecd7SYuval Mintz } 148308feecd7SYuval Mintz } 148408feecd7SYuval Mintz 148508feecd7SYuval Mintz if (filter.vlan) 148608feecd7SYuval Mintz p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; 148708feecd7SYuval Mintz else 148808feecd7SYuval Mintz p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); 148908feecd7SYuval Mintz } 149008feecd7SYuval Mintz 149108feecd7SYuval Mintz /* If forced features are terminated, we need to configure the shadow 149208feecd7SYuval Mintz * configuration back again. 
149308feecd7SYuval Mintz */ 149408feecd7SYuval Mintz if (events) 149508feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); 149608feecd7SYuval Mintz 149708feecd7SYuval Mintz return rc; 149808feecd7SYuval Mintz } 149908feecd7SYuval Mintz 1500dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, 1501dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1502dacd88d6SYuval Mintz struct qed_vf_info *vf) 1503dacd88d6SYuval Mintz { 1504dacd88d6SYuval Mintz struct qed_sp_vport_start_params params = { 0 }; 1505dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1506dacd88d6SYuval Mintz struct vfpf_vport_start_tlv *start; 1507dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1508dacd88d6SYuval Mintz struct qed_vf_info *vf_info; 150908feecd7SYuval Mintz u64 *p_bitmap; 1510dacd88d6SYuval Mintz int sb_id; 1511dacd88d6SYuval Mintz int rc; 1512dacd88d6SYuval Mintz 1513dacd88d6SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); 1514dacd88d6SYuval Mintz if (!vf_info) { 1515dacd88d6SYuval Mintz DP_NOTICE(p_hwfn->cdev, 1516dacd88d6SYuval Mintz "Failed to get VF info, invalid vfid [%d]\n", 1517dacd88d6SYuval Mintz vf->relative_vf_id); 1518dacd88d6SYuval Mintz return; 1519dacd88d6SYuval Mintz } 1520dacd88d6SYuval Mintz 1521dacd88d6SYuval Mintz vf->state = VF_ENABLED; 1522dacd88d6SYuval Mintz start = &mbx->req_virt->start_vport; 1523dacd88d6SYuval Mintz 1524dacd88d6SYuval Mintz /* Initialize Status block in CAU */ 1525dacd88d6SYuval Mintz for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { 1526dacd88d6SYuval Mintz if (!start->sb_addr[sb_id]) { 1527dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1528dacd88d6SYuval Mintz "VF[%d] did not fill the address of SB %d\n", 1529dacd88d6SYuval Mintz vf->relative_vf_id, sb_id); 1530dacd88d6SYuval Mintz break; 1531dacd88d6SYuval Mintz } 1532dacd88d6SYuval Mintz 1533dacd88d6SYuval Mintz qed_int_cau_conf_sb(p_hwfn, p_ptt, 1534dacd88d6SYuval Mintz 
start->sb_addr[sb_id], 1535dacd88d6SYuval Mintz vf->igu_sbs[sb_id], 1536dacd88d6SYuval Mintz vf->abs_vf_id, 1); 1537dacd88d6SYuval Mintz } 1538dacd88d6SYuval Mintz qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); 1539dacd88d6SYuval Mintz 1540dacd88d6SYuval Mintz vf->mtu = start->mtu; 154108feecd7SYuval Mintz vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; 154208feecd7SYuval Mintz 154308feecd7SYuval Mintz /* Take into consideration configuration forced by hypervisor; 154408feecd7SYuval Mintz * If none is configured, use the supplied VF values [for old 154508feecd7SYuval Mintz * vfs that would still be fine, since they passed '0' as padding]. 154608feecd7SYuval Mintz */ 154708feecd7SYuval Mintz p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 154808feecd7SYuval Mintz if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 154908feecd7SYuval Mintz u8 vf_req = start->only_untagged; 155008feecd7SYuval Mintz 155108feecd7SYuval Mintz vf_info->bulletin.p_virt->default_only_untagged = vf_req; 155208feecd7SYuval Mintz *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 155308feecd7SYuval Mintz } 1554dacd88d6SYuval Mintz 1555dacd88d6SYuval Mintz params.tpa_mode = start->tpa_mode; 1556dacd88d6SYuval Mintz params.remove_inner_vlan = start->inner_vlan_removal; 1557831bfb0eSYuval Mintz params.tx_switching = true; 1558dacd88d6SYuval Mintz 155908feecd7SYuval Mintz params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 1560dacd88d6SYuval Mintz params.drop_ttl0 = false; 1561dacd88d6SYuval Mintz params.concrete_fid = vf->concrete_fid; 1562dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 1563dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1564dacd88d6SYuval Mintz params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1565dacd88d6SYuval Mintz params.mtu = vf->mtu; 1566dacd88d6SYuval Mintz 1567dacd88d6SYuval Mintz rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); 1568dacd88d6SYuval Mintz if (rc != 0) { 1569dacd88d6SYuval Mintz 
DP_ERR(p_hwfn, 1570dacd88d6SYuval Mintz "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1571dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1572dacd88d6SYuval Mintz } else { 1573dacd88d6SYuval Mintz vf->vport_instance++; 157408feecd7SYuval Mintz 157508feecd7SYuval Mintz /* Force configuration if needed on the newly opened vport */ 157608feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 15776ddc7608SYuval Mintz 15786ddc7608SYuval Mintz __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 1579dacd88d6SYuval Mintz } 1580dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 1581dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1582dacd88d6SYuval Mintz } 1583dacd88d6SYuval Mintz 1584dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, 1585dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1586dacd88d6SYuval Mintz struct qed_vf_info *vf) 1587dacd88d6SYuval Mintz { 1588dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1589dacd88d6SYuval Mintz int rc; 1590dacd88d6SYuval Mintz 1591dacd88d6SYuval Mintz vf->vport_instance--; 15926ddc7608SYuval Mintz vf->spoof_chk = false; 1593dacd88d6SYuval Mintz 1594dacd88d6SYuval Mintz rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 1595dacd88d6SYuval Mintz if (rc != 0) { 1596dacd88d6SYuval Mintz DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 1597dacd88d6SYuval Mintz rc); 1598dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1599dacd88d6SYuval Mintz } 1600dacd88d6SYuval Mintz 160108feecd7SYuval Mintz /* Forget the configuration on the vport */ 160208feecd7SYuval Mintz vf->configured_features = 0; 160308feecd7SYuval Mintz memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 160408feecd7SYuval Mintz 1605dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 1606dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1607dacd88d6SYuval Mintz } 
1608dacd88d6SYuval Mintz 1609dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 1610dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1611dacd88d6SYuval Mintz struct qed_vf_info *vf, u8 status) 1612dacd88d6SYuval Mintz { 1613dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1614dacd88d6SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 1615dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 1616dacd88d6SYuval Mintz 1617dacd88d6SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 1618dacd88d6SYuval Mintz 1619dacd88d6SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, 1620dacd88d6SYuval Mintz sizeof(*p_tlv)); 1621dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 1622dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1623dacd88d6SYuval Mintz 1624dacd88d6SYuval Mintz /* Update the TLV with the response */ 1625dacd88d6SYuval Mintz if (status == PFVF_STATUS_SUCCESS) { 1626dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 1627351a4dedSYuval Mintz p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 1628351a4dedSYuval Mintz offsetof(struct mstorm_vf_zone, 1629351a4dedSYuval Mintz non_trigger.eth_rx_queue_producers) + 1630351a4dedSYuval Mintz sizeof(struct eth_rx_prod_data) * req->rx_qid; 1631dacd88d6SYuval Mintz } 1632dacd88d6SYuval Mintz 1633dacd88d6SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); 1634dacd88d6SYuval Mintz } 1635dacd88d6SYuval Mintz 1636dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, 1637dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1638dacd88d6SYuval Mintz struct qed_vf_info *vf) 1639dacd88d6SYuval Mintz { 1640dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 1641dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1642dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1643dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 1644dacd88d6SYuval Mintz int rc; 
1645dacd88d6SYuval Mintz 1646dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 1647dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 1648dacd88d6SYuval Mintz params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; 1649351a4dedSYuval Mintz params.vf_qid = req->rx_qid; 1650dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1651dacd88d6SYuval Mintz params.sb = req->hw_sb; 1652dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 1653dacd88d6SYuval Mintz 1654dacd88d6SYuval Mintz rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, 1655dacd88d6SYuval Mintz vf->vf_queues[req->rx_qid].fw_cid, 1656dacd88d6SYuval Mintz ¶ms, 1657dacd88d6SYuval Mintz vf->abs_vf_id + 0x10, 1658dacd88d6SYuval Mintz req->bd_max_bytes, 1659dacd88d6SYuval Mintz req->rxq_addr, 1660dacd88d6SYuval Mintz req->cqe_pbl_addr, req->cqe_pbl_size); 1661dacd88d6SYuval Mintz 1662dacd88d6SYuval Mintz if (rc) { 1663dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1664dacd88d6SYuval Mintz } else { 1665dacd88d6SYuval Mintz vf->vf_queues[req->rx_qid].rxq_active = true; 1666dacd88d6SYuval Mintz vf->num_active_rxqs++; 1667dacd88d6SYuval Mintz } 1668dacd88d6SYuval Mintz 1669dacd88d6SYuval Mintz qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); 1670dacd88d6SYuval Mintz } 1671dacd88d6SYuval Mintz 1672dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 1673dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1674dacd88d6SYuval Mintz struct qed_vf_info *vf) 1675dacd88d6SYuval Mintz { 1676dacd88d6SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 1677dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 1678dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1679dacd88d6SYuval Mintz union qed_qm_pq_params pq_params; 1680dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1681dacd88d6SYuval Mintz struct vfpf_start_txq_tlv *req; 1682dacd88d6SYuval Mintz int rc; 1683dacd88d6SYuval Mintz 1684dacd88d6SYuval Mintz /* Prepare the parameters which would 
choose the right PQ */ 1685dacd88d6SYuval Mintz memset(&pq_params, 0, sizeof(pq_params)); 1686dacd88d6SYuval Mintz pq_params.eth.is_vf = 1; 1687dacd88d6SYuval Mintz pq_params.eth.vf_id = vf->relative_vf_id; 1688dacd88d6SYuval Mintz 1689dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 1690dacd88d6SYuval Mintz req = &mbx->req_virt->start_txq; 1691dacd88d6SYuval Mintz params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; 1692dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1693dacd88d6SYuval Mintz params.sb = req->hw_sb; 1694dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 1695dacd88d6SYuval Mintz 1696dacd88d6SYuval Mintz rc = qed_sp_eth_txq_start_ramrod(p_hwfn, 1697dacd88d6SYuval Mintz vf->opaque_fid, 1698dacd88d6SYuval Mintz vf->vf_queues[req->tx_qid].fw_cid, 1699dacd88d6SYuval Mintz ¶ms, 1700dacd88d6SYuval Mintz vf->abs_vf_id + 0x10, 1701dacd88d6SYuval Mintz req->pbl_addr, 1702dacd88d6SYuval Mintz req->pbl_size, &pq_params); 1703dacd88d6SYuval Mintz 1704dacd88d6SYuval Mintz if (rc) 1705dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1706dacd88d6SYuval Mintz else 1707dacd88d6SYuval Mintz vf->vf_queues[req->tx_qid].txq_active = true; 1708dacd88d6SYuval Mintz 1709dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, 1710dacd88d6SYuval Mintz length, status); 1711dacd88d6SYuval Mintz } 1712dacd88d6SYuval Mintz 1713dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 1714dacd88d6SYuval Mintz struct qed_vf_info *vf, 1715dacd88d6SYuval Mintz u16 rxq_id, u8 num_rxqs, bool cqe_completion) 1716dacd88d6SYuval Mintz { 1717dacd88d6SYuval Mintz int rc = 0; 1718dacd88d6SYuval Mintz int qid; 1719dacd88d6SYuval Mintz 1720dacd88d6SYuval Mintz if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues)) 1721dacd88d6SYuval Mintz return -EINVAL; 1722dacd88d6SYuval Mintz 1723dacd88d6SYuval Mintz for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { 1724dacd88d6SYuval Mintz if (vf->vf_queues[qid].rxq_active) { 1725dacd88d6SYuval 
Mintz rc = qed_sp_eth_rx_queue_stop(p_hwfn, 1726dacd88d6SYuval Mintz vf->vf_queues[qid]. 1727dacd88d6SYuval Mintz fw_rx_qid, false, 1728dacd88d6SYuval Mintz cqe_completion); 1729dacd88d6SYuval Mintz 1730dacd88d6SYuval Mintz if (rc) 1731dacd88d6SYuval Mintz return rc; 1732dacd88d6SYuval Mintz } 1733dacd88d6SYuval Mintz vf->vf_queues[qid].rxq_active = false; 1734dacd88d6SYuval Mintz vf->num_active_rxqs--; 1735dacd88d6SYuval Mintz } 1736dacd88d6SYuval Mintz 1737dacd88d6SYuval Mintz return rc; 1738dacd88d6SYuval Mintz } 1739dacd88d6SYuval Mintz 1740dacd88d6SYuval Mintz static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, 1741dacd88d6SYuval Mintz struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) 1742dacd88d6SYuval Mintz { 1743dacd88d6SYuval Mintz int rc = 0; 1744dacd88d6SYuval Mintz int qid; 1745dacd88d6SYuval Mintz 1746dacd88d6SYuval Mintz if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) 1747dacd88d6SYuval Mintz return -EINVAL; 1748dacd88d6SYuval Mintz 1749dacd88d6SYuval Mintz for (qid = txq_id; qid < txq_id + num_txqs; qid++) { 1750dacd88d6SYuval Mintz if (vf->vf_queues[qid].txq_active) { 1751dacd88d6SYuval Mintz rc = qed_sp_eth_tx_queue_stop(p_hwfn, 1752dacd88d6SYuval Mintz vf->vf_queues[qid]. 
1753dacd88d6SYuval Mintz fw_tx_qid); 1754dacd88d6SYuval Mintz 1755dacd88d6SYuval Mintz if (rc) 1756dacd88d6SYuval Mintz return rc; 1757dacd88d6SYuval Mintz } 1758dacd88d6SYuval Mintz vf->vf_queues[qid].txq_active = false; 1759dacd88d6SYuval Mintz } 1760dacd88d6SYuval Mintz return rc; 1761dacd88d6SYuval Mintz } 1762dacd88d6SYuval Mintz 1763dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, 1764dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1765dacd88d6SYuval Mintz struct qed_vf_info *vf) 1766dacd88d6SYuval Mintz { 1767dacd88d6SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 1768dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1769dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1770dacd88d6SYuval Mintz struct vfpf_stop_rxqs_tlv *req; 1771dacd88d6SYuval Mintz int rc; 1772dacd88d6SYuval Mintz 1773dacd88d6SYuval Mintz /* We give the option of starting from qid != 0, in this case we 1774dacd88d6SYuval Mintz * need to make sure that qid + num_qs doesn't exceed the actual 1775dacd88d6SYuval Mintz * amount of queues that exist. 
1776dacd88d6SYuval Mintz */ 1777dacd88d6SYuval Mintz req = &mbx->req_virt->stop_rxqs; 1778dacd88d6SYuval Mintz rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 1779dacd88d6SYuval Mintz req->num_rxqs, req->cqe_completion); 1780dacd88d6SYuval Mintz if (rc) 1781dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1782dacd88d6SYuval Mintz 1783dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 1784dacd88d6SYuval Mintz length, status); 1785dacd88d6SYuval Mintz } 1786dacd88d6SYuval Mintz 1787dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, 1788dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1789dacd88d6SYuval Mintz struct qed_vf_info *vf) 1790dacd88d6SYuval Mintz { 1791dacd88d6SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 1792dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1793dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1794dacd88d6SYuval Mintz struct vfpf_stop_txqs_tlv *req; 1795dacd88d6SYuval Mintz int rc; 1796dacd88d6SYuval Mintz 1797dacd88d6SYuval Mintz /* We give the option of starting from qid != 0, in this case we 1798dacd88d6SYuval Mintz * need to make sure that qid + num_qs doesn't exceed the actual 1799dacd88d6SYuval Mintz * amount of queues that exist. 
1800dacd88d6SYuval Mintz */ 1801dacd88d6SYuval Mintz req = &mbx->req_virt->stop_txqs; 1802dacd88d6SYuval Mintz rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); 1803dacd88d6SYuval Mintz if (rc) 1804dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1805dacd88d6SYuval Mintz 1806dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 1807dacd88d6SYuval Mintz length, status); 1808dacd88d6SYuval Mintz } 1809dacd88d6SYuval Mintz 181017b235c1SYuval Mintz static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, 181117b235c1SYuval Mintz struct qed_ptt *p_ptt, 181217b235c1SYuval Mintz struct qed_vf_info *vf) 181317b235c1SYuval Mintz { 181417b235c1SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 181517b235c1SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 181617b235c1SYuval Mintz struct vfpf_update_rxq_tlv *req; 181717b235c1SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 181817b235c1SYuval Mintz u8 complete_event_flg; 181917b235c1SYuval Mintz u8 complete_cqe_flg; 182017b235c1SYuval Mintz u16 qid; 182117b235c1SYuval Mintz int rc; 182217b235c1SYuval Mintz u8 i; 182317b235c1SYuval Mintz 182417b235c1SYuval Mintz req = &mbx->req_virt->update_rxq; 182517b235c1SYuval Mintz complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 182617b235c1SYuval Mintz complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 182717b235c1SYuval Mintz 182817b235c1SYuval Mintz for (i = 0; i < req->num_rxqs; i++) { 182917b235c1SYuval Mintz qid = req->rx_qid + i; 183017b235c1SYuval Mintz 183117b235c1SYuval Mintz if (!vf->vf_queues[qid].rxq_active) { 183217b235c1SYuval Mintz DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n", 183317b235c1SYuval Mintz qid); 183417b235c1SYuval Mintz status = PFVF_STATUS_FAILURE; 183517b235c1SYuval Mintz break; 183617b235c1SYuval Mintz } 183717b235c1SYuval Mintz 183817b235c1SYuval Mintz rc = qed_sp_eth_rx_queues_update(p_hwfn, 183917b235c1SYuval Mintz vf->vf_queues[qid].fw_rx_qid, 
184017b235c1SYuval Mintz 1, 184117b235c1SYuval Mintz complete_cqe_flg, 184217b235c1SYuval Mintz complete_event_flg, 184317b235c1SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 184417b235c1SYuval Mintz 184517b235c1SYuval Mintz if (rc) { 184617b235c1SYuval Mintz status = PFVF_STATUS_FAILURE; 184717b235c1SYuval Mintz break; 184817b235c1SYuval Mintz } 184917b235c1SYuval Mintz } 185017b235c1SYuval Mintz 185117b235c1SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, 185217b235c1SYuval Mintz length, status); 185317b235c1SYuval Mintz } 185417b235c1SYuval Mintz 1855dacd88d6SYuval Mintz void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, 1856dacd88d6SYuval Mintz void *p_tlvs_list, u16 req_type) 1857dacd88d6SYuval Mintz { 1858dacd88d6SYuval Mintz struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; 1859dacd88d6SYuval Mintz int len = 0; 1860dacd88d6SYuval Mintz 1861dacd88d6SYuval Mintz do { 1862dacd88d6SYuval Mintz if (!p_tlv->length) { 1863dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, "Zero length TLV found\n"); 1864dacd88d6SYuval Mintz return NULL; 1865dacd88d6SYuval Mintz } 1866dacd88d6SYuval Mintz 1867dacd88d6SYuval Mintz if (p_tlv->type == req_type) { 1868dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1869dacd88d6SYuval Mintz "Extended tlv type %d, length %d found\n", 1870dacd88d6SYuval Mintz p_tlv->type, p_tlv->length); 1871dacd88d6SYuval Mintz return p_tlv; 1872dacd88d6SYuval Mintz } 1873dacd88d6SYuval Mintz 1874dacd88d6SYuval Mintz len += p_tlv->length; 1875dacd88d6SYuval Mintz p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); 1876dacd88d6SYuval Mintz 1877dacd88d6SYuval Mintz if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { 1878dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); 1879dacd88d6SYuval Mintz return NULL; 1880dacd88d6SYuval Mintz } 1881dacd88d6SYuval Mintz } while (p_tlv->type != CHANNEL_TLV_LIST_END); 1882dacd88d6SYuval Mintz 1883dacd88d6SYuval Mintz return NULL; 1884dacd88d6SYuval Mintz } 
1885dacd88d6SYuval Mintz 1886dacd88d6SYuval Mintz static void 1887dacd88d6SYuval Mintz qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, 1888dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 1889dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 1890dacd88d6SYuval Mintz { 1891dacd88d6SYuval Mintz struct vfpf_vport_update_activate_tlv *p_act_tlv; 1892dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 1893dacd88d6SYuval Mintz 1894dacd88d6SYuval Mintz p_act_tlv = (struct vfpf_vport_update_activate_tlv *) 1895dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 1896dacd88d6SYuval Mintz if (!p_act_tlv) 1897dacd88d6SYuval Mintz return; 1898dacd88d6SYuval Mintz 1899dacd88d6SYuval Mintz p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; 1900dacd88d6SYuval Mintz p_data->vport_active_rx_flg = p_act_tlv->active_rx; 1901dacd88d6SYuval Mintz p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; 1902dacd88d6SYuval Mintz p_data->vport_active_tx_flg = p_act_tlv->active_tx; 1903dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; 1904dacd88d6SYuval Mintz } 1905dacd88d6SYuval Mintz 1906dacd88d6SYuval Mintz static void 190717b235c1SYuval Mintz qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, 190817b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 190917b235c1SYuval Mintz struct qed_vf_info *p_vf, 191017b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 191117b235c1SYuval Mintz { 191217b235c1SYuval Mintz struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 191317b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 191417b235c1SYuval Mintz 191517b235c1SYuval Mintz p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) 191617b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 191717b235c1SYuval Mintz if (!p_vlan_tlv) 191817b235c1SYuval Mintz return; 191917b235c1SYuval Mintz 192008feecd7SYuval Mintz 
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; 192108feecd7SYuval Mintz 192208feecd7SYuval Mintz /* Ignore the VF request if we're forcing a vlan */ 192308feecd7SYuval Mintz if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { 192417b235c1SYuval Mintz p_data->update_inner_vlan_removal_flg = 1; 192517b235c1SYuval Mintz p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; 192608feecd7SYuval Mintz } 192717b235c1SYuval Mintz 192817b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; 192917b235c1SYuval Mintz } 193017b235c1SYuval Mintz 193117b235c1SYuval Mintz static void 193217b235c1SYuval Mintz qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, 193317b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 193417b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 193517b235c1SYuval Mintz { 193617b235c1SYuval Mintz struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 193717b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 193817b235c1SYuval Mintz 193917b235c1SYuval Mintz p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 194017b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 194117b235c1SYuval Mintz tlv); 194217b235c1SYuval Mintz if (!p_tx_switch_tlv) 194317b235c1SYuval Mintz return; 194417b235c1SYuval Mintz 194517b235c1SYuval Mintz p_data->update_tx_switching_flg = 1; 194617b235c1SYuval Mintz p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 194717b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; 194817b235c1SYuval Mintz } 194917b235c1SYuval Mintz 195017b235c1SYuval Mintz static void 1951dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, 1952dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 1953dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 1954dacd88d6SYuval Mintz { 1955dacd88d6SYuval Mintz struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 1956dacd88d6SYuval Mintz 
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 1957dacd88d6SYuval Mintz 1958dacd88d6SYuval Mintz p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 1959dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 1960dacd88d6SYuval Mintz if (!p_mcast_tlv) 1961dacd88d6SYuval Mintz return; 1962dacd88d6SYuval Mintz 1963dacd88d6SYuval Mintz p_data->update_approx_mcast_flg = 1; 1964dacd88d6SYuval Mintz memcpy(p_data->bins, p_mcast_tlv->bins, 1965dacd88d6SYuval Mintz sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1966dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 1967dacd88d6SYuval Mintz } 1968dacd88d6SYuval Mintz 1969dacd88d6SYuval Mintz static void 1970dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, 1971dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 1972dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 1973dacd88d6SYuval Mintz { 1974dacd88d6SYuval Mintz struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; 1975dacd88d6SYuval Mintz struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1976dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1977dacd88d6SYuval Mintz 1978dacd88d6SYuval Mintz p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) 1979dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 1980dacd88d6SYuval Mintz if (!p_accept_tlv) 1981dacd88d6SYuval Mintz return; 1982dacd88d6SYuval Mintz 1983dacd88d6SYuval Mintz p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 1984dacd88d6SYuval Mintz p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 1985dacd88d6SYuval Mintz p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 1986dacd88d6SYuval Mintz p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 1987dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; 1988dacd88d6SYuval Mintz } 1989dacd88d6SYuval Mintz 1990dacd88d6SYuval Mintz static void 
199117b235c1SYuval Mintz qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, 199217b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 199317b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 199417b235c1SYuval Mintz { 199517b235c1SYuval Mintz struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 199617b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 199717b235c1SYuval Mintz 199817b235c1SYuval Mintz p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 199917b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 200017b235c1SYuval Mintz tlv); 200117b235c1SYuval Mintz if (!p_accept_any_vlan) 200217b235c1SYuval Mintz return; 200317b235c1SYuval Mintz 200417b235c1SYuval Mintz p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 200517b235c1SYuval Mintz p_data->update_accept_any_vlan_flg = 200617b235c1SYuval Mintz p_accept_any_vlan->update_accept_any_vlan_flg; 200717b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 200817b235c1SYuval Mintz } 200917b235c1SYuval Mintz 201017b235c1SYuval Mintz static void 2011dacd88d6SYuval Mintz qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, 2012dacd88d6SYuval Mintz struct qed_vf_info *vf, 2013dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 2014dacd88d6SYuval Mintz struct qed_rss_params *p_rss, 2015dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2016dacd88d6SYuval Mintz { 2017dacd88d6SYuval Mintz struct vfpf_vport_update_rss_tlv *p_rss_tlv; 2018dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; 2019dacd88d6SYuval Mintz u16 i, q_idx, max_q_idx; 2020dacd88d6SYuval Mintz u16 table_size; 2021dacd88d6SYuval Mintz 2022dacd88d6SYuval Mintz p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 2023dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2024dacd88d6SYuval Mintz if (!p_rss_tlv) { 2025dacd88d6SYuval Mintz p_data->rss_params = NULL; 
2026dacd88d6SYuval Mintz return; 2027dacd88d6SYuval Mintz } 2028dacd88d6SYuval Mintz 2029dacd88d6SYuval Mintz memset(p_rss, 0, sizeof(struct qed_rss_params)); 2030dacd88d6SYuval Mintz 2031dacd88d6SYuval Mintz p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & 2032dacd88d6SYuval Mintz VFPF_UPDATE_RSS_CONFIG_FLAG); 2033dacd88d6SYuval Mintz p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 2034dacd88d6SYuval Mintz VFPF_UPDATE_RSS_CAPS_FLAG); 2035dacd88d6SYuval Mintz p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 2036dacd88d6SYuval Mintz VFPF_UPDATE_RSS_IND_TABLE_FLAG); 2037dacd88d6SYuval Mintz p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 2038dacd88d6SYuval Mintz VFPF_UPDATE_RSS_KEY_FLAG); 2039dacd88d6SYuval Mintz 2040dacd88d6SYuval Mintz p_rss->rss_enable = p_rss_tlv->rss_enable; 2041dacd88d6SYuval Mintz p_rss->rss_eng_id = vf->relative_vf_id + 1; 2042dacd88d6SYuval Mintz p_rss->rss_caps = p_rss_tlv->rss_caps; 2043dacd88d6SYuval Mintz p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 2044dacd88d6SYuval Mintz memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, 2045dacd88d6SYuval Mintz sizeof(p_rss->rss_ind_table)); 2046dacd88d6SYuval Mintz memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); 2047dacd88d6SYuval Mintz 2048dacd88d6SYuval Mintz table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), 2049dacd88d6SYuval Mintz (1 << p_rss_tlv->rss_table_size_log)); 2050dacd88d6SYuval Mintz 2051dacd88d6SYuval Mintz max_q_idx = ARRAY_SIZE(vf->vf_queues); 2052dacd88d6SYuval Mintz 2053dacd88d6SYuval Mintz for (i = 0; i < table_size; i++) { 2054dacd88d6SYuval Mintz u16 index = vf->vf_queues[0].fw_rx_qid; 2055dacd88d6SYuval Mintz 2056dacd88d6SYuval Mintz q_idx = p_rss->rss_ind_table[i]; 2057dacd88d6SYuval Mintz if (q_idx >= max_q_idx) 2058dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, 2059dacd88d6SYuval Mintz "rss_ind_table[%d] = %d, rxq is out of range\n", 2060dacd88d6SYuval Mintz i, q_idx); 
2061dacd88d6SYuval Mintz else if (!vf->vf_queues[q_idx].rxq_active) 2062dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, 2063dacd88d6SYuval Mintz "rss_ind_table[%d] = %d, rxq is not active\n", 2064dacd88d6SYuval Mintz i, q_idx); 2065dacd88d6SYuval Mintz else 2066dacd88d6SYuval Mintz index = vf->vf_queues[q_idx].fw_rx_qid; 2067dacd88d6SYuval Mintz p_rss->rss_ind_table[i] = index; 2068dacd88d6SYuval Mintz } 2069dacd88d6SYuval Mintz 2070dacd88d6SYuval Mintz p_data->rss_params = p_rss; 2071dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; 2072dacd88d6SYuval Mintz } 2073dacd88d6SYuval Mintz 207417b235c1SYuval Mintz static void 207517b235c1SYuval Mintz qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, 207617b235c1SYuval Mintz struct qed_vf_info *vf, 207717b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 207817b235c1SYuval Mintz struct qed_sge_tpa_params *p_sge_tpa, 207917b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 208017b235c1SYuval Mintz { 208117b235c1SYuval Mintz struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 208217b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 208317b235c1SYuval Mintz 208417b235c1SYuval Mintz p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) 208517b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 208617b235c1SYuval Mintz 208717b235c1SYuval Mintz if (!p_sge_tpa_tlv) { 208817b235c1SYuval Mintz p_data->sge_tpa_params = NULL; 208917b235c1SYuval Mintz return; 209017b235c1SYuval Mintz } 209117b235c1SYuval Mintz 209217b235c1SYuval Mintz memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); 209317b235c1SYuval Mintz 209417b235c1SYuval Mintz p_sge_tpa->update_tpa_en_flg = 209517b235c1SYuval Mintz !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); 209617b235c1SYuval Mintz p_sge_tpa->update_tpa_param_flg = 209717b235c1SYuval Mintz !!(p_sge_tpa_tlv->update_sge_tpa_flags & 209817b235c1SYuval Mintz VFPF_UPDATE_TPA_PARAM_FLAG); 209917b235c1SYuval Mintz 
210017b235c1SYuval Mintz p_sge_tpa->tpa_ipv4_en_flg = 210117b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); 210217b235c1SYuval Mintz p_sge_tpa->tpa_ipv6_en_flg = 210317b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); 210417b235c1SYuval Mintz p_sge_tpa->tpa_pkt_split_flg = 210517b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); 210617b235c1SYuval Mintz p_sge_tpa->tpa_hdr_data_split_flg = 210717b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); 210817b235c1SYuval Mintz p_sge_tpa->tpa_gro_consistent_flg = 210917b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); 211017b235c1SYuval Mintz 211117b235c1SYuval Mintz p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; 211217b235c1SYuval Mintz p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; 211317b235c1SYuval Mintz p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; 211417b235c1SYuval Mintz p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; 211517b235c1SYuval Mintz p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; 211617b235c1SYuval Mintz 211717b235c1SYuval Mintz p_data->sge_tpa_params = p_sge_tpa; 211817b235c1SYuval Mintz 211917b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; 212017b235c1SYuval Mintz } 212117b235c1SYuval Mintz 2122dacd88d6SYuval Mintz static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, 2123dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2124dacd88d6SYuval Mintz struct qed_vf_info *vf) 2125dacd88d6SYuval Mintz { 2126dacd88d6SYuval Mintz struct qed_sp_vport_update_params params; 2127dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 212817b235c1SYuval Mintz struct qed_sge_tpa_params sge_tpa_params; 2129dacd88d6SYuval Mintz struct qed_rss_params rss_params; 2130dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2131dacd88d6SYuval Mintz u16 tlvs_mask 
= 0; 2132dacd88d6SYuval Mintz u16 length; 2133dacd88d6SYuval Mintz int rc; 2134dacd88d6SYuval Mintz 2135dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 2136dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 2137dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 2138dacd88d6SYuval Mintz params.rss_params = NULL; 2139dacd88d6SYuval Mintz 2140dacd88d6SYuval Mintz /* Search for extended tlvs list and update values 2141dacd88d6SYuval Mintz * from VF in struct qed_sp_vport_update_params. 2142dacd88d6SYuval Mintz */ 2143dacd88d6SYuval Mintz qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); 214417b235c1SYuval Mintz qed_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); 214517b235c1SYuval Mintz qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); 2146dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); 2147dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); 2148dacd88d6SYuval Mintz qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, &rss_params, 2149dacd88d6SYuval Mintz mbx, &tlvs_mask); 215017b235c1SYuval Mintz qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); 215117b235c1SYuval Mintz qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, 215217b235c1SYuval Mintz &sge_tpa_params, mbx, &tlvs_mask); 2153dacd88d6SYuval Mintz 2154dacd88d6SYuval Mintz /* Just log a message if there is no single extended tlv in buffer. 2155dacd88d6SYuval Mintz * When all features of vport update ramrod would be requested by VF 2156dacd88d6SYuval Mintz * as extended TLVs in buffer then an error can be returned in response 2157dacd88d6SYuval Mintz * if there is no extended TLV present in buffer. 
2158dacd88d6SYuval Mintz */ 2159dacd88d6SYuval Mintz if (!tlvs_mask) { 2160dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, 2161dacd88d6SYuval Mintz "No feature tlvs found for vport update\n"); 2162dacd88d6SYuval Mintz status = PFVF_STATUS_NOT_SUPPORTED; 2163dacd88d6SYuval Mintz goto out; 2164dacd88d6SYuval Mintz } 2165dacd88d6SYuval Mintz 2166dacd88d6SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 2167dacd88d6SYuval Mintz 2168dacd88d6SYuval Mintz if (rc) 2169dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2170dacd88d6SYuval Mintz 2171dacd88d6SYuval Mintz out: 2172dacd88d6SYuval Mintz length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, 2173dacd88d6SYuval Mintz tlvs_mask, tlvs_mask); 2174dacd88d6SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2175dacd88d6SYuval Mintz } 2176dacd88d6SYuval Mintz 217708feecd7SYuval Mintz static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 217808feecd7SYuval Mintz struct qed_vf_info *p_vf, 217908feecd7SYuval Mintz struct qed_filter_ucast *p_params) 218008feecd7SYuval Mintz { 218108feecd7SYuval Mintz int i; 218208feecd7SYuval Mintz 218308feecd7SYuval Mintz if (p_params->type == QED_FILTER_MAC) 218408feecd7SYuval Mintz return 0; 218508feecd7SYuval Mintz 218608feecd7SYuval Mintz /* First remove entries and then add new ones */ 218708feecd7SYuval Mintz if (p_params->opcode == QED_FILTER_REMOVE) { 218808feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 218908feecd7SYuval Mintz if (p_vf->shadow_config.vlans[i].used && 219008feecd7SYuval Mintz p_vf->shadow_config.vlans[i].vid == 219108feecd7SYuval Mintz p_params->vlan) { 219208feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = false; 219308feecd7SYuval Mintz break; 219408feecd7SYuval Mintz } 219508feecd7SYuval Mintz if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 219608feecd7SYuval Mintz DP_VERBOSE(p_hwfn, 219708feecd7SYuval Mintz QED_MSG_IOV, 219808feecd7SYuval Mintz "VF [%d] - Tries to 
remove a non-existing vlan\n", 219908feecd7SYuval Mintz p_vf->relative_vf_id); 220008feecd7SYuval Mintz return -EINVAL; 220108feecd7SYuval Mintz } 220208feecd7SYuval Mintz } else if (p_params->opcode == QED_FILTER_REPLACE || 220308feecd7SYuval Mintz p_params->opcode == QED_FILTER_FLUSH) { 220408feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 220508feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = false; 220608feecd7SYuval Mintz } 220708feecd7SYuval Mintz 220808feecd7SYuval Mintz /* In forced mode, we're willing to remove entries - but we don't add 220908feecd7SYuval Mintz * new ones. 221008feecd7SYuval Mintz */ 221108feecd7SYuval Mintz if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)) 221208feecd7SYuval Mintz return 0; 221308feecd7SYuval Mintz 221408feecd7SYuval Mintz if (p_params->opcode == QED_FILTER_ADD || 221508feecd7SYuval Mintz p_params->opcode == QED_FILTER_REPLACE) { 221608feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 221708feecd7SYuval Mintz if (p_vf->shadow_config.vlans[i].used) 221808feecd7SYuval Mintz continue; 221908feecd7SYuval Mintz 222008feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = true; 222108feecd7SYuval Mintz p_vf->shadow_config.vlans[i].vid = p_params->vlan; 222208feecd7SYuval Mintz break; 222308feecd7SYuval Mintz } 222408feecd7SYuval Mintz 222508feecd7SYuval Mintz if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 222608feecd7SYuval Mintz DP_VERBOSE(p_hwfn, 222708feecd7SYuval Mintz QED_MSG_IOV, 222808feecd7SYuval Mintz "VF [%d] - Tries to configure more than %d vlan filters\n", 222908feecd7SYuval Mintz p_vf->relative_vf_id, 223008feecd7SYuval Mintz QED_ETH_VF_NUM_VLAN_FILTERS + 1); 223108feecd7SYuval Mintz return -EINVAL; 223208feecd7SYuval Mintz } 223308feecd7SYuval Mintz } 223408feecd7SYuval Mintz 223508feecd7SYuval Mintz return 0; 223608feecd7SYuval Mintz } 223708feecd7SYuval Mintz 2238dacd88d6SYuval Mintz int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 
2239dacd88d6SYuval Mintz int vfid, struct qed_filter_ucast *params) 2240dacd88d6SYuval Mintz { 2241dacd88d6SYuval Mintz struct qed_public_vf_info *vf; 2242dacd88d6SYuval Mintz 2243dacd88d6SYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 2244dacd88d6SYuval Mintz if (!vf) 2245dacd88d6SYuval Mintz return -EINVAL; 2246dacd88d6SYuval Mintz 2247dacd88d6SYuval Mintz /* No real decision to make; Store the configured MAC */ 2248dacd88d6SYuval Mintz if (params->type == QED_FILTER_MAC || 2249dacd88d6SYuval Mintz params->type == QED_FILTER_MAC_VLAN) 2250dacd88d6SYuval Mintz ether_addr_copy(vf->mac, params->mac); 2251dacd88d6SYuval Mintz 2252dacd88d6SYuval Mintz return 0; 2253dacd88d6SYuval Mintz } 2254dacd88d6SYuval Mintz 2255dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 2256dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2257dacd88d6SYuval Mintz struct qed_vf_info *vf) 2258dacd88d6SYuval Mintz { 225908feecd7SYuval Mintz struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 2260dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2261dacd88d6SYuval Mintz struct vfpf_ucast_filter_tlv *req; 2262dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2263dacd88d6SYuval Mintz struct qed_filter_ucast params; 2264dacd88d6SYuval Mintz int rc; 2265dacd88d6SYuval Mintz 2266dacd88d6SYuval Mintz /* Prepare the unicast filter params */ 2267dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(struct qed_filter_ucast)); 2268dacd88d6SYuval Mintz req = &mbx->req_virt->ucast_filter; 2269dacd88d6SYuval Mintz params.opcode = (enum qed_filter_opcode)req->opcode; 2270dacd88d6SYuval Mintz params.type = (enum qed_filter_ucast_type)req->type; 2271dacd88d6SYuval Mintz 2272dacd88d6SYuval Mintz params.is_rx_filter = 1; 2273dacd88d6SYuval Mintz params.is_tx_filter = 1; 2274dacd88d6SYuval Mintz params.vport_to_remove_from = vf->vport_id; 2275dacd88d6SYuval Mintz params.vport_to_add_to = vf->vport_id; 2276dacd88d6SYuval Mintz memcpy(params.mac, 
req->mac, ETH_ALEN); 2277dacd88d6SYuval Mintz params.vlan = req->vlan; 2278dacd88d6SYuval Mintz 2279dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 2280dacd88d6SYuval Mintz QED_MSG_IOV, 2281dacd88d6SYuval Mintz "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 2282dacd88d6SYuval Mintz vf->abs_vf_id, params.opcode, params.type, 2283dacd88d6SYuval Mintz params.is_rx_filter ? "RX" : "", 2284dacd88d6SYuval Mintz params.is_tx_filter ? "TX" : "", 2285dacd88d6SYuval Mintz params.vport_to_add_to, 2286dacd88d6SYuval Mintz params.mac[0], params.mac[1], 2287dacd88d6SYuval Mintz params.mac[2], params.mac[3], 2288dacd88d6SYuval Mintz params.mac[4], params.mac[5], params.vlan); 2289dacd88d6SYuval Mintz 2290dacd88d6SYuval Mintz if (!vf->vport_instance) { 2291dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 2292dacd88d6SYuval Mintz QED_MSG_IOV, 2293dacd88d6SYuval Mintz "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 2294dacd88d6SYuval Mintz vf->abs_vf_id); 2295dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2296dacd88d6SYuval Mintz goto out; 2297dacd88d6SYuval Mintz } 2298dacd88d6SYuval Mintz 229908feecd7SYuval Mintz /* Update shadow copy of the VF configuration */ 230008feecd7SYuval Mintz if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { 230108feecd7SYuval Mintz status = PFVF_STATUS_FAILURE; 230208feecd7SYuval Mintz goto out; 230308feecd7SYuval Mintz } 230408feecd7SYuval Mintz 230508feecd7SYuval Mintz /* Determine if the unicast filtering is acceptible by PF */ 230608feecd7SYuval Mintz if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && 230708feecd7SYuval Mintz (params.type == QED_FILTER_VLAN || 230808feecd7SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 230908feecd7SYuval Mintz /* Once VLAN is forced or PVID is set, do not allow 231008feecd7SYuval Mintz * to add/replace any further VLANs. 
231108feecd7SYuval Mintz */ 231208feecd7SYuval Mintz if (params.opcode == QED_FILTER_ADD || 231308feecd7SYuval Mintz params.opcode == QED_FILTER_REPLACE) 231408feecd7SYuval Mintz status = PFVF_STATUS_FORCED; 231508feecd7SYuval Mintz goto out; 231608feecd7SYuval Mintz } 231708feecd7SYuval Mintz 2318eff16960SYuval Mintz if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && 2319eff16960SYuval Mintz (params.type == QED_FILTER_MAC || 2320eff16960SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 2321eff16960SYuval Mintz if (!ether_addr_equal(p_bulletin->mac, params.mac) || 2322eff16960SYuval Mintz (params.opcode != QED_FILTER_ADD && 2323eff16960SYuval Mintz params.opcode != QED_FILTER_REPLACE)) 2324eff16960SYuval Mintz status = PFVF_STATUS_FORCED; 2325eff16960SYuval Mintz goto out; 2326eff16960SYuval Mintz } 2327eff16960SYuval Mintz 2328dacd88d6SYuval Mintz rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); 2329dacd88d6SYuval Mintz if (rc) { 2330dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2331dacd88d6SYuval Mintz goto out; 2332dacd88d6SYuval Mintz } 2333dacd88d6SYuval Mintz 2334dacd88d6SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, 2335dacd88d6SYuval Mintz QED_SPQ_MODE_CB, NULL); 2336dacd88d6SYuval Mintz if (rc) 2337dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2338dacd88d6SYuval Mintz 2339dacd88d6SYuval Mintz out: 2340dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 2341dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 2342dacd88d6SYuval Mintz } 2343dacd88d6SYuval Mintz 23440b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 23450b55e27dSYuval Mintz struct qed_ptt *p_ptt, 23460b55e27dSYuval Mintz struct qed_vf_info *vf) 23470b55e27dSYuval Mintz { 23480b55e27dSYuval Mintz int i; 23490b55e27dSYuval Mintz 23500b55e27dSYuval Mintz /* Reset the SBs */ 23510b55e27dSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 23520b55e27dSYuval Mintz 
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 23530b55e27dSYuval Mintz vf->igu_sbs[i], 23540b55e27dSYuval Mintz vf->opaque_fid, false); 23550b55e27dSYuval Mintz 23560b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 23570b55e27dSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 23580b55e27dSYuval Mintz PFVF_STATUS_SUCCESS); 23590b55e27dSYuval Mintz } 23600b55e27dSYuval Mintz 23610b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 23620b55e27dSYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 23630b55e27dSYuval Mintz { 23640b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 23650b55e27dSYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 23660b55e27dSYuval Mintz 23670b55e27dSYuval Mintz /* Disable Interrupts for VF */ 23680b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 23690b55e27dSYuval Mintz 23700b55e27dSYuval Mintz /* Reset Permission table */ 23710b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 23720b55e27dSYuval Mintz 23730b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 23740b55e27dSYuval Mintz length, status); 23750b55e27dSYuval Mintz } 23760b55e27dSYuval Mintz 23770b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 23780b55e27dSYuval Mintz struct qed_ptt *p_ptt, 23790b55e27dSYuval Mintz struct qed_vf_info *p_vf) 23800b55e27dSYuval Mintz { 23810b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 23821fe614d1SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 23831fe614d1SYuval Mintz int rc = 0; 23840b55e27dSYuval Mintz 23850b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 23860b55e27dSYuval Mintz 23871fe614d1SYuval Mintz if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 23881fe614d1SYuval Mintz /* Stopping the VF */ 23891fe614d1SYuval Mintz rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 23901fe614d1SYuval Mintz p_vf->opaque_fid); 23911fe614d1SYuval Mintz 
23921fe614d1SYuval Mintz if (rc) { 23931fe614d1SYuval Mintz DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 23941fe614d1SYuval Mintz rc); 23951fe614d1SYuval Mintz status = PFVF_STATUS_FAILURE; 23961fe614d1SYuval Mintz } 23971fe614d1SYuval Mintz 23981fe614d1SYuval Mintz p_vf->state = VF_STOPPED; 23991fe614d1SYuval Mintz } 24001fe614d1SYuval Mintz 24010b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 24021fe614d1SYuval Mintz length, status); 24030b55e27dSYuval Mintz } 24040b55e27dSYuval Mintz 24050b55e27dSYuval Mintz static int 24060b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 24070b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 24080b55e27dSYuval Mintz { 24090b55e27dSYuval Mintz int cnt; 24100b55e27dSYuval Mintz u32 val; 24110b55e27dSYuval Mintz 24120b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 24130b55e27dSYuval Mintz 24140b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 24150b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 24160b55e27dSYuval Mintz if (!val) 24170b55e27dSYuval Mintz break; 24180b55e27dSYuval Mintz msleep(20); 24190b55e27dSYuval Mintz } 24200b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 24210b55e27dSYuval Mintz 24220b55e27dSYuval Mintz if (cnt == 50) { 24230b55e27dSYuval Mintz DP_ERR(p_hwfn, 24240b55e27dSYuval Mintz "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 24250b55e27dSYuval Mintz p_vf->abs_vf_id, val); 24260b55e27dSYuval Mintz return -EBUSY; 24270b55e27dSYuval Mintz } 24280b55e27dSYuval Mintz 24290b55e27dSYuval Mintz return 0; 24300b55e27dSYuval Mintz } 24310b55e27dSYuval Mintz 24320b55e27dSYuval Mintz static int 24330b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 24340b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 24350b55e27dSYuval Mintz { 24360b55e27dSYuval Mintz u32 cons[MAX_NUM_VOQS], 
distance[MAX_NUM_VOQS]; 24370b55e27dSYuval Mintz int i, cnt; 24380b55e27dSYuval Mintz 24390b55e27dSYuval Mintz /* Read initial consumers & producers */ 24400b55e27dSYuval Mintz for (i = 0; i < MAX_NUM_VOQS; i++) { 24410b55e27dSYuval Mintz u32 prod; 24420b55e27dSYuval Mintz 24430b55e27dSYuval Mintz cons[i] = qed_rd(p_hwfn, p_ptt, 24440b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 24450b55e27dSYuval Mintz i * 0x40); 24460b55e27dSYuval Mintz prod = qed_rd(p_hwfn, p_ptt, 24470b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 24480b55e27dSYuval Mintz i * 0x40); 24490b55e27dSYuval Mintz distance[i] = prod - cons[i]; 24500b55e27dSYuval Mintz } 24510b55e27dSYuval Mintz 24520b55e27dSYuval Mintz /* Wait for consumers to pass the producers */ 24530b55e27dSYuval Mintz i = 0; 24540b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 24550b55e27dSYuval Mintz for (; i < MAX_NUM_VOQS; i++) { 24560b55e27dSYuval Mintz u32 tmp; 24570b55e27dSYuval Mintz 24580b55e27dSYuval Mintz tmp = qed_rd(p_hwfn, p_ptt, 24590b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 24600b55e27dSYuval Mintz i * 0x40); 24610b55e27dSYuval Mintz if (distance[i] > tmp - cons[i]) 24620b55e27dSYuval Mintz break; 24630b55e27dSYuval Mintz } 24640b55e27dSYuval Mintz 24650b55e27dSYuval Mintz if (i == MAX_NUM_VOQS) 24660b55e27dSYuval Mintz break; 24670b55e27dSYuval Mintz 24680b55e27dSYuval Mintz msleep(20); 24690b55e27dSYuval Mintz } 24700b55e27dSYuval Mintz 24710b55e27dSYuval Mintz if (cnt == 50) { 24720b55e27dSYuval Mintz DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 24730b55e27dSYuval Mintz p_vf->abs_vf_id, i); 24740b55e27dSYuval Mintz return -EBUSY; 24750b55e27dSYuval Mintz } 24760b55e27dSYuval Mintz 24770b55e27dSYuval Mintz return 0; 24780b55e27dSYuval Mintz } 24790b55e27dSYuval Mintz 24800b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 24810b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 24820b55e27dSYuval Mintz { 
24830b55e27dSYuval Mintz int rc; 24840b55e27dSYuval Mintz 24850b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 24860b55e27dSYuval Mintz if (rc) 24870b55e27dSYuval Mintz return rc; 24880b55e27dSYuval Mintz 24890b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 24900b55e27dSYuval Mintz if (rc) 24910b55e27dSYuval Mintz return rc; 24920b55e27dSYuval Mintz 24930b55e27dSYuval Mintz return 0; 24940b55e27dSYuval Mintz } 24950b55e27dSYuval Mintz 24960b55e27dSYuval Mintz static int 24970b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 24980b55e27dSYuval Mintz struct qed_ptt *p_ptt, 24990b55e27dSYuval Mintz u16 rel_vf_id, u32 *ack_vfs) 25000b55e27dSYuval Mintz { 25010b55e27dSYuval Mintz struct qed_vf_info *p_vf; 25020b55e27dSYuval Mintz int rc = 0; 25030b55e27dSYuval Mintz 25040b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 25050b55e27dSYuval Mintz if (!p_vf) 25060b55e27dSYuval Mintz return 0; 25070b55e27dSYuval Mintz 25080b55e27dSYuval Mintz if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 25090b55e27dSYuval Mintz (1ULL << (rel_vf_id % 64))) { 25100b55e27dSYuval Mintz u16 vfid = p_vf->abs_vf_id; 25110b55e27dSYuval Mintz 25120b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 25130b55e27dSYuval Mintz "VF[%d] - Handling FLR\n", vfid); 25140b55e27dSYuval Mintz 25150b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 25160b55e27dSYuval Mintz 25170b55e27dSYuval Mintz /* If VF isn't active, no need for anything but SW */ 25180b55e27dSYuval Mintz if (!p_vf->b_init) 25190b55e27dSYuval Mintz goto cleanup; 25200b55e27dSYuval Mintz 25210b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 25220b55e27dSYuval Mintz if (rc) 25230b55e27dSYuval Mintz goto cleanup; 25240b55e27dSYuval Mintz 25250b55e27dSYuval Mintz rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 25260b55e27dSYuval Mintz if (rc) { 25270b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", 
vfid); 25280b55e27dSYuval Mintz return rc; 25290b55e27dSYuval Mintz } 25300b55e27dSYuval Mintz 25310b55e27dSYuval Mintz /* VF_STOPPED has to be set only after final cleanup 25320b55e27dSYuval Mintz * but prior to re-enabling the VF. 25330b55e27dSYuval Mintz */ 25340b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 25350b55e27dSYuval Mintz 25360b55e27dSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 25370b55e27dSYuval Mintz if (rc) { 25380b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 25390b55e27dSYuval Mintz vfid); 25400b55e27dSYuval Mintz return rc; 25410b55e27dSYuval Mintz } 25420b55e27dSYuval Mintz cleanup: 25430b55e27dSYuval Mintz /* Mark VF for ack and clean pending state */ 25440b55e27dSYuval Mintz if (p_vf->state == VF_RESET) 25450b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 25460b55e27dSYuval Mintz ack_vfs[vfid / 32] |= (1 << (vfid % 32)); 25470b55e27dSYuval Mintz p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 25480b55e27dSYuval Mintz ~(1ULL << (rel_vf_id % 64)); 25490b55e27dSYuval Mintz p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= 25500b55e27dSYuval Mintz ~(1ULL << (rel_vf_id % 64)); 25510b55e27dSYuval Mintz } 25520b55e27dSYuval Mintz 25530b55e27dSYuval Mintz return rc; 25540b55e27dSYuval Mintz } 25550b55e27dSYuval Mintz 25560b55e27dSYuval Mintz int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 25570b55e27dSYuval Mintz { 25580b55e27dSYuval Mintz u32 ack_vfs[VF_MAX_STATIC / 32]; 25590b55e27dSYuval Mintz int rc = 0; 25600b55e27dSYuval Mintz u16 i; 25610b55e27dSYuval Mintz 25620b55e27dSYuval Mintz memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 25630b55e27dSYuval Mintz 25640b55e27dSYuval Mintz /* Since BRB <-> PRS interface can't be tested as part of the flr 25650b55e27dSYuval Mintz * polling due to HW limitations, simply sleep a bit. And since 25660b55e27dSYuval Mintz * there's no need to wait per-vf, do it before looping. 
25670b55e27dSYuval Mintz */ 25680b55e27dSYuval Mintz msleep(100); 25690b55e27dSYuval Mintz 25700b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 25710b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 25720b55e27dSYuval Mintz 25730b55e27dSYuval Mintz rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 25740b55e27dSYuval Mintz return rc; 25750b55e27dSYuval Mintz } 25760b55e27dSYuval Mintz 25770b55e27dSYuval Mintz int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 25780b55e27dSYuval Mintz { 25790b55e27dSYuval Mintz u16 i, found = 0; 25800b55e27dSYuval Mintz 25810b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 25820b55e27dSYuval Mintz for (i = 0; i < (VF_MAX_STATIC / 32); i++) 25830b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 25840b55e27dSYuval Mintz "[%08x,...,%08x]: %08x\n", 25850b55e27dSYuval Mintz i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 25860b55e27dSYuval Mintz 25870b55e27dSYuval Mintz if (!p_hwfn->cdev->p_iov_info) { 25880b55e27dSYuval Mintz DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 25890b55e27dSYuval Mintz return 0; 25900b55e27dSYuval Mintz } 25910b55e27dSYuval Mintz 25920b55e27dSYuval Mintz /* Mark VFs */ 25930b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 25940b55e27dSYuval Mintz struct qed_vf_info *p_vf; 25950b55e27dSYuval Mintz u8 vfid; 25960b55e27dSYuval Mintz 25970b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 25980b55e27dSYuval Mintz if (!p_vf) 25990b55e27dSYuval Mintz continue; 26000b55e27dSYuval Mintz 26010b55e27dSYuval Mintz vfid = p_vf->abs_vf_id; 26020b55e27dSYuval Mintz if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { 26030b55e27dSYuval Mintz u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 26040b55e27dSYuval Mintz u16 rel_vf_id = p_vf->relative_vf_id; 26050b55e27dSYuval Mintz 26060b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 26070b55e27dSYuval Mintz "VF[%d] [rel %d] got 
FLR-ed\n", 26080b55e27dSYuval Mintz vfid, rel_vf_id); 26090b55e27dSYuval Mintz 26100b55e27dSYuval Mintz p_vf->state = VF_RESET; 26110b55e27dSYuval Mintz 26120b55e27dSYuval Mintz /* No need to lock here, since pending_flr should 26130b55e27dSYuval Mintz * only change here and before ACKing MFw. Since 26140b55e27dSYuval Mintz * MFW will not trigger an additional attention for 26150b55e27dSYuval Mintz * VF flr until ACKs, we're safe. 26160b55e27dSYuval Mintz */ 26170b55e27dSYuval Mintz p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 26180b55e27dSYuval Mintz found = 1; 26190b55e27dSYuval Mintz } 26200b55e27dSYuval Mintz } 26210b55e27dSYuval Mintz 26220b55e27dSYuval Mintz return found; 26230b55e27dSYuval Mintz } 26240b55e27dSYuval Mintz 262573390ac9SYuval Mintz static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 262673390ac9SYuval Mintz u16 vfid, 262773390ac9SYuval Mintz struct qed_mcp_link_params *p_params, 262873390ac9SYuval Mintz struct qed_mcp_link_state *p_link, 262973390ac9SYuval Mintz struct qed_mcp_link_capabilities *p_caps) 263073390ac9SYuval Mintz { 263173390ac9SYuval Mintz struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 263273390ac9SYuval Mintz vfid, 263373390ac9SYuval Mintz false); 263473390ac9SYuval Mintz struct qed_bulletin_content *p_bulletin; 263573390ac9SYuval Mintz 263673390ac9SYuval Mintz if (!p_vf) 263773390ac9SYuval Mintz return; 263873390ac9SYuval Mintz 263973390ac9SYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 264073390ac9SYuval Mintz 264173390ac9SYuval Mintz if (p_params) 264273390ac9SYuval Mintz __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 264373390ac9SYuval Mintz if (p_link) 264473390ac9SYuval Mintz __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 264573390ac9SYuval Mintz if (p_caps) 264673390ac9SYuval Mintz __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 264773390ac9SYuval Mintz } 264873390ac9SYuval Mintz 264937bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 265037bff2b9SYuval 
Mintz struct qed_ptt *p_ptt, int vfid) 265137bff2b9SYuval Mintz { 265237bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx; 265337bff2b9SYuval Mintz struct qed_vf_info *p_vf; 265437bff2b9SYuval Mintz int i; 265537bff2b9SYuval Mintz 265637bff2b9SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 265737bff2b9SYuval Mintz if (!p_vf) 265837bff2b9SYuval Mintz return; 265937bff2b9SYuval Mintz 266037bff2b9SYuval Mintz mbx = &p_vf->vf_mbx; 266137bff2b9SYuval Mintz 266237bff2b9SYuval Mintz /* qed_iov_process_mbx_request */ 266337bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, 266437bff2b9SYuval Mintz QED_MSG_IOV, 266537bff2b9SYuval Mintz "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); 266637bff2b9SYuval Mintz 266737bff2b9SYuval Mintz mbx->first_tlv = mbx->req_virt->first_tlv; 266837bff2b9SYuval Mintz 266937bff2b9SYuval Mintz /* check if tlv type is known */ 267037bff2b9SYuval Mintz if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 26711408cc1fSYuval Mintz switch (mbx->first_tlv.tl.type) { 26721408cc1fSYuval Mintz case CHANNEL_TLV_ACQUIRE: 26731408cc1fSYuval Mintz qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 26741408cc1fSYuval Mintz break; 2675dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_START: 2676dacd88d6SYuval Mintz qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 2677dacd88d6SYuval Mintz break; 2678dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_TEARDOWN: 2679dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 2680dacd88d6SYuval Mintz break; 2681dacd88d6SYuval Mintz case CHANNEL_TLV_START_RXQ: 2682dacd88d6SYuval Mintz qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 2683dacd88d6SYuval Mintz break; 2684dacd88d6SYuval Mintz case CHANNEL_TLV_START_TXQ: 2685dacd88d6SYuval Mintz qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 2686dacd88d6SYuval Mintz break; 2687dacd88d6SYuval Mintz case CHANNEL_TLV_STOP_RXQS: 2688dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 2689dacd88d6SYuval Mintz break; 2690dacd88d6SYuval Mintz case 
CHANNEL_TLV_STOP_TXQS: 2691dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 2692dacd88d6SYuval Mintz break; 269317b235c1SYuval Mintz case CHANNEL_TLV_UPDATE_RXQ: 269417b235c1SYuval Mintz qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); 269517b235c1SYuval Mintz break; 2696dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_UPDATE: 2697dacd88d6SYuval Mintz qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 2698dacd88d6SYuval Mintz break; 2699dacd88d6SYuval Mintz case CHANNEL_TLV_UCAST_FILTER: 2700dacd88d6SYuval Mintz qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 2701dacd88d6SYuval Mintz break; 27020b55e27dSYuval Mintz case CHANNEL_TLV_CLOSE: 27030b55e27dSYuval Mintz qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 27040b55e27dSYuval Mintz break; 27050b55e27dSYuval Mintz case CHANNEL_TLV_INT_CLEANUP: 27060b55e27dSYuval Mintz qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 27070b55e27dSYuval Mintz break; 27080b55e27dSYuval Mintz case CHANNEL_TLV_RELEASE: 27090b55e27dSYuval Mintz qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 27100b55e27dSYuval Mintz break; 27111408cc1fSYuval Mintz } 271237bff2b9SYuval Mintz } else { 271337bff2b9SYuval Mintz /* unknown TLV - this may belong to a VF driver from the future 271437bff2b9SYuval Mintz * - a version written after this PF driver was written, which 271537bff2b9SYuval Mintz * supports features unknown as of yet. Too bad since we don't 271637bff2b9SYuval Mintz * support them. Or this may be because someone wrote a crappy 271737bff2b9SYuval Mintz * VF driver and is sending garbage over the channel. 271837bff2b9SYuval Mintz */ 271937bff2b9SYuval Mintz DP_ERR(p_hwfn, 272037bff2b9SYuval Mintz "unknown TLV. type %d length %d. 
first 20 bytes of mailbox buffer:\n", 272137bff2b9SYuval Mintz mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); 272237bff2b9SYuval Mintz 272337bff2b9SYuval Mintz for (i = 0; i < 20; i++) { 272437bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, 272537bff2b9SYuval Mintz QED_MSG_IOV, 272637bff2b9SYuval Mintz "%x ", 272737bff2b9SYuval Mintz mbx->req_virt->tlv_buf_size.tlv_buffer[i]); 272837bff2b9SYuval Mintz } 272937bff2b9SYuval Mintz } 273037bff2b9SYuval Mintz } 273137bff2b9SYuval Mintz 273237bff2b9SYuval Mintz void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) 273337bff2b9SYuval Mintz { 273437bff2b9SYuval Mintz u64 add_bit = 1ULL << (vfid % 64); 273537bff2b9SYuval Mintz 273637bff2b9SYuval Mintz p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; 273737bff2b9SYuval Mintz } 273837bff2b9SYuval Mintz 273937bff2b9SYuval Mintz static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, 274037bff2b9SYuval Mintz u64 *events) 274137bff2b9SYuval Mintz { 274237bff2b9SYuval Mintz u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; 274337bff2b9SYuval Mintz 274437bff2b9SYuval Mintz memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); 274537bff2b9SYuval Mintz memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); 274637bff2b9SYuval Mintz } 274737bff2b9SYuval Mintz 274837bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 274937bff2b9SYuval Mintz u16 abs_vfid, struct regpair *vf_msg) 275037bff2b9SYuval Mintz { 275137bff2b9SYuval Mintz u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; 275237bff2b9SYuval Mintz struct qed_vf_info *p_vf; 275337bff2b9SYuval Mintz 275437bff2b9SYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { 275537bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, 275637bff2b9SYuval Mintz QED_MSG_IOV, 275737bff2b9SYuval Mintz "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", 275837bff2b9SYuval Mintz abs_vfid); 275937bff2b9SYuval Mintz return 0; 
276037bff2b9SYuval Mintz } 276137bff2b9SYuval Mintz p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; 276237bff2b9SYuval Mintz 276337bff2b9SYuval Mintz /* List the physical address of the request so that handler 276437bff2b9SYuval Mintz * could later on copy the message from it. 276537bff2b9SYuval Mintz */ 276637bff2b9SYuval Mintz p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; 276737bff2b9SYuval Mintz 276837bff2b9SYuval Mintz /* Mark the event and schedule the workqueue */ 276937bff2b9SYuval Mintz qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id); 277037bff2b9SYuval Mintz qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); 277137bff2b9SYuval Mintz 277237bff2b9SYuval Mintz return 0; 277337bff2b9SYuval Mintz } 277437bff2b9SYuval Mintz 277537bff2b9SYuval Mintz int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 277637bff2b9SYuval Mintz u8 opcode, __le16 echo, union event_ring_data *data) 277737bff2b9SYuval Mintz { 277837bff2b9SYuval Mintz switch (opcode) { 277937bff2b9SYuval Mintz case COMMON_EVENT_VF_PF_CHANNEL: 278037bff2b9SYuval Mintz return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), 278137bff2b9SYuval Mintz &data->vf_pf_channel.msg_addr); 278237bff2b9SYuval Mintz default: 278337bff2b9SYuval Mintz DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 278437bff2b9SYuval Mintz opcode); 278537bff2b9SYuval Mintz return -EINVAL; 278637bff2b9SYuval Mintz } 278737bff2b9SYuval Mintz } 278837bff2b9SYuval Mintz 278932a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 279032a47e72SYuval Mintz { 279132a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 279232a47e72SYuval Mintz u16 i; 279332a47e72SYuval Mintz 279432a47e72SYuval Mintz if (!p_iov) 279532a47e72SYuval Mintz goto out; 279632a47e72SYuval Mintz 279732a47e72SYuval Mintz for (i = rel_vf_id; i < p_iov->total_vfs; i++) 279832a47e72SYuval Mintz /* check the VF at the loop index, not the invariant rel_vf_id */ 279832a47e72SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, i, true)) 279932a47e72SYuval Mintz 
return i; 280032a47e72SYuval Mintz 280132a47e72SYuval Mintz out: 280232a47e72SYuval Mintz return MAX_NUM_VFS; 280332a47e72SYuval Mintz } 280437bff2b9SYuval Mintz 280537bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, 280637bff2b9SYuval Mintz int vfid) 280737bff2b9SYuval Mintz { 280837bff2b9SYuval Mintz struct qed_dmae_params params; 280937bff2b9SYuval Mintz struct qed_vf_info *vf_info; 281037bff2b9SYuval Mintz 281137bff2b9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 281237bff2b9SYuval Mintz if (!vf_info) 281337bff2b9SYuval Mintz return -EINVAL; 281437bff2b9SYuval Mintz 281537bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 281637bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; 281737bff2b9SYuval Mintz params.src_vfid = vf_info->abs_vf_id; 281837bff2b9SYuval Mintz 281937bff2b9SYuval Mintz if (qed_dmae_host2host(p_hwfn, ptt, 282037bff2b9SYuval Mintz vf_info->vf_mbx.pending_req, 282137bff2b9SYuval Mintz vf_info->vf_mbx.req_phys, 282237bff2b9SYuval Mintz sizeof(union vfpf_tlvs) / 4, ¶ms)) { 282337bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 282437bff2b9SYuval Mintz "Failed to copy message from VF 0x%02x\n", vfid); 282537bff2b9SYuval Mintz 282637bff2b9SYuval Mintz return -EIO; 282737bff2b9SYuval Mintz } 282837bff2b9SYuval Mintz 282937bff2b9SYuval Mintz return 0; 283037bff2b9SYuval Mintz } 283137bff2b9SYuval Mintz 2832eff16960SYuval Mintz static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, 2833eff16960SYuval Mintz u8 *mac, int vfid) 2834eff16960SYuval Mintz { 2835eff16960SYuval Mintz struct qed_vf_info *vf_info; 2836eff16960SYuval Mintz u64 feature; 2837eff16960SYuval Mintz 2838eff16960SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 2839eff16960SYuval Mintz if (!vf_info) { 2840eff16960SYuval Mintz DP_NOTICE(p_hwfn->cdev, 2841eff16960SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 
2842eff16960SYuval Mintz return; 2843eff16960SYuval Mintz } 2844eff16960SYuval Mintz 2845eff16960SYuval Mintz feature = 1 << MAC_ADDR_FORCED; 2846eff16960SYuval Mintz memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); 2847eff16960SYuval Mintz 2848eff16960SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 2849eff16960SYuval Mintz /* Forced MAC will disable MAC_ADDR */ 2850eff16960SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= 2851eff16960SYuval Mintz ~(1 << VFPF_BULLETIN_MAC_ADDR); 2852eff16960SYuval Mintz 2853eff16960SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 2854eff16960SYuval Mintz } 2855eff16960SYuval Mintz 285608feecd7SYuval Mintz void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, 285708feecd7SYuval Mintz u16 pvid, int vfid) 285808feecd7SYuval Mintz { 285908feecd7SYuval Mintz struct qed_vf_info *vf_info; 286008feecd7SYuval Mintz u64 feature; 286108feecd7SYuval Mintz 286208feecd7SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 286308feecd7SYuval Mintz if (!vf_info) { 286408feecd7SYuval Mintz DP_NOTICE(p_hwfn->cdev, 286508feecd7SYuval Mintz "Can not set forced vlan, invalid vfid [%d]\n", vfid); 286608feecd7SYuval Mintz return; 286708feecd7SYuval Mintz } 286808feecd7SYuval Mintz 286908feecd7SYuval Mintz feature = 1 << VLAN_ADDR_FORCED; 287008feecd7SYuval Mintz vf_info->bulletin.p_virt->pvid = pvid; 287108feecd7SYuval Mintz if (pvid) 287208feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 287308feecd7SYuval Mintz else 287408feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 287508feecd7SYuval Mintz 287608feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 287708feecd7SYuval Mintz } 287808feecd7SYuval Mintz 28796ddc7608SYuval Mintz static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 28806ddc7608SYuval Mintz { 28816ddc7608SYuval Mintz struct qed_vf_info *p_vf_info; 28826ddc7608SYuval Mintz 
28836ddc7608SYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 28846ddc7608SYuval Mintz if (!p_vf_info) 28856ddc7608SYuval Mintz return false; 28866ddc7608SYuval Mintz 28876ddc7608SYuval Mintz return !!p_vf_info->vport_instance; 28886ddc7608SYuval Mintz } 28896ddc7608SYuval Mintz 28900b55e27dSYuval Mintz bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) 28910b55e27dSYuval Mintz { 28920b55e27dSYuval Mintz struct qed_vf_info *p_vf_info; 28930b55e27dSYuval Mintz 28940b55e27dSYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 28950b55e27dSYuval Mintz if (!p_vf_info) 28960b55e27dSYuval Mintz return true; 28970b55e27dSYuval Mintz 28980b55e27dSYuval Mintz return p_vf_info->state == VF_STOPPED; 28990b55e27dSYuval Mintz } 29000b55e27dSYuval Mintz 290173390ac9SYuval Mintz static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 290273390ac9SYuval Mintz { 290373390ac9SYuval Mintz struct qed_vf_info *vf_info; 290473390ac9SYuval Mintz 290573390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 290673390ac9SYuval Mintz if (!vf_info) 290773390ac9SYuval Mintz return false; 290873390ac9SYuval Mintz 290973390ac9SYuval Mintz return vf_info->spoof_chk; 291073390ac9SYuval Mintz } 291173390ac9SYuval Mintz 29126ddc7608SYuval Mintz int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 29136ddc7608SYuval Mintz { 29146ddc7608SYuval Mintz struct qed_vf_info *vf; 29156ddc7608SYuval Mintz int rc = -EINVAL; 29166ddc7608SYuval Mintz 29176ddc7608SYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 29186ddc7608SYuval Mintz DP_NOTICE(p_hwfn, 29196ddc7608SYuval Mintz "SR-IOV sanity check failed, can't set spoofchk\n"); 29206ddc7608SYuval Mintz goto out; 29216ddc7608SYuval Mintz } 29226ddc7608SYuval Mintz 29236ddc7608SYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 29246ddc7608SYuval Mintz if (!vf) 29256ddc7608SYuval Mintz goto out; 29266ddc7608SYuval Mintz 29276ddc7608SYuval 
Mintz if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 29286ddc7608SYuval Mintz /* After VF VPORT start PF will configure spoof check */ 29296ddc7608SYuval Mintz vf->req_spoofchk_val = val; 29306ddc7608SYuval Mintz rc = 0; 29316ddc7608SYuval Mintz goto out; 29326ddc7608SYuval Mintz } 29336ddc7608SYuval Mintz 29346ddc7608SYuval Mintz rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 29356ddc7608SYuval Mintz 29366ddc7608SYuval Mintz out: 29376ddc7608SYuval Mintz return rc; 29386ddc7608SYuval Mintz } 29396ddc7608SYuval Mintz 2940eff16960SYuval Mintz static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 2941eff16960SYuval Mintz u16 rel_vf_id) 2942eff16960SYuval Mintz { 2943eff16960SYuval Mintz struct qed_vf_info *p_vf; 2944eff16960SYuval Mintz 2945eff16960SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 2946eff16960SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 2947eff16960SYuval Mintz return NULL; 2948eff16960SYuval Mintz 2949eff16960SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) 2950eff16960SYuval Mintz return NULL; 2951eff16960SYuval Mintz 2952eff16960SYuval Mintz return p_vf->bulletin.p_virt->mac; 2953eff16960SYuval Mintz } 2954eff16960SYuval Mintz 295508feecd7SYuval Mintz u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 295608feecd7SYuval Mintz { 295708feecd7SYuval Mintz struct qed_vf_info *p_vf; 295808feecd7SYuval Mintz 295908feecd7SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 296008feecd7SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 296108feecd7SYuval Mintz return 0; 296208feecd7SYuval Mintz 296308feecd7SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) 296408feecd7SYuval Mintz return 0; 296508feecd7SYuval Mintz 296608feecd7SYuval Mintz return p_vf->bulletin.p_virt->pvid; 296708feecd7SYuval Mintz } 296808feecd7SYuval Mintz 2969733def6aSYuval Mintz static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 
2970733def6aSYuval Mintz struct qed_ptt *p_ptt, int vfid, int val) 2971733def6aSYuval Mintz { 2972733def6aSYuval Mintz struct qed_vf_info *vf; 2973733def6aSYuval Mintz u8 abs_vp_id = 0; 2974733def6aSYuval Mintz int rc; 2975733def6aSYuval Mintz 2976733def6aSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 2977733def6aSYuval Mintz if (!vf) 2978733def6aSYuval Mintz return -EINVAL; 2979733def6aSYuval Mintz 2980733def6aSYuval Mintz rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 2981733def6aSYuval Mintz if (rc) 2982733def6aSYuval Mintz return rc; 2983733def6aSYuval Mintz 2984733def6aSYuval Mintz return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); 2985733def6aSYuval Mintz } 2986733def6aSYuval Mintz 2987733def6aSYuval Mintz int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 2988733def6aSYuval Mintz { 2989733def6aSYuval Mintz struct qed_vf_info *vf; 2990733def6aSYuval Mintz u8 vport_id; 2991733def6aSYuval Mintz int i; 2992733def6aSYuval Mintz 2993733def6aSYuval Mintz for_each_hwfn(cdev, i) { 2994733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2995733def6aSYuval Mintz 2996733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 2997733def6aSYuval Mintz DP_NOTICE(p_hwfn, 2998733def6aSYuval Mintz "SR-IOV sanity check failed, can't set min rate\n"); 2999733def6aSYuval Mintz return -EINVAL; 3000733def6aSYuval Mintz } 3001733def6aSYuval Mintz } 3002733def6aSYuval Mintz 3003733def6aSYuval Mintz vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); 3004733def6aSYuval Mintz vport_id = vf->vport_id; 3005733def6aSYuval Mintz 3006733def6aSYuval Mintz return qed_configure_vport_wfq(cdev, vport_id, rate); 3007733def6aSYuval Mintz } 3008733def6aSYuval Mintz 300973390ac9SYuval Mintz static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 301073390ac9SYuval Mintz { 301173390ac9SYuval Mintz struct qed_wfq_data *vf_vp_wfq; 301273390ac9SYuval Mintz struct qed_vf_info *vf_info; 
301373390ac9SYuval Mintz 301473390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 301573390ac9SYuval Mintz if (!vf_info) 301673390ac9SYuval Mintz return 0; 301773390ac9SYuval Mintz 301873390ac9SYuval Mintz vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 301973390ac9SYuval Mintz 302073390ac9SYuval Mintz if (vf_vp_wfq->configured) 302173390ac9SYuval Mintz return vf_vp_wfq->min_speed; 302273390ac9SYuval Mintz else 302373390ac9SYuval Mintz return 0; 302473390ac9SYuval Mintz } 302573390ac9SYuval Mintz 302637bff2b9SYuval Mintz /** 302737bff2b9SYuval Mintz * qed_schedule_iov - schedules IOV task for VF and PF 302837bff2b9SYuval Mintz * @hwfn: hardware function pointer 302937bff2b9SYuval Mintz * @flag: IOV flag for VF/PF 303037bff2b9SYuval Mintz */ 303137bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 303237bff2b9SYuval Mintz { 303337bff2b9SYuval Mintz smp_mb__before_atomic(); 303437bff2b9SYuval Mintz set_bit(flag, &hwfn->iov_task_flags); 303537bff2b9SYuval Mintz smp_mb__after_atomic(); 303637bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 303737bff2b9SYuval Mintz queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); 303837bff2b9SYuval Mintz } 303937bff2b9SYuval Mintz 30401408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev) 30411408cc1fSYuval Mintz { 30421408cc1fSYuval Mintz int i; 30431408cc1fSYuval Mintz 30441408cc1fSYuval Mintz for_each_hwfn(cdev, i) 30451408cc1fSYuval Mintz queue_delayed_work(cdev->hwfns[i].iov_wq, 30461408cc1fSYuval Mintz &cdev->hwfns[i].iov_task, 0); 30471408cc1fSYuval Mintz } 30481408cc1fSYuval Mintz 30490b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) 30500b55e27dSYuval Mintz { 30510b55e27dSYuval Mintz int i, j; 30520b55e27dSYuval Mintz 30530b55e27dSYuval Mintz for_each_hwfn(cdev, i) 30540b55e27dSYuval Mintz if (cdev->hwfns[i].iov_wq) 30550b55e27dSYuval Mintz 
flush_workqueue(cdev->hwfns[i].iov_wq); 30560b55e27dSYuval Mintz 30570b55e27dSYuval Mintz /* Mark VFs for disablement */ 30580b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, true); 30590b55e27dSYuval Mintz 30600b55e27dSYuval Mintz if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) 30610b55e27dSYuval Mintz pci_disable_sriov(cdev->pdev); 30620b55e27dSYuval Mintz 30630b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 30640b55e27dSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 30650b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 30660b55e27dSYuval Mintz 30670b55e27dSYuval Mintz /* Failure to acquire the ptt in 100g creates an odd error 30680b55e27dSYuval Mintz * where the first engine has already relased IOV. 30690b55e27dSYuval Mintz */ 30700b55e27dSYuval Mintz if (!ptt) { 30710b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 30720b55e27dSYuval Mintz return -EBUSY; 30730b55e27dSYuval Mintz } 30740b55e27dSYuval Mintz 3075733def6aSYuval Mintz /* Clean WFQ db and configure equal weight for all vports */ 3076733def6aSYuval Mintz qed_clean_wfq_db(hwfn, ptt); 3077733def6aSYuval Mintz 30780b55e27dSYuval Mintz qed_for_each_vf(hwfn, j) { 30790b55e27dSYuval Mintz int k; 30800b55e27dSYuval Mintz 30810b55e27dSYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, j, true)) 30820b55e27dSYuval Mintz continue; 30830b55e27dSYuval Mintz 30840b55e27dSYuval Mintz /* Wait until VF is disabled before releasing */ 30850b55e27dSYuval Mintz for (k = 0; k < 100; k++) { 30860b55e27dSYuval Mintz if (!qed_iov_is_vf_stopped(hwfn, j)) 30870b55e27dSYuval Mintz msleep(20); 30880b55e27dSYuval Mintz else 30890b55e27dSYuval Mintz break; 30900b55e27dSYuval Mintz } 30910b55e27dSYuval Mintz 30920b55e27dSYuval Mintz if (k < 100) 30930b55e27dSYuval Mintz qed_iov_release_hw_for_vf(&cdev->hwfns[i], 30940b55e27dSYuval Mintz ptt, j); 30950b55e27dSYuval Mintz else 30960b55e27dSYuval Mintz DP_ERR(hwfn, 30970b55e27dSYuval Mintz "Timeout waiting for VF's FLR to end\n"); 
30980b55e27dSYuval Mintz } 30990b55e27dSYuval Mintz 31000b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 31010b55e27dSYuval Mintz } 31020b55e27dSYuval Mintz 31030b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, false); 31040b55e27dSYuval Mintz 31050b55e27dSYuval Mintz return 0; 31060b55e27dSYuval Mintz } 31070b55e27dSYuval Mintz 31080b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num) 31090b55e27dSYuval Mintz { 31100b55e27dSYuval Mintz struct qed_sb_cnt_info sb_cnt_info; 31110b55e27dSYuval Mintz int i, j, rc; 31120b55e27dSYuval Mintz 31130b55e27dSYuval Mintz if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 31140b55e27dSYuval Mintz DP_NOTICE(cdev, "Can start at most %d VFs\n", 31150b55e27dSYuval Mintz RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); 31160b55e27dSYuval Mintz return -EINVAL; 31170b55e27dSYuval Mintz } 31180b55e27dSYuval Mintz 31190b55e27dSYuval Mintz /* Initialize HW for VF access */ 31200b55e27dSYuval Mintz for_each_hwfn(cdev, j) { 31210b55e27dSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[j]; 31220b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 31230b55e27dSYuval Mintz int num_sbs = 0, limit = 16; 31240b55e27dSYuval Mintz 31250b55e27dSYuval Mintz if (!ptt) { 31260b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 31270b55e27dSYuval Mintz rc = -EBUSY; 31280b55e27dSYuval Mintz goto err; 31290b55e27dSYuval Mintz } 31300b55e27dSYuval Mintz 313183f34bd4SYuval Mintz if (IS_MF_DEFAULT(hwfn)) 313283f34bd4SYuval Mintz limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; 313383f34bd4SYuval Mintz 31340b55e27dSYuval Mintz memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 31350b55e27dSYuval Mintz qed_int_get_num_sbs(hwfn, &sb_cnt_info); 31360b55e27dSYuval Mintz num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); 31370b55e27dSYuval Mintz 31380b55e27dSYuval Mintz for (i = 0; i < num; i++) { 31390b55e27dSYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, i, false)) 31400b55e27dSYuval Mintz continue; 31410b55e27dSYuval 
Mintz 31420b55e27dSYuval Mintz rc = qed_iov_init_hw_for_vf(hwfn, 31430b55e27dSYuval Mintz ptt, i, num_sbs / num); 31440b55e27dSYuval Mintz if (rc) { 31450b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable VF[%d]\n", i); 31460b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 31470b55e27dSYuval Mintz goto err; 31480b55e27dSYuval Mintz } 31490b55e27dSYuval Mintz } 31500b55e27dSYuval Mintz 31510b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 31520b55e27dSYuval Mintz } 31530b55e27dSYuval Mintz 31540b55e27dSYuval Mintz /* Enable SRIOV PCIe functions */ 31550b55e27dSYuval Mintz rc = pci_enable_sriov(cdev->pdev, num); 31560b55e27dSYuval Mintz if (rc) { 31570b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); 31580b55e27dSYuval Mintz goto err; 31590b55e27dSYuval Mintz } 31600b55e27dSYuval Mintz 31610b55e27dSYuval Mintz return num; 31620b55e27dSYuval Mintz 31630b55e27dSYuval Mintz err: 31640b55e27dSYuval Mintz qed_sriov_disable(cdev, false); 31650b55e27dSYuval Mintz return rc; 31660b55e27dSYuval Mintz } 31670b55e27dSYuval Mintz 31680b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) 31690b55e27dSYuval Mintz { 31700b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) { 31710b55e27dSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); 31720b55e27dSYuval Mintz return -EOPNOTSUPP; 31730b55e27dSYuval Mintz } 31740b55e27dSYuval Mintz 31750b55e27dSYuval Mintz if (num_vfs_param) 31760b55e27dSYuval Mintz return qed_sriov_enable(cdev, num_vfs_param); 31770b55e27dSYuval Mintz else 31780b55e27dSYuval Mintz return qed_sriov_disable(cdev, true); 31790b55e27dSYuval Mintz } 31800b55e27dSYuval Mintz 3181eff16960SYuval Mintz static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) 3182eff16960SYuval Mintz { 3183eff16960SYuval Mintz int i; 3184eff16960SYuval Mintz 3185eff16960SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 3186eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 
3187eff16960SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 3188eff16960SYuval Mintz return -EINVAL; 3189eff16960SYuval Mintz } 3190eff16960SYuval Mintz 3191eff16960SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { 3192eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 3193eff16960SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 3194eff16960SYuval Mintz return -EINVAL; 3195eff16960SYuval Mintz } 3196eff16960SYuval Mintz 3197eff16960SYuval Mintz for_each_hwfn(cdev, i) { 3198eff16960SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3199eff16960SYuval Mintz struct qed_public_vf_info *vf_info; 3200eff16960SYuval Mintz 3201eff16960SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 3202eff16960SYuval Mintz if (!vf_info) 3203eff16960SYuval Mintz continue; 3204eff16960SYuval Mintz 3205eff16960SYuval Mintz /* Set the forced MAC, and schedule the IOV task */ 3206eff16960SYuval Mintz ether_addr_copy(vf_info->forced_mac, mac); 3207eff16960SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 3208eff16960SYuval Mintz } 3209eff16960SYuval Mintz 3210eff16960SYuval Mintz return 0; 3211eff16960SYuval Mintz } 3212eff16960SYuval Mintz 321308feecd7SYuval Mintz static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) 321408feecd7SYuval Mintz { 321508feecd7SYuval Mintz int i; 321608feecd7SYuval Mintz 321708feecd7SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 321808feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 321908feecd7SYuval Mintz "Cannot set a VF VLAN; Sriov is not enabled\n"); 322008feecd7SYuval Mintz return -EINVAL; 322108feecd7SYuval Mintz } 322208feecd7SYuval Mintz 322308feecd7SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { 322408feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 322508feecd7SYuval Mintz "Cannot set VF[%d] VLAN (VF is not active)\n", vfid); 322608feecd7SYuval Mintz return -EINVAL; 322708feecd7SYuval
Mintz } 322808feecd7SYuval Mintz 322908feecd7SYuval Mintz for_each_hwfn(cdev, i) { 323008feecd7SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 323108feecd7SYuval Mintz struct qed_public_vf_info *vf_info; 323208feecd7SYuval Mintz 323308feecd7SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 323408feecd7SYuval Mintz if (!vf_info) 323508feecd7SYuval Mintz continue; 323608feecd7SYuval Mintz 323708feecd7SYuval Mintz /* Set the forced vlan, and schedule the IOV task */ 323808feecd7SYuval Mintz vf_info->forced_vlan = vid; 323908feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 324008feecd7SYuval Mintz } 324108feecd7SYuval Mintz 324208feecd7SYuval Mintz return 0; 324308feecd7SYuval Mintz } 324408feecd7SYuval Mintz 324573390ac9SYuval Mintz static int qed_get_vf_config(struct qed_dev *cdev, 324673390ac9SYuval Mintz int vf_id, struct ifla_vf_info *ivi) 324773390ac9SYuval Mintz { 324873390ac9SYuval Mintz struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 324973390ac9SYuval Mintz struct qed_public_vf_info *vf_info; 325073390ac9SYuval Mintz struct qed_mcp_link_state link; 325173390ac9SYuval Mintz u32 tx_rate; 325273390ac9SYuval Mintz 325373390ac9SYuval Mintz /* Sanitize request */ 325473390ac9SYuval Mintz if (IS_VF(cdev)) 325573390ac9SYuval Mintz return -EINVAL; 325673390ac9SYuval Mintz 325773390ac9SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { 325873390ac9SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 325973390ac9SYuval Mintz "VF index [%d] isn't active\n", vf_id); 326073390ac9SYuval Mintz return -EINVAL; 326173390ac9SYuval Mintz } 326273390ac9SYuval Mintz 326373390ac9SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 326473390ac9SYuval Mintz 326573390ac9SYuval Mintz qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); 326673390ac9SYuval Mintz 326773390ac9SYuval Mintz /* Fill information about VF */ 326873390ac9SYuval Mintz ivi->vf = vf_id; 326973390ac9SYuval Mintz 327073390ac9SYuval Mintz 
if (is_valid_ether_addr(vf_info->forced_mac)) 327173390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->forced_mac); 327273390ac9SYuval Mintz else 327373390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->mac); 327473390ac9SYuval Mintz 327573390ac9SYuval Mintz ivi->vlan = vf_info->forced_vlan; 327673390ac9SYuval Mintz ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); 327773390ac9SYuval Mintz ivi->linkstate = vf_info->link_state; 327873390ac9SYuval Mintz tx_rate = vf_info->tx_rate; 327973390ac9SYuval Mintz ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; 328073390ac9SYuval Mintz ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); 328173390ac9SYuval Mintz 328273390ac9SYuval Mintz return 0; 328373390ac9SYuval Mintz } 328473390ac9SYuval Mintz 328536558c3dSYuval Mintz void qed_inform_vf_link_state(struct qed_hwfn *hwfn) 328636558c3dSYuval Mintz { 328736558c3dSYuval Mintz struct qed_mcp_link_capabilities caps; 328836558c3dSYuval Mintz struct qed_mcp_link_params params; 328936558c3dSYuval Mintz struct qed_mcp_link_state link; 329036558c3dSYuval Mintz int i; 329136558c3dSYuval Mintz 329236558c3dSYuval Mintz if (!hwfn->pf_iov_info) 329336558c3dSYuval Mintz return; 329436558c3dSYuval Mintz 329536558c3dSYuval Mintz /* Update bulletin of all future possible VFs with link configuration */ 329636558c3dSYuval Mintz for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { 3297733def6aSYuval Mintz struct qed_public_vf_info *vf_info; 3298733def6aSYuval Mintz 3299733def6aSYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, i, false); 3300733def6aSYuval Mintz if (!vf_info) 3301733def6aSYuval Mintz continue; 3302733def6aSYuval Mintz 330336558c3dSYuval Mintz memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); 330436558c3dSYuval Mintz memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); 330536558c3dSYuval Mintz memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), 330636558c3dSYuval Mintz sizeof(caps)); 330736558c3dSYuval Mintz 3308733def6aSYuval Mintz /* 
Modify link according to the VF's configured link state */ 3309733def6aSYuval Mintz switch (vf_info->link_state) { 3310733def6aSYuval Mintz case IFLA_VF_LINK_STATE_DISABLE: 3311733def6aSYuval Mintz link.link_up = false; 3312733def6aSYuval Mintz break; 3313733def6aSYuval Mintz case IFLA_VF_LINK_STATE_ENABLE: 3314733def6aSYuval Mintz link.link_up = true; 3315733def6aSYuval Mintz /* Set speed according to maximum supported by HW. 3316733def6aSYuval Mintz * that is 40G for regular devices and 100G for CMT 3317733def6aSYuval Mintz * mode devices. 3318733def6aSYuval Mintz */ 3319733def6aSYuval Mintz link.speed = (hwfn->cdev->num_hwfns > 1) ? 3320733def6aSYuval Mintz 100000 : 40000; 3321733def6aSYuval Mintz default: 3322733def6aSYuval Mintz /* In auto mode pass PF link image to VF */ 3323733def6aSYuval Mintz break; 3324733def6aSYuval Mintz } 3325733def6aSYuval Mintz 3326733def6aSYuval Mintz if (link.link_up && vf_info->tx_rate) { 3327733def6aSYuval Mintz struct qed_ptt *ptt; 3328733def6aSYuval Mintz int rate; 3329733def6aSYuval Mintz 3330733def6aSYuval Mintz rate = min_t(int, vf_info->tx_rate, link.speed); 3331733def6aSYuval Mintz 3332733def6aSYuval Mintz ptt = qed_ptt_acquire(hwfn); 3333733def6aSYuval Mintz if (!ptt) { 3334733def6aSYuval Mintz DP_NOTICE(hwfn, "Failed to acquire PTT\n"); 3335733def6aSYuval Mintz return; 3336733def6aSYuval Mintz } 3337733def6aSYuval Mintz 3338733def6aSYuval Mintz if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { 3339733def6aSYuval Mintz vf_info->tx_rate = rate; 3340733def6aSYuval Mintz link.speed = rate; 3341733def6aSYuval Mintz } 3342733def6aSYuval Mintz 3343733def6aSYuval Mintz qed_ptt_release(hwfn, ptt); 3344733def6aSYuval Mintz } 3345733def6aSYuval Mintz 334636558c3dSYuval Mintz qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); 334736558c3dSYuval Mintz } 334836558c3dSYuval Mintz 334936558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 335036558c3dSYuval Mintz } 335136558c3dSYuval Mintz 3352733def6aSYuval Mintz 
static int qed_set_vf_link_state(struct qed_dev *cdev, 3353733def6aSYuval Mintz int vf_id, int link_state) 3354733def6aSYuval Mintz { 3355733def6aSYuval Mintz int i; 3356733def6aSYuval Mintz 3357733def6aSYuval Mintz /* Sanitize request */ 3358733def6aSYuval Mintz if (IS_VF(cdev)) 3359733def6aSYuval Mintz return -EINVAL; 3360733def6aSYuval Mintz 3361733def6aSYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { 3362733def6aSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 3363733def6aSYuval Mintz "VF index [%d] isn't active\n", vf_id); 3364733def6aSYuval Mintz return -EINVAL; 3365733def6aSYuval Mintz } 3366733def6aSYuval Mintz 3367733def6aSYuval Mintz /* Handle configuration of link state */ 3368733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3369733def6aSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3370733def6aSYuval Mintz struct qed_public_vf_info *vf; 3371733def6aSYuval Mintz 3372733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 3373733def6aSYuval Mintz if (!vf) 3374733def6aSYuval Mintz continue; 3375733def6aSYuval Mintz 3376733def6aSYuval Mintz if (vf->link_state == link_state) 3377733def6aSYuval Mintz continue; 3378733def6aSYuval Mintz 3379733def6aSYuval Mintz vf->link_state = link_state; 3380733def6aSYuval Mintz qed_inform_vf_link_state(&cdev->hwfns[i]); 3381733def6aSYuval Mintz } 3382733def6aSYuval Mintz 3383733def6aSYuval Mintz return 0; 3384733def6aSYuval Mintz } 3385733def6aSYuval Mintz 33866ddc7608SYuval Mintz static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 33876ddc7608SYuval Mintz { 33886ddc7608SYuval Mintz int i, rc = -EINVAL; 33896ddc7608SYuval Mintz 33906ddc7608SYuval Mintz for_each_hwfn(cdev, i) { 33916ddc7608SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 33926ddc7608SYuval Mintz 33936ddc7608SYuval Mintz rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 33946ddc7608SYuval Mintz if (rc) 33956ddc7608SYuval Mintz break; 33966ddc7608SYuval Mintz } 33976ddc7608SYuval Mintz 
33986ddc7608SYuval Mintz return rc; 33996ddc7608SYuval Mintz } 34006ddc7608SYuval Mintz 3401733def6aSYuval Mintz static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 3402733def6aSYuval Mintz { 3403733def6aSYuval Mintz int i; 3404733def6aSYuval Mintz 3405733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3406733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3407733def6aSYuval Mintz struct qed_public_vf_info *vf; 3408733def6aSYuval Mintz 3409733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 3410733def6aSYuval Mintz DP_NOTICE(p_hwfn, 3411733def6aSYuval Mintz "SR-IOV sanity check failed, can't set tx rate\n"); 3412733def6aSYuval Mintz return -EINVAL; 3413733def6aSYuval Mintz } 3414733def6aSYuval Mintz 3415733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 3416733def6aSYuval Mintz 3417733def6aSYuval Mintz vf->tx_rate = rate; 3418733def6aSYuval Mintz 3419733def6aSYuval Mintz qed_inform_vf_link_state(p_hwfn); 3420733def6aSYuval Mintz } 3421733def6aSYuval Mintz 3422733def6aSYuval Mintz return 0; 3423733def6aSYuval Mintz } 3424733def6aSYuval Mintz 3425733def6aSYuval Mintz static int qed_set_vf_rate(struct qed_dev *cdev, 3426733def6aSYuval Mintz int vfid, u32 min_rate, u32 max_rate) 3427733def6aSYuval Mintz { 3428733def6aSYuval Mintz int rc_min = 0, rc_max = 0; 3429733def6aSYuval Mintz 3430733def6aSYuval Mintz if (max_rate) 3431733def6aSYuval Mintz rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 3432733def6aSYuval Mintz 3433733def6aSYuval Mintz if (min_rate) 3434733def6aSYuval Mintz rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 3435733def6aSYuval Mintz 3436733def6aSYuval Mintz if (rc_max | rc_min) 3437733def6aSYuval Mintz return -EINVAL; 3438733def6aSYuval Mintz 3439733def6aSYuval Mintz return 0; 3440733def6aSYuval Mintz } 3441733def6aSYuval Mintz 344237bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 344337bff2b9SYuval Mintz { 344437bff2b9SYuval Mintz 
u64 events[QED_VF_ARRAY_LENGTH]; 344537bff2b9SYuval Mintz struct qed_ptt *ptt; 344637bff2b9SYuval Mintz int i; 344737bff2b9SYuval Mintz 344837bff2b9SYuval Mintz ptt = qed_ptt_acquire(hwfn); 344937bff2b9SYuval Mintz if (!ptt) { 345037bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 345137bff2b9SYuval Mintz "Can't acquire PTT; re-scheduling\n"); 345237bff2b9SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 345337bff2b9SYuval Mintz return; 345437bff2b9SYuval Mintz } 345537bff2b9SYuval Mintz 345637bff2b9SYuval Mintz qed_iov_pf_get_and_clear_pending_events(hwfn, events); 345737bff2b9SYuval Mintz 345837bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 345937bff2b9SYuval Mintz "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 346037bff2b9SYuval Mintz events[0], events[1], events[2]); 346137bff2b9SYuval Mintz 346237bff2b9SYuval Mintz qed_for_each_vf(hwfn, i) { 346337bff2b9SYuval Mintz /* Skip VFs with no pending messages */ 346437bff2b9SYuval Mintz if (!(events[i / 64] & (1ULL << (i % 64)))) 346537bff2b9SYuval Mintz continue; 346637bff2b9SYuval Mintz 346737bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 346837bff2b9SYuval Mintz "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 346937bff2b9SYuval Mintz i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 347037bff2b9SYuval Mintz 347137bff2b9SYuval Mintz /* Copy VF's message to PF's request buffer for that VF */ 347237bff2b9SYuval Mintz if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 347337bff2b9SYuval Mintz continue; 347437bff2b9SYuval Mintz 347537bff2b9SYuval Mintz qed_iov_process_mbx_req(hwfn, ptt, i); 347637bff2b9SYuval Mintz } 347737bff2b9SYuval Mintz 347837bff2b9SYuval Mintz qed_ptt_release(hwfn, ptt); 347937bff2b9SYuval Mintz } 348037bff2b9SYuval Mintz 348108feecd7SYuval Mintz static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 348208feecd7SYuval Mintz { 348308feecd7SYuval Mintz int i; 348408feecd7SYuval Mintz 348508feecd7SYuval Mintz qed_for_each_vf(hwfn, i) { 348608feecd7SYuval Mintz struct 
qed_public_vf_info *info; 348708feecd7SYuval Mintz bool update = false; 3488eff16960SYuval Mintz u8 *mac; 348908feecd7SYuval Mintz 349008feecd7SYuval Mintz info = qed_iov_get_public_vf_info(hwfn, i, true); 349108feecd7SYuval Mintz if (!info) 349208feecd7SYuval Mintz continue; 349308feecd7SYuval Mintz 349408feecd7SYuval Mintz /* Update data on bulletin board */ 3495eff16960SYuval Mintz mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 3496eff16960SYuval Mintz if (is_valid_ether_addr(info->forced_mac) && 3497eff16960SYuval Mintz (!mac || !ether_addr_equal(mac, info->forced_mac))) { 3498eff16960SYuval Mintz DP_VERBOSE(hwfn, 3499eff16960SYuval Mintz QED_MSG_IOV, 3500eff16960SYuval Mintz "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 3501eff16960SYuval Mintz i, 3502eff16960SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 3503eff16960SYuval Mintz 3504eff16960SYuval Mintz /* Update bulletin board with forced MAC */ 3505eff16960SYuval Mintz qed_iov_bulletin_set_forced_mac(hwfn, 3506eff16960SYuval Mintz info->forced_mac, i); 3507eff16960SYuval Mintz update = true; 3508eff16960SYuval Mintz } 350908feecd7SYuval Mintz 351008feecd7SYuval Mintz if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 351108feecd7SYuval Mintz info->forced_vlan) { 351208feecd7SYuval Mintz DP_VERBOSE(hwfn, 351308feecd7SYuval Mintz QED_MSG_IOV, 351408feecd7SYuval Mintz "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 351508feecd7SYuval Mintz info->forced_vlan, 351608feecd7SYuval Mintz i, 351708feecd7SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 351808feecd7SYuval Mintz qed_iov_bulletin_set_forced_vlan(hwfn, 351908feecd7SYuval Mintz info->forced_vlan, i); 352008feecd7SYuval Mintz update = true; 352108feecd7SYuval Mintz } 352208feecd7SYuval Mintz 352308feecd7SYuval Mintz if (update) 352408feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 352508feecd7SYuval Mintz } 352608feecd7SYuval Mintz } 352708feecd7SYuval Mintz 352836558c3dSYuval 
Mintz static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 352936558c3dSYuval Mintz { 353036558c3dSYuval Mintz struct qed_ptt *ptt; 353136558c3dSYuval Mintz int i; 353236558c3dSYuval Mintz 353336558c3dSYuval Mintz ptt = qed_ptt_acquire(hwfn); 353436558c3dSYuval Mintz if (!ptt) { 353536558c3dSYuval Mintz DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 353636558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 353736558c3dSYuval Mintz return; 353836558c3dSYuval Mintz } 353936558c3dSYuval Mintz 354036558c3dSYuval Mintz qed_for_each_vf(hwfn, i) 354136558c3dSYuval Mintz qed_iov_post_vf_bulletin(hwfn, i, ptt); 354236558c3dSYuval Mintz 354336558c3dSYuval Mintz qed_ptt_release(hwfn, ptt); 354436558c3dSYuval Mintz } 354536558c3dSYuval Mintz 354637bff2b9SYuval Mintz void qed_iov_pf_task(struct work_struct *work) 354737bff2b9SYuval Mintz { 354837bff2b9SYuval Mintz struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 354937bff2b9SYuval Mintz iov_task.work); 35500b55e27dSYuval Mintz int rc; 355137bff2b9SYuval Mintz 355237bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) 355337bff2b9SYuval Mintz return; 355437bff2b9SYuval Mintz 35550b55e27dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 35560b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 35570b55e27dSYuval Mintz 35580b55e27dSYuval Mintz if (!ptt) { 35590b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 35600b55e27dSYuval Mintz return; 35610b55e27dSYuval Mintz } 35620b55e27dSYuval Mintz 35630b55e27dSYuval Mintz rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 35640b55e27dSYuval Mintz if (rc) 35650b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 35660b55e27dSYuval Mintz 35670b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 35680b55e27dSYuval Mintz } 35690b55e27dSYuval Mintz 357037bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 
357137bff2b9SYuval Mintz qed_handle_vf_msg(hwfn); 357208feecd7SYuval Mintz 357308feecd7SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 357408feecd7SYuval Mintz &hwfn->iov_task_flags)) 357508feecd7SYuval Mintz qed_handle_pf_set_vf_unicast(hwfn); 357608feecd7SYuval Mintz 357736558c3dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 357836558c3dSYuval Mintz &hwfn->iov_task_flags)) 357936558c3dSYuval Mintz qed_handle_bulletin_post(hwfn); 358037bff2b9SYuval Mintz } 358137bff2b9SYuval Mintz 358237bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 358337bff2b9SYuval Mintz { 358437bff2b9SYuval Mintz int i; 358537bff2b9SYuval Mintz 358637bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 358737bff2b9SYuval Mintz if (!cdev->hwfns[i].iov_wq) 358837bff2b9SYuval Mintz continue; 358937bff2b9SYuval Mintz 359037bff2b9SYuval Mintz if (schedule_first) { 359137bff2b9SYuval Mintz qed_schedule_iov(&cdev->hwfns[i], 359237bff2b9SYuval Mintz QED_IOV_WQ_STOP_WQ_FLAG); 359337bff2b9SYuval Mintz cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 359437bff2b9SYuval Mintz } 359537bff2b9SYuval Mintz 359637bff2b9SYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 359737bff2b9SYuval Mintz destroy_workqueue(cdev->hwfns[i].iov_wq); 359837bff2b9SYuval Mintz } 359937bff2b9SYuval Mintz } 360037bff2b9SYuval Mintz 360137bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev) 360237bff2b9SYuval Mintz { 360337bff2b9SYuval Mintz char name[NAME_SIZE]; 360437bff2b9SYuval Mintz int i; 360537bff2b9SYuval Mintz 360637bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 360737bff2b9SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 360837bff2b9SYuval Mintz 360936558c3dSYuval Mintz /* PFs needs a dedicated workqueue only if they support IOV. 361036558c3dSYuval Mintz * VFs always require one. 
361136558c3dSYuval Mintz */ 361236558c3dSYuval Mintz if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 361337bff2b9SYuval Mintz continue; 361437bff2b9SYuval Mintz 361537bff2b9SYuval Mintz snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 361637bff2b9SYuval Mintz cdev->pdev->bus->number, 361737bff2b9SYuval Mintz PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 361837bff2b9SYuval Mintz 361937bff2b9SYuval Mintz p_hwfn->iov_wq = create_singlethread_workqueue(name); 362037bff2b9SYuval Mintz if (!p_hwfn->iov_wq) { 362137bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 362237bff2b9SYuval Mintz return -ENOMEM; 362337bff2b9SYuval Mintz } 362437bff2b9SYuval Mintz 362536558c3dSYuval Mintz if (IS_PF(cdev)) 362637bff2b9SYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 362736558c3dSYuval Mintz else 362836558c3dSYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 362937bff2b9SYuval Mintz } 363037bff2b9SYuval Mintz 363137bff2b9SYuval Mintz return 0; 363237bff2b9SYuval Mintz } 36330b55e27dSYuval Mintz 36340b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = { 36350b55e27dSYuval Mintz .configure = &qed_sriov_configure, 3636eff16960SYuval Mintz .set_mac = &qed_sriov_pf_set_mac, 363708feecd7SYuval Mintz .set_vlan = &qed_sriov_pf_set_vlan, 363873390ac9SYuval Mintz .get_config = &qed_get_vf_config, 3639733def6aSYuval Mintz .set_link_state = &qed_set_vf_link_state, 36406ddc7608SYuval Mintz .set_spoof = &qed_spoof_configure, 3641733def6aSYuval Mintz .set_rate = &qed_set_vf_rate, 36420b55e27dSYuval Mintz }; 3643