132a47e72SYuval Mintz /* QLogic qed NIC Driver 2e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation 332a47e72SYuval Mintz * 4e8f1cb50SMintz, Yuval * This software is available to you under a choice of one of two 5e8f1cb50SMintz, Yuval * licenses. You may choose to be licensed under the terms of the GNU 6e8f1cb50SMintz, Yuval * General Public License (GPL) Version 2, available from the file 7e8f1cb50SMintz, Yuval * COPYING in the main directory of this source tree, or the 8e8f1cb50SMintz, Yuval * OpenIB.org BSD license below: 9e8f1cb50SMintz, Yuval * 10e8f1cb50SMintz, Yuval * Redistribution and use in source and binary forms, with or 11e8f1cb50SMintz, Yuval * without modification, are permitted provided that the following 12e8f1cb50SMintz, Yuval * conditions are met: 13e8f1cb50SMintz, Yuval * 14e8f1cb50SMintz, Yuval * - Redistributions of source code must retain the above 15e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 16e8f1cb50SMintz, Yuval * disclaimer. 17e8f1cb50SMintz, Yuval * 18e8f1cb50SMintz, Yuval * - Redistributions in binary form must reproduce the above 19e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 20e8f1cb50SMintz, Yuval * disclaimer in the documentation and /or other materials 21e8f1cb50SMintz, Yuval * provided with the distribution. 22e8f1cb50SMintz, Yuval * 23e8f1cb50SMintz, Yuval * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f1cb50SMintz, Yuval * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f1cb50SMintz, Yuval * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f1cb50SMintz, Yuval * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f1cb50SMintz, Yuval * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f1cb50SMintz, Yuval * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f1cb50SMintz, Yuval * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f1cb50SMintz, Yuval * SOFTWARE. 3132a47e72SYuval Mintz */ 3232a47e72SYuval Mintz 33dacd88d6SYuval Mintz #include <linux/etherdevice.h> 3436558c3dSYuval Mintz #include <linux/crc32.h> 35f29ffdb6SMintz, Yuval #include <linux/vmalloc.h> 360b55e27dSYuval Mintz #include <linux/qed/qed_iov_if.h> 371408cc1fSYuval Mintz #include "qed_cxt.h" 381408cc1fSYuval Mintz #include "qed_hsi.h" 3932a47e72SYuval Mintz #include "qed_hw.h" 401408cc1fSYuval Mintz #include "qed_init_ops.h" 4132a47e72SYuval Mintz #include "qed_int.h" 421408cc1fSYuval Mintz #include "qed_mcp.h" 4332a47e72SYuval Mintz #include "qed_reg_addr.h" 441408cc1fSYuval Mintz #include "qed_sp.h" 4532a47e72SYuval Mintz #include "qed_sriov.h" 4632a47e72SYuval Mintz #include "qed_vf.h" 4732a47e72SYuval Mintz 481408cc1fSYuval Mintz /* IOV ramrods */ 491fe614d1SYuval Mintz static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) 501408cc1fSYuval Mintz { 511408cc1fSYuval Mintz struct vf_start_ramrod_data *p_ramrod = NULL; 521408cc1fSYuval Mintz struct qed_spq_entry *p_ent = NULL; 531408cc1fSYuval Mintz struct qed_sp_init_data init_data; 541408cc1fSYuval Mintz int rc = -EINVAL; 551fe614d1SYuval Mintz u8 fp_minor; 561408cc1fSYuval Mintz 571408cc1fSYuval Mintz /* Get SPQ entry */ 581408cc1fSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 591408cc1fSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 601fe614d1SYuval Mintz init_data.opaque_fid = p_vf->opaque_fid; 611408cc1fSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 621408cc1fSYuval Mintz 631408cc1fSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 641408cc1fSYuval Mintz COMMON_RAMROD_VF_START, 651408cc1fSYuval 
Mintz PROTOCOLID_COMMON, &init_data); 661408cc1fSYuval Mintz if (rc) 671408cc1fSYuval Mintz return rc; 681408cc1fSYuval Mintz 691408cc1fSYuval Mintz p_ramrod = &p_ent->ramrod.vf_start; 701408cc1fSYuval Mintz 711fe614d1SYuval Mintz p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); 721fe614d1SYuval Mintz p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); 731408cc1fSYuval Mintz 741fe614d1SYuval Mintz switch (p_hwfn->hw_info.personality) { 751fe614d1SYuval Mintz case QED_PCI_ETH: 761408cc1fSYuval Mintz p_ramrod->personality = PERSONALITY_ETH; 771fe614d1SYuval Mintz break; 781fe614d1SYuval Mintz case QED_PCI_ETH_ROCE: 791fe614d1SYuval Mintz p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; 801fe614d1SYuval Mintz break; 811fe614d1SYuval Mintz default: 821fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 831fe614d1SYuval Mintz p_hwfn->hw_info.personality); 841fe614d1SYuval Mintz return -EINVAL; 851fe614d1SYuval Mintz } 861fe614d1SYuval Mintz 871fe614d1SYuval Mintz fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; 88a044df83SYuval Mintz if (fp_minor > ETH_HSI_VER_MINOR && 89a044df83SYuval Mintz fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { 901fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, 911fe614d1SYuval Mintz QED_MSG_IOV, 921fe614d1SYuval Mintz "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", 931fe614d1SYuval Mintz p_vf->abs_vf_id, 941fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, 951fe614d1SYuval Mintz fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 961fe614d1SYuval Mintz fp_minor = ETH_HSI_VER_MINOR; 971fe614d1SYuval Mintz } 981fe614d1SYuval Mintz 99351a4dedSYuval Mintz p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 1001fe614d1SYuval Mintz p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; 1011fe614d1SYuval Mintz 1021fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1031fe614d1SYuval Mintz "VF[%d] - Starting using HSI %02x.%02x\n", 
1041fe614d1SYuval Mintz p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); 1051408cc1fSYuval Mintz 1061408cc1fSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1071408cc1fSYuval Mintz } 1081408cc1fSYuval Mintz 1090b55e27dSYuval Mintz static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, 1100b55e27dSYuval Mintz u32 concrete_vfid, u16 opaque_vfid) 1110b55e27dSYuval Mintz { 1120b55e27dSYuval Mintz struct vf_stop_ramrod_data *p_ramrod = NULL; 1130b55e27dSYuval Mintz struct qed_spq_entry *p_ent = NULL; 1140b55e27dSYuval Mintz struct qed_sp_init_data init_data; 1150b55e27dSYuval Mintz int rc = -EINVAL; 1160b55e27dSYuval Mintz 1170b55e27dSYuval Mintz /* Get SPQ entry */ 1180b55e27dSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 1190b55e27dSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 1200b55e27dSYuval Mintz init_data.opaque_fid = opaque_vfid; 1210b55e27dSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1220b55e27dSYuval Mintz 1230b55e27dSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 1240b55e27dSYuval Mintz COMMON_RAMROD_VF_STOP, 1250b55e27dSYuval Mintz PROTOCOLID_COMMON, &init_data); 1260b55e27dSYuval Mintz if (rc) 1270b55e27dSYuval Mintz return rc; 1280b55e27dSYuval Mintz 1290b55e27dSYuval Mintz p_ramrod = &p_ent->ramrod.vf_stop; 1300b55e27dSYuval Mintz 1310b55e27dSYuval Mintz p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 1320b55e27dSYuval Mintz 1330b55e27dSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1340b55e27dSYuval Mintz } 1350b55e27dSYuval Mintz 136ba56947aSBaoyou Xie static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, 1377eff82b0SYuval Mintz int rel_vf_id, 1387eff82b0SYuval Mintz bool b_enabled_only, bool b_non_malicious) 13932a47e72SYuval Mintz { 14032a47e72SYuval Mintz if (!p_hwfn->pf_iov_info) { 14132a47e72SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 14232a47e72SYuval Mintz return false; 14332a47e72SYuval Mintz } 14432a47e72SYuval Mintz 14532a47e72SYuval Mintz if ((rel_vf_id >= 
p_hwfn->cdev->p_iov_info->total_vfs) || 14632a47e72SYuval Mintz (rel_vf_id < 0)) 14732a47e72SYuval Mintz return false; 14832a47e72SYuval Mintz 14932a47e72SYuval Mintz if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && 15032a47e72SYuval Mintz b_enabled_only) 15132a47e72SYuval Mintz return false; 15232a47e72SYuval Mintz 1537eff82b0SYuval Mintz if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && 1547eff82b0SYuval Mintz b_non_malicious) 1557eff82b0SYuval Mintz return false; 1567eff82b0SYuval Mintz 15732a47e72SYuval Mintz return true; 15832a47e72SYuval Mintz } 15932a47e72SYuval Mintz 16037bff2b9SYuval Mintz static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, 16137bff2b9SYuval Mintz u16 relative_vf_id, 16237bff2b9SYuval Mintz bool b_enabled_only) 16337bff2b9SYuval Mintz { 16437bff2b9SYuval Mintz struct qed_vf_info *vf = NULL; 16537bff2b9SYuval Mintz 16637bff2b9SYuval Mintz if (!p_hwfn->pf_iov_info) { 16737bff2b9SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 16837bff2b9SYuval Mintz return NULL; 16937bff2b9SYuval Mintz } 17037bff2b9SYuval Mintz 1717eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, 1727eff82b0SYuval Mintz b_enabled_only, false)) 17337bff2b9SYuval Mintz vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 17437bff2b9SYuval Mintz else 17537bff2b9SYuval Mintz DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", 17637bff2b9SYuval Mintz relative_vf_id); 17737bff2b9SYuval Mintz 17837bff2b9SYuval Mintz return vf; 17937bff2b9SYuval Mintz } 18037bff2b9SYuval Mintz 18141086467SYuval Mintz static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, 18241086467SYuval Mintz struct qed_vf_info *p_vf, u16 rx_qid) 18341086467SYuval Mintz { 18441086467SYuval Mintz if (rx_qid >= p_vf->num_rxqs) 18541086467SYuval Mintz DP_VERBOSE(p_hwfn, 18641086467SYuval Mintz QED_MSG_IOV, 18741086467SYuval Mintz "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", 18841086467SYuval Mintz 
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); 18941086467SYuval Mintz return rx_qid < p_vf->num_rxqs; 19041086467SYuval Mintz } 19141086467SYuval Mintz 19241086467SYuval Mintz static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, 19341086467SYuval Mintz struct qed_vf_info *p_vf, u16 tx_qid) 19441086467SYuval Mintz { 19541086467SYuval Mintz if (tx_qid >= p_vf->num_txqs) 19641086467SYuval Mintz DP_VERBOSE(p_hwfn, 19741086467SYuval Mintz QED_MSG_IOV, 19841086467SYuval Mintz "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", 19941086467SYuval Mintz p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); 20041086467SYuval Mintz return tx_qid < p_vf->num_txqs; 20141086467SYuval Mintz } 20241086467SYuval Mintz 20341086467SYuval Mintz static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, 20441086467SYuval Mintz struct qed_vf_info *p_vf, u16 sb_idx) 20541086467SYuval Mintz { 20641086467SYuval Mintz int i; 20741086467SYuval Mintz 20841086467SYuval Mintz for (i = 0; i < p_vf->num_sbs; i++) 20941086467SYuval Mintz if (p_vf->igu_sbs[i] == sb_idx) 21041086467SYuval Mintz return true; 21141086467SYuval Mintz 21241086467SYuval Mintz DP_VERBOSE(p_hwfn, 21341086467SYuval Mintz QED_MSG_IOV, 21441086467SYuval Mintz "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", 21541086467SYuval Mintz p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); 21641086467SYuval Mintz 21741086467SYuval Mintz return false; 21841086467SYuval Mintz } 21941086467SYuval Mintz 220ba56947aSBaoyou Xie static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, 22136558c3dSYuval Mintz int vfid, struct qed_ptt *p_ptt) 22236558c3dSYuval Mintz { 22336558c3dSYuval Mintz struct qed_bulletin_content *p_bulletin; 22436558c3dSYuval Mintz int crc_size = sizeof(p_bulletin->crc); 22536558c3dSYuval Mintz struct qed_dmae_params params; 22636558c3dSYuval Mintz struct qed_vf_info *p_vf; 22736558c3dSYuval Mintz 22836558c3dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 
22936558c3dSYuval Mintz if (!p_vf) 23036558c3dSYuval Mintz return -EINVAL; 23136558c3dSYuval Mintz 23236558c3dSYuval Mintz if (!p_vf->vf_bulletin) 23336558c3dSYuval Mintz return -EINVAL; 23436558c3dSYuval Mintz 23536558c3dSYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 23636558c3dSYuval Mintz 23736558c3dSYuval Mintz /* Increment bulletin board version and compute crc */ 23836558c3dSYuval Mintz p_bulletin->version++; 23936558c3dSYuval Mintz p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, 24036558c3dSYuval Mintz p_vf->bulletin.size - crc_size); 24136558c3dSYuval Mintz 24236558c3dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 24336558c3dSYuval Mintz "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", 24436558c3dSYuval Mintz p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); 24536558c3dSYuval Mintz 24636558c3dSYuval Mintz /* propagate bulletin board via dmae to vm memory */ 24736558c3dSYuval Mintz memset(¶ms, 0, sizeof(params)); 24836558c3dSYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 24936558c3dSYuval Mintz params.dst_vfid = p_vf->abs_vf_id; 25036558c3dSYuval Mintz return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, 25136558c3dSYuval Mintz p_vf->vf_bulletin, p_vf->bulletin.size / 4, 25236558c3dSYuval Mintz ¶ms); 25336558c3dSYuval Mintz } 25436558c3dSYuval Mintz 25532a47e72SYuval Mintz static int qed_iov_pci_cfg_info(struct qed_dev *cdev) 25632a47e72SYuval Mintz { 25732a47e72SYuval Mintz struct qed_hw_sriov_info *iov = cdev->p_iov_info; 25832a47e72SYuval Mintz int pos = iov->pos; 25932a47e72SYuval Mintz 26032a47e72SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); 26132a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 26232a47e72SYuval Mintz 26332a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 26432a47e72SYuval Mintz pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); 26532a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 26632a47e72SYuval Mintz pos + PCI_SRIOV_INITIAL_VF, 
&iov->initial_vfs); 26732a47e72SYuval Mintz 26832a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); 26932a47e72SYuval Mintz if (iov->num_vfs) { 27032a47e72SYuval Mintz DP_VERBOSE(cdev, 27132a47e72SYuval Mintz QED_MSG_IOV, 27232a47e72SYuval Mintz "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); 27332a47e72SYuval Mintz iov->num_vfs = 0; 27432a47e72SYuval Mintz } 27532a47e72SYuval Mintz 27632a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 27732a47e72SYuval Mintz pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 27832a47e72SYuval Mintz 27932a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 28032a47e72SYuval Mintz pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 28132a47e72SYuval Mintz 28232a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 28332a47e72SYuval Mintz pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); 28432a47e72SYuval Mintz 28532a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, 28632a47e72SYuval Mintz pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 28732a47e72SYuval Mintz 28832a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); 28932a47e72SYuval Mintz 29032a47e72SYuval Mintz pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 29132a47e72SYuval Mintz 29232a47e72SYuval Mintz DP_VERBOSE(cdev, 29332a47e72SYuval Mintz QED_MSG_IOV, 29432a47e72SYuval Mintz "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 29532a47e72SYuval Mintz iov->nres, 29632a47e72SYuval Mintz iov->cap, 29732a47e72SYuval Mintz iov->ctrl, 29832a47e72SYuval Mintz iov->total_vfs, 29932a47e72SYuval Mintz iov->initial_vfs, 30032a47e72SYuval Mintz iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 30132a47e72SYuval Mintz 30232a47e72SYuval Mintz /* Some sanity checks */ 30332a47e72SYuval Mintz if (iov->num_vfs > NUM_OF_VFS(cdev) || 30432a47e72SYuval Mintz iov->total_vfs > NUM_OF_VFS(cdev)) { 30532a47e72SYuval Mintz /* 
This can happen only due to a bug. In this case we set 30632a47e72SYuval Mintz * num_vfs to zero to avoid memory corruption in the code that 30732a47e72SYuval Mintz * assumes max number of vfs 30832a47e72SYuval Mintz */ 30932a47e72SYuval Mintz DP_NOTICE(cdev, 31032a47e72SYuval Mintz "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", 31132a47e72SYuval Mintz iov->num_vfs); 31232a47e72SYuval Mintz 31332a47e72SYuval Mintz iov->num_vfs = 0; 31432a47e72SYuval Mintz iov->total_vfs = 0; 31532a47e72SYuval Mintz } 31632a47e72SYuval Mintz 31732a47e72SYuval Mintz return 0; 31832a47e72SYuval Mintz } 31932a47e72SYuval Mintz 32032a47e72SYuval Mintz static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, 32132a47e72SYuval Mintz struct qed_ptt *p_ptt) 32232a47e72SYuval Mintz { 32332a47e72SYuval Mintz struct qed_igu_block *p_sb; 32432a47e72SYuval Mintz u16 sb_id; 32532a47e72SYuval Mintz u32 val; 32632a47e72SYuval Mintz 32732a47e72SYuval Mintz if (!p_hwfn->hw_info.p_igu_info) { 32832a47e72SYuval Mintz DP_ERR(p_hwfn, 32932a47e72SYuval Mintz "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); 33032a47e72SYuval Mintz return; 33132a47e72SYuval Mintz } 33232a47e72SYuval Mintz 33332a47e72SYuval Mintz for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 33432a47e72SYuval Mintz sb_id++) { 33532a47e72SYuval Mintz p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; 33632a47e72SYuval Mintz if ((p_sb->status & QED_IGU_STATUS_FREE) && 33732a47e72SYuval Mintz !(p_sb->status & QED_IGU_STATUS_PF)) { 33832a47e72SYuval Mintz val = qed_rd(p_hwfn, p_ptt, 33932a47e72SYuval Mintz IGU_REG_MAPPING_MEMORY + sb_id * 4); 34032a47e72SYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 34132a47e72SYuval Mintz qed_wr(p_hwfn, p_ptt, 34232a47e72SYuval Mintz IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); 34332a47e72SYuval Mintz } 34432a47e72SYuval Mintz } 34532a47e72SYuval Mintz } 34632a47e72SYuval Mintz 34732a47e72SYuval Mintz static void qed_iov_setup_vfdb(struct 
qed_hwfn *p_hwfn) 34832a47e72SYuval Mintz { 34932a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 35032a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 35132a47e72SYuval Mintz struct qed_bulletin_content *p_bulletin_virt; 35232a47e72SYuval Mintz dma_addr_t req_p, rply_p, bulletin_p; 35332a47e72SYuval Mintz union pfvf_tlvs *p_reply_virt_addr; 35432a47e72SYuval Mintz union vfpf_tlvs *p_req_virt_addr; 35532a47e72SYuval Mintz u8 idx = 0; 35632a47e72SYuval Mintz 35732a47e72SYuval Mintz memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); 35832a47e72SYuval Mintz 35932a47e72SYuval Mintz p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; 36032a47e72SYuval Mintz req_p = p_iov_info->mbx_msg_phys_addr; 36132a47e72SYuval Mintz p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; 36232a47e72SYuval Mintz rply_p = p_iov_info->mbx_reply_phys_addr; 36332a47e72SYuval Mintz p_bulletin_virt = p_iov_info->p_bulletins; 36432a47e72SYuval Mintz bulletin_p = p_iov_info->bulletins_phys; 36532a47e72SYuval Mintz if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 36632a47e72SYuval Mintz DP_ERR(p_hwfn, 36732a47e72SYuval Mintz "qed_iov_setup_vfdb called without allocating mem first\n"); 36832a47e72SYuval Mintz return; 36932a47e72SYuval Mintz } 37032a47e72SYuval Mintz 37132a47e72SYuval Mintz for (idx = 0; idx < p_iov->total_vfs; idx++) { 37232a47e72SYuval Mintz struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; 37332a47e72SYuval Mintz u32 concrete; 37432a47e72SYuval Mintz 37532a47e72SYuval Mintz vf->vf_mbx.req_virt = p_req_virt_addr + idx; 37632a47e72SYuval Mintz vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); 37732a47e72SYuval Mintz vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; 37832a47e72SYuval Mintz vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); 37932a47e72SYuval Mintz 38032a47e72SYuval Mintz vf->state = VF_STOPPED; 38132a47e72SYuval Mintz vf->b_init = false; 38232a47e72SYuval 
Mintz 38332a47e72SYuval Mintz vf->bulletin.phys = idx * 38432a47e72SYuval Mintz sizeof(struct qed_bulletin_content) + 38532a47e72SYuval Mintz bulletin_p; 38632a47e72SYuval Mintz vf->bulletin.p_virt = p_bulletin_virt + idx; 38732a47e72SYuval Mintz vf->bulletin.size = sizeof(struct qed_bulletin_content); 38832a47e72SYuval Mintz 38932a47e72SYuval Mintz vf->relative_vf_id = idx; 39032a47e72SYuval Mintz vf->abs_vf_id = idx + p_iov->first_vf_in_pf; 39132a47e72SYuval Mintz concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); 39232a47e72SYuval Mintz vf->concrete_fid = concrete; 39332a47e72SYuval Mintz vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 39432a47e72SYuval Mintz (vf->abs_vf_id << 8); 39532a47e72SYuval Mintz vf->vport_id = idx + 1; 3961cf2b1a9SYuval Mintz 3971cf2b1a9SYuval Mintz vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; 3981cf2b1a9SYuval Mintz vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; 39932a47e72SYuval Mintz } 40032a47e72SYuval Mintz } 40132a47e72SYuval Mintz 40232a47e72SYuval Mintz static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) 40332a47e72SYuval Mintz { 40432a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 40532a47e72SYuval Mintz void **p_v_addr; 40632a47e72SYuval Mintz u16 num_vfs = 0; 40732a47e72SYuval Mintz 40832a47e72SYuval Mintz num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; 40932a47e72SYuval Mintz 41032a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 41132a47e72SYuval Mintz "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); 41232a47e72SYuval Mintz 41332a47e72SYuval Mintz /* Allocate PF Mailbox buffer (per-VF) */ 41432a47e72SYuval Mintz p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; 41532a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_msg_virt_addr; 41632a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 41732a47e72SYuval Mintz p_iov_info->mbx_msg_size, 41832a47e72SYuval Mintz &p_iov_info->mbx_msg_phys_addr, 41932a47e72SYuval Mintz GFP_KERNEL); 
42032a47e72SYuval Mintz if (!*p_v_addr) 42132a47e72SYuval Mintz return -ENOMEM; 42232a47e72SYuval Mintz 42332a47e72SYuval Mintz /* Allocate PF Mailbox Reply buffer (per-VF) */ 42432a47e72SYuval Mintz p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; 42532a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_reply_virt_addr; 42632a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 42732a47e72SYuval Mintz p_iov_info->mbx_reply_size, 42832a47e72SYuval Mintz &p_iov_info->mbx_reply_phys_addr, 42932a47e72SYuval Mintz GFP_KERNEL); 43032a47e72SYuval Mintz if (!*p_v_addr) 43132a47e72SYuval Mintz return -ENOMEM; 43232a47e72SYuval Mintz 43332a47e72SYuval Mintz p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * 43432a47e72SYuval Mintz num_vfs; 43532a47e72SYuval Mintz p_v_addr = &p_iov_info->p_bulletins; 43632a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 43732a47e72SYuval Mintz p_iov_info->bulletins_size, 43832a47e72SYuval Mintz &p_iov_info->bulletins_phys, 43932a47e72SYuval Mintz GFP_KERNEL); 44032a47e72SYuval Mintz if (!*p_v_addr) 44132a47e72SYuval Mintz return -ENOMEM; 44232a47e72SYuval Mintz 44332a47e72SYuval Mintz DP_VERBOSE(p_hwfn, 44432a47e72SYuval Mintz QED_MSG_IOV, 44532a47e72SYuval Mintz "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 44632a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 44732a47e72SYuval Mintz (u64) p_iov_info->mbx_msg_phys_addr, 44832a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 44932a47e72SYuval Mintz (u64) p_iov_info->mbx_reply_phys_addr, 45032a47e72SYuval Mintz p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); 45132a47e72SYuval Mintz 45232a47e72SYuval Mintz return 0; 45332a47e72SYuval Mintz } 45432a47e72SYuval Mintz 45532a47e72SYuval Mintz static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) 45632a47e72SYuval Mintz { 45732a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = 
p_hwfn->pf_iov_info; 45832a47e72SYuval Mintz 45932a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) 46032a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 46132a47e72SYuval Mintz p_iov_info->mbx_msg_size, 46232a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 46332a47e72SYuval Mintz p_iov_info->mbx_msg_phys_addr); 46432a47e72SYuval Mintz 46532a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) 46632a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 46732a47e72SYuval Mintz p_iov_info->mbx_reply_size, 46832a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 46932a47e72SYuval Mintz p_iov_info->mbx_reply_phys_addr); 47032a47e72SYuval Mintz 47132a47e72SYuval Mintz if (p_iov_info->p_bulletins) 47232a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 47332a47e72SYuval Mintz p_iov_info->bulletins_size, 47432a47e72SYuval Mintz p_iov_info->p_bulletins, 47532a47e72SYuval Mintz p_iov_info->bulletins_phys); 47632a47e72SYuval Mintz } 47732a47e72SYuval Mintz 47832a47e72SYuval Mintz int qed_iov_alloc(struct qed_hwfn *p_hwfn) 47932a47e72SYuval Mintz { 48032a47e72SYuval Mintz struct qed_pf_iov *p_sriov; 48132a47e72SYuval Mintz 48232a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn)) { 48332a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 48432a47e72SYuval Mintz "No SR-IOV - no need for IOV db\n"); 48532a47e72SYuval Mintz return 0; 48632a47e72SYuval Mintz } 48732a47e72SYuval Mintz 48832a47e72SYuval Mintz p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); 4892591c280SJoe Perches if (!p_sriov) 49032a47e72SYuval Mintz return -ENOMEM; 49132a47e72SYuval Mintz 49232a47e72SYuval Mintz p_hwfn->pf_iov_info = p_sriov; 49332a47e72SYuval Mintz 49432a47e72SYuval Mintz return qed_iov_allocate_vfdb(p_hwfn); 49532a47e72SYuval Mintz } 49632a47e72SYuval Mintz 49732a47e72SYuval Mintz void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 49832a47e72SYuval Mintz { 49932a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn) || 
!IS_PF_SRIOV_ALLOC(p_hwfn)) 50032a47e72SYuval Mintz return; 50132a47e72SYuval Mintz 50232a47e72SYuval Mintz qed_iov_setup_vfdb(p_hwfn); 50332a47e72SYuval Mintz qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); 50432a47e72SYuval Mintz } 50532a47e72SYuval Mintz 50632a47e72SYuval Mintz void qed_iov_free(struct qed_hwfn *p_hwfn) 50732a47e72SYuval Mintz { 50832a47e72SYuval Mintz if (IS_PF_SRIOV_ALLOC(p_hwfn)) { 50932a47e72SYuval Mintz qed_iov_free_vfdb(p_hwfn); 51032a47e72SYuval Mintz kfree(p_hwfn->pf_iov_info); 51132a47e72SYuval Mintz } 51232a47e72SYuval Mintz } 51332a47e72SYuval Mintz 51432a47e72SYuval Mintz void qed_iov_free_hw_info(struct qed_dev *cdev) 51532a47e72SYuval Mintz { 51632a47e72SYuval Mintz kfree(cdev->p_iov_info); 51732a47e72SYuval Mintz cdev->p_iov_info = NULL; 51832a47e72SYuval Mintz } 51932a47e72SYuval Mintz 52032a47e72SYuval Mintz int qed_iov_hw_info(struct qed_hwfn *p_hwfn) 52132a47e72SYuval Mintz { 52232a47e72SYuval Mintz struct qed_dev *cdev = p_hwfn->cdev; 52332a47e72SYuval Mintz int pos; 52432a47e72SYuval Mintz int rc; 52532a47e72SYuval Mintz 5261408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 5271408cc1fSYuval Mintz return 0; 5281408cc1fSYuval Mintz 52932a47e72SYuval Mintz /* Learn the PCI configuration */ 53032a47e72SYuval Mintz pos = pci_find_ext_capability(p_hwfn->cdev->pdev, 53132a47e72SYuval Mintz PCI_EXT_CAP_ID_SRIOV); 53232a47e72SYuval Mintz if (!pos) { 53332a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); 53432a47e72SYuval Mintz return 0; 53532a47e72SYuval Mintz } 53632a47e72SYuval Mintz 53732a47e72SYuval Mintz /* Allocate a new struct for IOV information */ 53832a47e72SYuval Mintz cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); 5392591c280SJoe Perches if (!cdev->p_iov_info) 54032a47e72SYuval Mintz return -ENOMEM; 5412591c280SJoe Perches 54232a47e72SYuval Mintz cdev->p_iov_info->pos = pos; 54332a47e72SYuval Mintz 54432a47e72SYuval Mintz rc = qed_iov_pci_cfg_info(cdev); 54532a47e72SYuval Mintz if 
(rc) 54632a47e72SYuval Mintz return rc; 54732a47e72SYuval Mintz 54832a47e72SYuval Mintz /* We want PF IOV to be synonemous with the existance of p_iov_info; 54932a47e72SYuval Mintz * In case the capability is published but there are no VFs, simply 55032a47e72SYuval Mintz * de-allocate the struct. 55132a47e72SYuval Mintz */ 55232a47e72SYuval Mintz if (!cdev->p_iov_info->total_vfs) { 55332a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 55432a47e72SYuval Mintz "IOV capabilities, but no VFs are published\n"); 55532a47e72SYuval Mintz kfree(cdev->p_iov_info); 55632a47e72SYuval Mintz cdev->p_iov_info = NULL; 55732a47e72SYuval Mintz return 0; 55832a47e72SYuval Mintz } 55932a47e72SYuval Mintz 5609c79ddaaSMintz, Yuval /* First VF index based on offset is tricky: 5619c79ddaaSMintz, Yuval * - If ARI is supported [likely], offset - (16 - pf_id) would 5629c79ddaaSMintz, Yuval * provide the number for eng0. 2nd engine Vfs would begin 56332a47e72SYuval Mintz * after the first engine's VFs. 5649c79ddaaSMintz, Yuval * - If !ARI, VFs would start on next device. 5659c79ddaaSMintz, Yuval * so offset - (256 - pf_id) would provide the number. 5669c79ddaaSMintz, Yuval * Utilize the fact that (256 - pf_id) is achieved only by later 5679c79ddaaSMintz, Yuval * to diffrentiate between the two. 
56832a47e72SYuval Mintz */ 5699c79ddaaSMintz, Yuval 5709c79ddaaSMintz, Yuval if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { 5719c79ddaaSMintz, Yuval u32 first = p_hwfn->cdev->p_iov_info->offset + 57232a47e72SYuval Mintz p_hwfn->abs_pf_id - 16; 5739c79ddaaSMintz, Yuval 5749c79ddaaSMintz, Yuval cdev->p_iov_info->first_vf_in_pf = first; 5759c79ddaaSMintz, Yuval 57632a47e72SYuval Mintz if (QED_PATH_ID(p_hwfn)) 57732a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; 5789c79ddaaSMintz, Yuval } else { 5799c79ddaaSMintz, Yuval u32 first = p_hwfn->cdev->p_iov_info->offset + 5809c79ddaaSMintz, Yuval p_hwfn->abs_pf_id - 256; 5819c79ddaaSMintz, Yuval 5829c79ddaaSMintz, Yuval cdev->p_iov_info->first_vf_in_pf = first; 5839c79ddaaSMintz, Yuval } 58432a47e72SYuval Mintz 58532a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 58632a47e72SYuval Mintz "First VF in hwfn 0x%08x\n", 58732a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf); 58832a47e72SYuval Mintz 58932a47e72SYuval Mintz return 0; 59032a47e72SYuval Mintz } 59132a47e72SYuval Mintz 5927eff82b0SYuval Mintz bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, 5937eff82b0SYuval Mintz int vfid, bool b_fail_malicious) 59437bff2b9SYuval Mintz { 59537bff2b9SYuval Mintz /* Check PF supports sriov */ 596b0409fa0SYuval Mintz if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || 597b0409fa0SYuval Mintz !IS_PF_SRIOV_ALLOC(p_hwfn)) 59837bff2b9SYuval Mintz return false; 59937bff2b9SYuval Mintz 60037bff2b9SYuval Mintz /* Check VF validity */ 6017eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) 60237bff2b9SYuval Mintz return false; 60337bff2b9SYuval Mintz 60437bff2b9SYuval Mintz return true; 60537bff2b9SYuval Mintz } 60637bff2b9SYuval Mintz 6077eff82b0SYuval Mintz bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) 6087eff82b0SYuval Mintz { 6097eff82b0SYuval Mintz return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); 6107eff82b0SYuval Mintz } 
6117eff82b0SYuval Mintz 6120b55e27dSYuval Mintz static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, 6130b55e27dSYuval Mintz u16 rel_vf_id, u8 to_disable) 6140b55e27dSYuval Mintz { 6150b55e27dSYuval Mintz struct qed_vf_info *vf; 6160b55e27dSYuval Mintz int i; 6170b55e27dSYuval Mintz 6180b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 6190b55e27dSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 6200b55e27dSYuval Mintz 6210b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 6220b55e27dSYuval Mintz if (!vf) 6230b55e27dSYuval Mintz continue; 6240b55e27dSYuval Mintz 6250b55e27dSYuval Mintz vf->to_disable = to_disable; 6260b55e27dSYuval Mintz } 6270b55e27dSYuval Mintz } 6280b55e27dSYuval Mintz 629ba56947aSBaoyou Xie static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) 6300b55e27dSYuval Mintz { 6310b55e27dSYuval Mintz u16 i; 6320b55e27dSYuval Mintz 6330b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) 6340b55e27dSYuval Mintz return; 6350b55e27dSYuval Mintz 6360b55e27dSYuval Mintz for (i = 0; i < cdev->p_iov_info->total_vfs; i++) 6370b55e27dSYuval Mintz qed_iov_set_vf_to_disable(cdev, i, to_disable); 6380b55e27dSYuval Mintz } 6390b55e27dSYuval Mintz 6401408cc1fSYuval Mintz static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, 6411408cc1fSYuval Mintz struct qed_ptt *p_ptt, u8 abs_vfid) 6421408cc1fSYuval Mintz { 6431408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 6441408cc1fSYuval Mintz PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, 6451408cc1fSYuval Mintz 1 << (abs_vfid & 0x1f)); 6461408cc1fSYuval Mintz } 6471408cc1fSYuval Mintz 648dacd88d6SYuval Mintz static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, 649dacd88d6SYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 650dacd88d6SYuval Mintz { 651dacd88d6SYuval Mintz int i; 652dacd88d6SYuval Mintz 653dacd88d6SYuval Mintz /* Set VF masks and configuration - pretend */ 654dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 
655dacd88d6SYuval Mintz 656dacd88d6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 657dacd88d6SYuval Mintz 658dacd88d6SYuval Mintz /* unpretend */ 659dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 660dacd88d6SYuval Mintz 661dacd88d6SYuval Mintz /* iterate over all queues, clear sb consumer */ 662b2b897ebSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 663b2b897ebSYuval Mintz qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 664b2b897ebSYuval Mintz vf->igu_sbs[i], 665b2b897ebSYuval Mintz vf->opaque_fid, true); 666dacd88d6SYuval Mintz } 667dacd88d6SYuval Mintz 6680b55e27dSYuval Mintz static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, 6690b55e27dSYuval Mintz struct qed_ptt *p_ptt, 6700b55e27dSYuval Mintz struct qed_vf_info *vf, bool enable) 6710b55e27dSYuval Mintz { 6720b55e27dSYuval Mintz u32 igu_vf_conf; 6730b55e27dSYuval Mintz 6740b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 6750b55e27dSYuval Mintz 6760b55e27dSYuval Mintz igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); 6770b55e27dSYuval Mintz 6780b55e27dSYuval Mintz if (enable) 6790b55e27dSYuval Mintz igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; 6800b55e27dSYuval Mintz else 6810b55e27dSYuval Mintz igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; 6820b55e27dSYuval Mintz 6830b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); 6840b55e27dSYuval Mintz 6850b55e27dSYuval Mintz /* unpretend */ 6860b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 6870b55e27dSYuval Mintz } 6880b55e27dSYuval Mintz 6891408cc1fSYuval Mintz static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, 6901408cc1fSYuval Mintz struct qed_ptt *p_ptt, 6911408cc1fSYuval Mintz struct qed_vf_info *vf) 6921408cc1fSYuval Mintz { 6931408cc1fSYuval Mintz u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; 6941408cc1fSYuval Mintz int rc; 6951408cc1fSYuval Mintz 6964e9b2a67SMintz, Yuval /* It's 
possible VF was previously considered malicious - 6974e9b2a67SMintz, Yuval * clear the indication even if we're only going to disable VF. 6984e9b2a67SMintz, Yuval */ 6994e9b2a67SMintz, Yuval vf->b_malicious = false; 7004e9b2a67SMintz, Yuval 7010b55e27dSYuval Mintz if (vf->to_disable) 7020b55e27dSYuval Mintz return 0; 7030b55e27dSYuval Mintz 7041408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 7051408cc1fSYuval Mintz QED_MSG_IOV, 7061408cc1fSYuval Mintz "Enable internal access for vf %x [abs %x]\n", 7071408cc1fSYuval Mintz vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); 7081408cc1fSYuval Mintz 7091408cc1fSYuval Mintz qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); 7101408cc1fSYuval Mintz 711b2b897ebSYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 712b2b897ebSYuval Mintz 7131408cc1fSYuval Mintz rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); 7141408cc1fSYuval Mintz if (rc) 7151408cc1fSYuval Mintz return rc; 7161408cc1fSYuval Mintz 7171408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 7181408cc1fSYuval Mintz 7191408cc1fSYuval Mintz SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); 7201408cc1fSYuval Mintz STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); 7211408cc1fSYuval Mintz 7221408cc1fSYuval Mintz qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, 7231408cc1fSYuval Mintz p_hwfn->hw_info.hw_mode); 7241408cc1fSYuval Mintz 7251408cc1fSYuval Mintz /* unpretend */ 7261408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 7271408cc1fSYuval Mintz 7281408cc1fSYuval Mintz vf->state = VF_FREE; 7291408cc1fSYuval Mintz 7301408cc1fSYuval Mintz return rc; 7311408cc1fSYuval Mintz } 7321408cc1fSYuval Mintz 7330b55e27dSYuval Mintz /** 7340b55e27dSYuval Mintz * @brief qed_iov_config_perm_table - configure the permission 7350b55e27dSYuval Mintz * zone table. 7360b55e27dSYuval Mintz * In E4, queue zone permission table size is 320x9. 
There 7370b55e27dSYuval Mintz * are 320 VF queues for single engine device (256 for dual 7380b55e27dSYuval Mintz * engine device), and each entry has the following format: 7390b55e27dSYuval Mintz * {Valid, VF[7:0]} 7400b55e27dSYuval Mintz * @param p_hwfn 7410b55e27dSYuval Mintz * @param p_ptt 7420b55e27dSYuval Mintz * @param vf 7430b55e27dSYuval Mintz * @param enable 7440b55e27dSYuval Mintz */ 7450b55e27dSYuval Mintz static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, 7460b55e27dSYuval Mintz struct qed_ptt *p_ptt, 7470b55e27dSYuval Mintz struct qed_vf_info *vf, u8 enable) 7480b55e27dSYuval Mintz { 7490b55e27dSYuval Mintz u32 reg_addr, val; 7500b55e27dSYuval Mintz u16 qzone_id = 0; 7510b55e27dSYuval Mintz int qid; 7520b55e27dSYuval Mintz 7530b55e27dSYuval Mintz for (qid = 0; qid < vf->num_rxqs; qid++) { 7540b55e27dSYuval Mintz qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, 7550b55e27dSYuval Mintz &qzone_id); 7560b55e27dSYuval Mintz 7570b55e27dSYuval Mintz reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; 7581a635e48SYuval Mintz val = enable ? 
(vf->abs_vf_id | BIT(8)) : 0; 7590b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, reg_addr, val); 7600b55e27dSYuval Mintz } 7610b55e27dSYuval Mintz } 7620b55e27dSYuval Mintz 763dacd88d6SYuval Mintz static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, 764dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 765dacd88d6SYuval Mintz struct qed_vf_info *vf) 766dacd88d6SYuval Mintz { 767dacd88d6SYuval Mintz /* Reset vf in IGU - interrupts are still disabled */ 768dacd88d6SYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 769dacd88d6SYuval Mintz 770dacd88d6SYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); 771dacd88d6SYuval Mintz 772dacd88d6SYuval Mintz /* Permission Table */ 773dacd88d6SYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); 774dacd88d6SYuval Mintz } 775dacd88d6SYuval Mintz 7761408cc1fSYuval Mintz static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, 7771408cc1fSYuval Mintz struct qed_ptt *p_ptt, 7781408cc1fSYuval Mintz struct qed_vf_info *vf, u16 num_rx_queues) 7791408cc1fSYuval Mintz { 7801408cc1fSYuval Mintz struct qed_igu_block *igu_blocks; 7811408cc1fSYuval Mintz int qid = 0, igu_id = 0; 7821408cc1fSYuval Mintz u32 val = 0; 7831408cc1fSYuval Mintz 7841408cc1fSYuval Mintz igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; 7851408cc1fSYuval Mintz 7861408cc1fSYuval Mintz if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) 7871408cc1fSYuval Mintz num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; 7881408cc1fSYuval Mintz p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; 7891408cc1fSYuval Mintz 7901408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); 7911408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); 7921408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); 7931408cc1fSYuval Mintz 7941408cc1fSYuval Mintz while ((qid < num_rx_queues) && 7951408cc1fSYuval Mintz (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { 7961408cc1fSYuval Mintz if 
(igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { 7971408cc1fSYuval Mintz struct cau_sb_entry sb_entry; 7981408cc1fSYuval Mintz 7991408cc1fSYuval Mintz vf->igu_sbs[qid] = (u16)igu_id; 8001408cc1fSYuval Mintz igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; 8011408cc1fSYuval Mintz 8021408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); 8031408cc1fSYuval Mintz 8041408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 8051408cc1fSYuval Mintz IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, 8061408cc1fSYuval Mintz val); 8071408cc1fSYuval Mintz 8081408cc1fSYuval Mintz /* Configure igu sb in CAU which were marked valid */ 8091408cc1fSYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry, 8101408cc1fSYuval Mintz p_hwfn->rel_pf_id, 8111408cc1fSYuval Mintz vf->abs_vf_id, 1); 8121408cc1fSYuval Mintz qed_dmae_host2grc(p_hwfn, p_ptt, 8131408cc1fSYuval Mintz (u64)(uintptr_t)&sb_entry, 8141408cc1fSYuval Mintz CAU_REG_SB_VAR_MEMORY + 8151408cc1fSYuval Mintz igu_id * sizeof(u64), 2, 0); 8161408cc1fSYuval Mintz qid++; 8171408cc1fSYuval Mintz } 8181408cc1fSYuval Mintz igu_id++; 8191408cc1fSYuval Mintz } 8201408cc1fSYuval Mintz 8211408cc1fSYuval Mintz vf->num_sbs = (u8) num_rx_queues; 8221408cc1fSYuval Mintz 8231408cc1fSYuval Mintz return vf->num_sbs; 8241408cc1fSYuval Mintz } 8251408cc1fSYuval Mintz 8260b55e27dSYuval Mintz static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, 8270b55e27dSYuval Mintz struct qed_ptt *p_ptt, 8280b55e27dSYuval Mintz struct qed_vf_info *vf) 8290b55e27dSYuval Mintz { 8300b55e27dSYuval Mintz struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 8310b55e27dSYuval Mintz int idx, igu_id; 8320b55e27dSYuval Mintz u32 addr, val; 8330b55e27dSYuval Mintz 8340b55e27dSYuval Mintz /* Invalidate igu CAM lines and mark them as free */ 8350b55e27dSYuval Mintz for (idx = 0; idx < vf->num_sbs; idx++) { 8360b55e27dSYuval Mintz igu_id = vf->igu_sbs[idx]; 8370b55e27dSYuval Mintz addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; 8380b55e27dSYuval Mintz 
8390b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, addr); 8400b55e27dSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 8410b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, addr, val); 8420b55e27dSYuval Mintz 8430b55e27dSYuval Mintz p_info->igu_map.igu_blocks[igu_id].status |= 8440b55e27dSYuval Mintz QED_IGU_STATUS_FREE; 8450b55e27dSYuval Mintz 8460b55e27dSYuval Mintz p_hwfn->hw_info.p_igu_info->free_blks++; 8470b55e27dSYuval Mintz } 8480b55e27dSYuval Mintz 8490b55e27dSYuval Mintz vf->num_sbs = 0; 8500b55e27dSYuval Mintz } 8510b55e27dSYuval Mintz 85233b2fbd0SMintz, Yuval static void qed_iov_set_link(struct qed_hwfn *p_hwfn, 85333b2fbd0SMintz, Yuval u16 vfid, 85433b2fbd0SMintz, Yuval struct qed_mcp_link_params *params, 85533b2fbd0SMintz, Yuval struct qed_mcp_link_state *link, 85633b2fbd0SMintz, Yuval struct qed_mcp_link_capabilities *p_caps) 85733b2fbd0SMintz, Yuval { 85833b2fbd0SMintz, Yuval struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 85933b2fbd0SMintz, Yuval vfid, 86033b2fbd0SMintz, Yuval false); 86133b2fbd0SMintz, Yuval struct qed_bulletin_content *p_bulletin; 86233b2fbd0SMintz, Yuval 86333b2fbd0SMintz, Yuval if (!p_vf) 86433b2fbd0SMintz, Yuval return; 86533b2fbd0SMintz, Yuval 86633b2fbd0SMintz, Yuval p_bulletin = p_vf->bulletin.p_virt; 86733b2fbd0SMintz, Yuval p_bulletin->req_autoneg = params->speed.autoneg; 86833b2fbd0SMintz, Yuval p_bulletin->req_adv_speed = params->speed.advertised_speeds; 86933b2fbd0SMintz, Yuval p_bulletin->req_forced_speed = params->speed.forced_speed; 87033b2fbd0SMintz, Yuval p_bulletin->req_autoneg_pause = params->pause.autoneg; 87133b2fbd0SMintz, Yuval p_bulletin->req_forced_rx = params->pause.forced_rx; 87233b2fbd0SMintz, Yuval p_bulletin->req_forced_tx = params->pause.forced_tx; 87333b2fbd0SMintz, Yuval p_bulletin->req_loopback = params->loopback_mode; 87433b2fbd0SMintz, Yuval 87533b2fbd0SMintz, Yuval p_bulletin->link_up = link->link_up; 87633b2fbd0SMintz, Yuval p_bulletin->speed = link->speed; 87733b2fbd0SMintz, Yuval 
p_bulletin->full_duplex = link->full_duplex; 87833b2fbd0SMintz, Yuval p_bulletin->autoneg = link->an; 87933b2fbd0SMintz, Yuval p_bulletin->autoneg_complete = link->an_complete; 88033b2fbd0SMintz, Yuval p_bulletin->parallel_detection = link->parallel_detection; 88133b2fbd0SMintz, Yuval p_bulletin->pfc_enabled = link->pfc_enabled; 88233b2fbd0SMintz, Yuval p_bulletin->partner_adv_speed = link->partner_adv_speed; 88333b2fbd0SMintz, Yuval p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 88433b2fbd0SMintz, Yuval p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 88533b2fbd0SMintz, Yuval p_bulletin->partner_adv_pause = link->partner_adv_pause; 88633b2fbd0SMintz, Yuval p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 88733b2fbd0SMintz, Yuval 88833b2fbd0SMintz, Yuval p_bulletin->capability_speed = p_caps->speed_capabilities; 88933b2fbd0SMintz, Yuval } 89033b2fbd0SMintz, Yuval 8911408cc1fSYuval Mintz static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, 8921408cc1fSYuval Mintz struct qed_ptt *p_ptt, 8933da7a37aSMintz, Yuval struct qed_iov_vf_init_params *p_params) 8941408cc1fSYuval Mintz { 89533b2fbd0SMintz, Yuval struct qed_mcp_link_capabilities link_caps; 89633b2fbd0SMintz, Yuval struct qed_mcp_link_params link_params; 89733b2fbd0SMintz, Yuval struct qed_mcp_link_state link_state; 8981408cc1fSYuval Mintz u8 num_of_vf_avaiable_chains = 0; 8991408cc1fSYuval Mintz struct qed_vf_info *vf = NULL; 9003da7a37aSMintz, Yuval u16 qid, num_irqs; 9011408cc1fSYuval Mintz int rc = 0; 9021408cc1fSYuval Mintz u32 cids; 9031408cc1fSYuval Mintz u8 i; 9041408cc1fSYuval Mintz 9053da7a37aSMintz, Yuval vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); 9061408cc1fSYuval Mintz if (!vf) { 9071408cc1fSYuval Mintz DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); 9081408cc1fSYuval Mintz return -EINVAL; 9091408cc1fSYuval Mintz } 9101408cc1fSYuval Mintz 9111408cc1fSYuval Mintz if (vf->b_init) { 9123da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, 
"VF[%d] is already active.\n", 9133da7a37aSMintz, Yuval p_params->rel_vf_id); 9141408cc1fSYuval Mintz return -EINVAL; 9151408cc1fSYuval Mintz } 9161408cc1fSYuval Mintz 9173da7a37aSMintz, Yuval /* Perform sanity checking on the requested queue_id */ 9183da7a37aSMintz, Yuval for (i = 0; i < p_params->num_queues; i++) { 9193da7a37aSMintz, Yuval u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); 9203da7a37aSMintz, Yuval u16 max_vf_qzone = min_vf_qzone + 9213da7a37aSMintz, Yuval FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; 9223da7a37aSMintz, Yuval 9233da7a37aSMintz, Yuval qid = p_params->req_rx_queue[i]; 9243da7a37aSMintz, Yuval if (qid < min_vf_qzone || qid > max_vf_qzone) { 9253da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, 9263da7a37aSMintz, Yuval "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", 9273da7a37aSMintz, Yuval qid, 9283da7a37aSMintz, Yuval p_params->rel_vf_id, 9293da7a37aSMintz, Yuval min_vf_qzone, max_vf_qzone); 9303da7a37aSMintz, Yuval return -EINVAL; 9313da7a37aSMintz, Yuval } 9323da7a37aSMintz, Yuval 9333da7a37aSMintz, Yuval qid = p_params->req_tx_queue[i]; 9343da7a37aSMintz, Yuval if (qid > max_vf_qzone) { 9353da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, 9363da7a37aSMintz, Yuval "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", 9373da7a37aSMintz, Yuval qid, p_params->rel_vf_id, max_vf_qzone); 9383da7a37aSMintz, Yuval return -EINVAL; 9393da7a37aSMintz, Yuval } 9403da7a37aSMintz, Yuval 9413da7a37aSMintz, Yuval /* If client *really* wants, Tx qid can be shared with PF */ 9423da7a37aSMintz, Yuval if (qid < min_vf_qzone) 9433da7a37aSMintz, Yuval DP_VERBOSE(p_hwfn, 9443da7a37aSMintz, Yuval QED_MSG_IOV, 9453da7a37aSMintz, Yuval "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", 9463da7a37aSMintz, Yuval p_params->rel_vf_id, qid, i); 9473da7a37aSMintz, Yuval } 9483da7a37aSMintz, Yuval 9491408cc1fSYuval Mintz /* Limit number of queues according to number of CIDs */ 9501408cc1fSYuval Mintz qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 
&cids); 9511408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 9521408cc1fSYuval Mintz QED_MSG_IOV, 9531408cc1fSYuval Mintz "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", 9543da7a37aSMintz, Yuval vf->relative_vf_id, p_params->num_queues, (u16)cids); 9553da7a37aSMintz, Yuval num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); 9561408cc1fSYuval Mintz 9571408cc1fSYuval Mintz num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, 9581408cc1fSYuval Mintz p_ptt, 9593da7a37aSMintz, Yuval vf, num_irqs); 9601408cc1fSYuval Mintz if (!num_of_vf_avaiable_chains) { 9611408cc1fSYuval Mintz DP_ERR(p_hwfn, "no available igu sbs\n"); 9621408cc1fSYuval Mintz return -ENOMEM; 9631408cc1fSYuval Mintz } 9641408cc1fSYuval Mintz 9651408cc1fSYuval Mintz /* Choose queue number and index ranges */ 9661408cc1fSYuval Mintz vf->num_rxqs = num_of_vf_avaiable_chains; 9671408cc1fSYuval Mintz vf->num_txqs = num_of_vf_avaiable_chains; 9681408cc1fSYuval Mintz 9691408cc1fSYuval Mintz for (i = 0; i < vf->num_rxqs; i++) { 9703da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue = &vf->vf_queues[i]; 9711408cc1fSYuval Mintz 9723da7a37aSMintz, Yuval p_queue->fw_rx_qid = p_params->req_rx_queue[i]; 9733da7a37aSMintz, Yuval p_queue->fw_tx_qid = p_params->req_tx_queue[i]; 9741408cc1fSYuval Mintz 9751408cc1fSYuval Mintz /* CIDs are per-VF, so no problem having them 0-based. 
*/ 9763da7a37aSMintz, Yuval p_queue->fw_cid = i; 9771408cc1fSYuval Mintz 9781408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 9793da7a37aSMintz, Yuval "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n", 9803da7a37aSMintz, Yuval vf->relative_vf_id, 9813da7a37aSMintz, Yuval i, vf->igu_sbs[i], 9823da7a37aSMintz, Yuval p_queue->fw_rx_qid, 9833da7a37aSMintz, Yuval p_queue->fw_tx_qid, p_queue->fw_cid); 9841408cc1fSYuval Mintz } 9853da7a37aSMintz, Yuval 98633b2fbd0SMintz, Yuval /* Update the link configuration in bulletin */ 98733b2fbd0SMintz, Yuval memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), 98833b2fbd0SMintz, Yuval sizeof(link_params)); 98933b2fbd0SMintz, Yuval memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); 99033b2fbd0SMintz, Yuval memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), 99133b2fbd0SMintz, Yuval sizeof(link_caps)); 99233b2fbd0SMintz, Yuval qed_iov_set_link(p_hwfn, p_params->rel_vf_id, 99333b2fbd0SMintz, Yuval &link_params, &link_state, &link_caps); 99433b2fbd0SMintz, Yuval 9951408cc1fSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); 9961408cc1fSYuval Mintz if (!rc) { 9971408cc1fSYuval Mintz vf->b_init = true; 9981408cc1fSYuval Mintz 9991408cc1fSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 10001408cc1fSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs++; 10011408cc1fSYuval Mintz } 10021408cc1fSYuval Mintz 10031408cc1fSYuval Mintz return rc; 10041408cc1fSYuval Mintz } 10051408cc1fSYuval Mintz 10060b55e27dSYuval Mintz static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, 10070b55e27dSYuval Mintz struct qed_ptt *p_ptt, u16 rel_vf_id) 10080b55e27dSYuval Mintz { 1009079d20a6SManish Chopra struct qed_mcp_link_capabilities caps; 1010079d20a6SManish Chopra struct qed_mcp_link_params params; 1011079d20a6SManish Chopra struct qed_mcp_link_state link; 10120b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 10130b55e27dSYuval Mintz 10140b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 
10150b55e27dSYuval Mintz if (!vf) { 10160b55e27dSYuval Mintz DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); 10170b55e27dSYuval Mintz return -EINVAL; 10180b55e27dSYuval Mintz } 10190b55e27dSYuval Mintz 102036558c3dSYuval Mintz if (vf->bulletin.p_virt) 102136558c3dSYuval Mintz memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); 102236558c3dSYuval Mintz 102336558c3dSYuval Mintz memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); 102436558c3dSYuval Mintz 1025079d20a6SManish Chopra /* Get the link configuration back in bulletin so 1026079d20a6SManish Chopra * that when VFs are re-enabled they get the actual 1027079d20a6SManish Chopra * link configuration. 1028079d20a6SManish Chopra */ 1029079d20a6SManish Chopra memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); 1030079d20a6SManish Chopra memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); 1031079d20a6SManish Chopra memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 1032079d20a6SManish Chopra qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); 1033079d20a6SManish Chopra 10341fe614d1SYuval Mintz /* Forget the VF's acquisition message */ 10351fe614d1SYuval Mintz memset(&vf->acquire, 0, sizeof(vf->acquire)); 10360b55e27dSYuval Mintz 10370b55e27dSYuval Mintz /* disablng interrupts and resetting permission table was done during 10380b55e27dSYuval Mintz * vf-close, however, we could get here without going through vf_close 10390b55e27dSYuval Mintz */ 10400b55e27dSYuval Mintz /* Disable Interrupts for VF */ 10410b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 10420b55e27dSYuval Mintz 10430b55e27dSYuval Mintz /* Reset Permission table */ 10440b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 10450b55e27dSYuval Mintz 10460b55e27dSYuval Mintz vf->num_rxqs = 0; 10470b55e27dSYuval Mintz vf->num_txqs = 0; 10480b55e27dSYuval Mintz qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); 10490b55e27dSYuval Mintz 10500b55e27dSYuval Mintz if (vf->b_init) 
{ 10510b55e27dSYuval Mintz vf->b_init = false; 10520b55e27dSYuval Mintz 10530b55e27dSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 10540b55e27dSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs--; 10550b55e27dSYuval Mintz } 10560b55e27dSYuval Mintz 10570b55e27dSYuval Mintz return 0; 10580b55e27dSYuval Mintz } 10590b55e27dSYuval Mintz 106037bff2b9SYuval Mintz static bool qed_iov_tlv_supported(u16 tlvtype) 106137bff2b9SYuval Mintz { 106237bff2b9SYuval Mintz return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; 106337bff2b9SYuval Mintz } 106437bff2b9SYuval Mintz 106537bff2b9SYuval Mintz /* place a given tlv on the tlv buffer, continuing current tlv list */ 106637bff2b9SYuval Mintz void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) 106737bff2b9SYuval Mintz { 106837bff2b9SYuval Mintz struct channel_tlv *tl = (struct channel_tlv *)*offset; 106937bff2b9SYuval Mintz 107037bff2b9SYuval Mintz tl->type = type; 107137bff2b9SYuval Mintz tl->length = length; 107237bff2b9SYuval Mintz 107337bff2b9SYuval Mintz /* Offset should keep pointing to next TLV (the end of the last) */ 107437bff2b9SYuval Mintz *offset += length; 107537bff2b9SYuval Mintz 107637bff2b9SYuval Mintz /* Return a pointer to the start of the added tlv */ 107737bff2b9SYuval Mintz return *offset - length; 107837bff2b9SYuval Mintz } 107937bff2b9SYuval Mintz 108037bff2b9SYuval Mintz /* list the types and lengths of the tlvs on the buffer */ 108137bff2b9SYuval Mintz void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) 108237bff2b9SYuval Mintz { 108337bff2b9SYuval Mintz u16 i = 1, total_length = 0; 108437bff2b9SYuval Mintz struct channel_tlv *tlv; 108537bff2b9SYuval Mintz 108637bff2b9SYuval Mintz do { 108737bff2b9SYuval Mintz tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); 108837bff2b9SYuval Mintz 108937bff2b9SYuval Mintz /* output tlv */ 109037bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 109137bff2b9SYuval Mintz "TLV number %d: type %d, length %d\n", 
109237bff2b9SYuval Mintz i, tlv->type, tlv->length); 109337bff2b9SYuval Mintz 109437bff2b9SYuval Mintz if (tlv->type == CHANNEL_TLV_LIST_END) 109537bff2b9SYuval Mintz return; 109637bff2b9SYuval Mintz 109737bff2b9SYuval Mintz /* Validate entry - protect against malicious VFs */ 109837bff2b9SYuval Mintz if (!tlv->length) { 109937bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); 110037bff2b9SYuval Mintz return; 110137bff2b9SYuval Mintz } 110237bff2b9SYuval Mintz 110337bff2b9SYuval Mintz total_length += tlv->length; 110437bff2b9SYuval Mintz 110537bff2b9SYuval Mintz if (total_length >= sizeof(struct tlv_buffer_size)) { 110637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); 110737bff2b9SYuval Mintz return; 110837bff2b9SYuval Mintz } 110937bff2b9SYuval Mintz 111037bff2b9SYuval Mintz i++; 111137bff2b9SYuval Mintz } while (1); 111237bff2b9SYuval Mintz } 111337bff2b9SYuval Mintz 111437bff2b9SYuval Mintz static void qed_iov_send_response(struct qed_hwfn *p_hwfn, 111537bff2b9SYuval Mintz struct qed_ptt *p_ptt, 111637bff2b9SYuval Mintz struct qed_vf_info *p_vf, 111737bff2b9SYuval Mintz u16 length, u8 status) 111837bff2b9SYuval Mintz { 111937bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 112037bff2b9SYuval Mintz struct qed_dmae_params params; 112137bff2b9SYuval Mintz u8 eng_vf_id; 112237bff2b9SYuval Mintz 112337bff2b9SYuval Mintz mbx->reply_virt->default_resp.hdr.status = status; 112437bff2b9SYuval Mintz 112537bff2b9SYuval Mintz qed_dp_tlv_list(p_hwfn, mbx->reply_virt); 112637bff2b9SYuval Mintz 112737bff2b9SYuval Mintz eng_vf_id = p_vf->abs_vf_id; 112837bff2b9SYuval Mintz 112937bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 113037bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 113137bff2b9SYuval Mintz params.dst_vfid = eng_vf_id; 113237bff2b9SYuval Mintz 113337bff2b9SYuval Mintz qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), 113437bff2b9SYuval Mintz 
mbx->req_virt->first_tlv.reply_address + 113537bff2b9SYuval Mintz sizeof(u64), 113637bff2b9SYuval Mintz (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, 113737bff2b9SYuval Mintz ¶ms); 113837bff2b9SYuval Mintz 1139d9194081SMintz, Yuval /* Once PF copies the rc to the VF, the latter can continue 1140d9194081SMintz, Yuval * and send an additional message. So we have to make sure the 1141d9194081SMintz, Yuval * channel would be re-set to ready prior to that. 1142d9194081SMintz, Yuval */ 114337bff2b9SYuval Mintz REG_WR(p_hwfn, 114437bff2b9SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 114537bff2b9SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); 1146d9194081SMintz, Yuval 1147d9194081SMintz, Yuval qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, 1148d9194081SMintz, Yuval mbx->req_virt->first_tlv.reply_address, 1149d9194081SMintz, Yuval sizeof(u64) / 4, ¶ms); 115037bff2b9SYuval Mintz } 115137bff2b9SYuval Mintz 1152dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, 1153dacd88d6SYuval Mintz enum qed_iov_vport_update_flag flag) 1154dacd88d6SYuval Mintz { 1155dacd88d6SYuval Mintz switch (flag) { 1156dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACTIVATE: 1157dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 115817b235c1SYuval Mintz case QED_IOV_VP_UPDATE_VLAN_STRIP: 115917b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 116017b235c1SYuval Mintz case QED_IOV_VP_UPDATE_TX_SWITCH: 116117b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1162dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_MCAST: 1163dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_MCAST; 1164dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_PARAM: 1165dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1166dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_RSS: 1167dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_RSS; 116817b235c1SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: 116917b235c1SYuval Mintz return 
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 117017b235c1SYuval Mintz case QED_IOV_VP_UPDATE_SGE_TPA: 117117b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 1172dacd88d6SYuval Mintz default: 1173dacd88d6SYuval Mintz return 0; 1174dacd88d6SYuval Mintz } 1175dacd88d6SYuval Mintz } 1176dacd88d6SYuval Mintz 1177dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, 1178dacd88d6SYuval Mintz struct qed_vf_info *p_vf, 1179dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, 1180dacd88d6SYuval Mintz u8 status, 1181dacd88d6SYuval Mintz u16 tlvs_mask, u16 tlvs_accepted) 1182dacd88d6SYuval Mintz { 1183dacd88d6SYuval Mintz struct pfvf_def_resp_tlv *resp; 1184dacd88d6SYuval Mintz u16 size, total_len, i; 1185dacd88d6SYuval Mintz 1186dacd88d6SYuval Mintz memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); 1187dacd88d6SYuval Mintz p_mbx->offset = (u8 *)p_mbx->reply_virt; 1188dacd88d6SYuval Mintz size = sizeof(struct pfvf_def_resp_tlv); 1189dacd88d6SYuval Mintz total_len = size; 1190dacd88d6SYuval Mintz 1191dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); 1192dacd88d6SYuval Mintz 1193dacd88d6SYuval Mintz /* Prepare response for all extended tlvs if they are found by PF */ 1194dacd88d6SYuval Mintz for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { 11951a635e48SYuval Mintz if (!(tlvs_mask & BIT(i))) 1196dacd88d6SYuval Mintz continue; 1197dacd88d6SYuval Mintz 1198dacd88d6SYuval Mintz resp = qed_add_tlv(p_hwfn, &p_mbx->offset, 1199dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), size); 1200dacd88d6SYuval Mintz 12011a635e48SYuval Mintz if (tlvs_accepted & BIT(i)) 1202dacd88d6SYuval Mintz resp->hdr.status = status; 1203dacd88d6SYuval Mintz else 1204dacd88d6SYuval Mintz resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; 1205dacd88d6SYuval Mintz 1206dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 1207dacd88d6SYuval Mintz QED_MSG_IOV, 1208dacd88d6SYuval Mintz "VF[%d] - vport_update response: TLV %d, status %02x\n", 
1209dacd88d6SYuval Mintz p_vf->relative_vf_id, 1210dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); 1211dacd88d6SYuval Mintz 1212dacd88d6SYuval Mintz total_len += size; 1213dacd88d6SYuval Mintz } 1214dacd88d6SYuval Mintz 1215dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, 1216dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1217dacd88d6SYuval Mintz 1218dacd88d6SYuval Mintz return total_len; 1219dacd88d6SYuval Mintz } 1220dacd88d6SYuval Mintz 122137bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, 122237bff2b9SYuval Mintz struct qed_ptt *p_ptt, 122337bff2b9SYuval Mintz struct qed_vf_info *vf_info, 122437bff2b9SYuval Mintz u16 type, u16 length, u8 status) 122537bff2b9SYuval Mintz { 122637bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; 122737bff2b9SYuval Mintz 122837bff2b9SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 122937bff2b9SYuval Mintz 123037bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, type, length); 123137bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 123237bff2b9SYuval Mintz sizeof(struct channel_list_end_tlv)); 123337bff2b9SYuval Mintz 123437bff2b9SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); 123537bff2b9SYuval Mintz } 123637bff2b9SYuval Mintz 1237ba56947aSBaoyou Xie static struct 1238ba56947aSBaoyou Xie qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, 12390b55e27dSYuval Mintz u16 relative_vf_id, 12400b55e27dSYuval Mintz bool b_enabled_only) 12410b55e27dSYuval Mintz { 12420b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 12430b55e27dSYuval Mintz 12440b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); 12450b55e27dSYuval Mintz if (!vf) 12460b55e27dSYuval Mintz return NULL; 12470b55e27dSYuval Mintz 12480b55e27dSYuval Mintz return &vf->p_vf_info; 12490b55e27dSYuval Mintz } 12500b55e27dSYuval Mintz 1251ba56947aSBaoyou Xie static 
void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) 12520b55e27dSYuval Mintz { 12530b55e27dSYuval Mintz struct qed_public_vf_info *vf_info; 12540b55e27dSYuval Mintz 12550b55e27dSYuval Mintz vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); 12560b55e27dSYuval Mintz 12570b55e27dSYuval Mintz if (!vf_info) 12580b55e27dSYuval Mintz return; 12590b55e27dSYuval Mintz 12600b55e27dSYuval Mintz /* Clear the VF mac */ 12610ee28e31SShyam Saini eth_zero_addr(vf_info->mac); 1262f990c82cSMintz, Yuval 1263f990c82cSMintz, Yuval vf_info->rx_accept_mode = 0; 1264f990c82cSMintz, Yuval vf_info->tx_accept_mode = 0; 12650b55e27dSYuval Mintz } 12660b55e27dSYuval Mintz 12670b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, 12680b55e27dSYuval Mintz struct qed_vf_info *p_vf) 12690b55e27dSYuval Mintz { 12700b55e27dSYuval Mintz u32 i; 12710b55e27dSYuval Mintz 12720b55e27dSYuval Mintz p_vf->vf_bulletin = 0; 1273dacd88d6SYuval Mintz p_vf->vport_instance = 0; 127408feecd7SYuval Mintz p_vf->configured_features = 0; 12750b55e27dSYuval Mintz 12760b55e27dSYuval Mintz /* If VF previously requested less resources, go back to default */ 12770b55e27dSYuval Mintz p_vf->num_rxqs = p_vf->num_sbs; 12780b55e27dSYuval Mintz p_vf->num_txqs = p_vf->num_sbs; 12790b55e27dSYuval Mintz 1280dacd88d6SYuval Mintz p_vf->num_active_rxqs = 0; 1281dacd88d6SYuval Mintz 12823da7a37aSMintz, Yuval for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 12833da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i]; 12843da7a37aSMintz, Yuval 12853da7a37aSMintz, Yuval if (p_queue->p_rx_cid) { 12863da7a37aSMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); 12873da7a37aSMintz, Yuval p_queue->p_rx_cid = NULL; 12883da7a37aSMintz, Yuval } 12893da7a37aSMintz, Yuval 12903da7a37aSMintz, Yuval if (p_queue->p_tx_cid) { 12913da7a37aSMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); 12923da7a37aSMintz, Yuval p_queue->p_tx_cid = NULL; 12933da7a37aSMintz, Yuval } 
12943da7a37aSMintz, Yuval } 12950b55e27dSYuval Mintz 129608feecd7SYuval Mintz memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 12971fe614d1SYuval Mintz memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); 12980b55e27dSYuval Mintz qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); 12990b55e27dSYuval Mintz } 13000b55e27dSYuval Mintz 13011cf2b1a9SYuval Mintz static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, 13021cf2b1a9SYuval Mintz struct qed_ptt *p_ptt, 13031cf2b1a9SYuval Mintz struct qed_vf_info *p_vf, 13041cf2b1a9SYuval Mintz struct vf_pf_resc_request *p_req, 13051cf2b1a9SYuval Mintz struct pf_vf_resc *p_resp) 13061cf2b1a9SYuval Mintz { 13071cf2b1a9SYuval Mintz int i; 13081cf2b1a9SYuval Mintz 13091cf2b1a9SYuval Mintz /* Queue related information */ 13101cf2b1a9SYuval Mintz p_resp->num_rxqs = p_vf->num_rxqs; 13111cf2b1a9SYuval Mintz p_resp->num_txqs = p_vf->num_txqs; 13121cf2b1a9SYuval Mintz p_resp->num_sbs = p_vf->num_sbs; 13131cf2b1a9SYuval Mintz 13141cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_sbs; i++) { 13151cf2b1a9SYuval Mintz p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; 13161cf2b1a9SYuval Mintz p_resp->hw_sbs[i].sb_qid = 0; 13171cf2b1a9SYuval Mintz } 13181cf2b1a9SYuval Mintz 13191cf2b1a9SYuval Mintz /* These fields are filled for backward compatibility. 13201cf2b1a9SYuval Mintz * Unused by modern vfs. 
13211cf2b1a9SYuval Mintz */ 13221cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_rxqs; i++) { 13231cf2b1a9SYuval Mintz qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, 13241cf2b1a9SYuval Mintz (u16 *)&p_resp->hw_qid[i]); 13251cf2b1a9SYuval Mintz p_resp->cid[i] = p_vf->vf_queues[i].fw_cid; 13261cf2b1a9SYuval Mintz } 13271cf2b1a9SYuval Mintz 13281cf2b1a9SYuval Mintz /* Filter related information */ 13291cf2b1a9SYuval Mintz p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, 13301cf2b1a9SYuval Mintz p_req->num_mac_filters); 13311cf2b1a9SYuval Mintz p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, 13321cf2b1a9SYuval Mintz p_req->num_vlan_filters); 13331cf2b1a9SYuval Mintz 13341cf2b1a9SYuval Mintz /* This isn't really needed/enforced, but some legacy VFs might depend 13351cf2b1a9SYuval Mintz * on the correct filling of this field. 13361cf2b1a9SYuval Mintz */ 13371cf2b1a9SYuval Mintz p_resp->num_mc_filters = QED_MAX_MC_ADDRS; 13381cf2b1a9SYuval Mintz 13391cf2b1a9SYuval Mintz /* Validate sufficient resources for VF */ 13401cf2b1a9SYuval Mintz if (p_resp->num_rxqs < p_req->num_rxqs || 13411cf2b1a9SYuval Mintz p_resp->num_txqs < p_req->num_txqs || 13421cf2b1a9SYuval Mintz p_resp->num_sbs < p_req->num_sbs || 13431cf2b1a9SYuval Mintz p_resp->num_mac_filters < p_req->num_mac_filters || 13441cf2b1a9SYuval Mintz p_resp->num_vlan_filters < p_req->num_vlan_filters || 13451cf2b1a9SYuval Mintz p_resp->num_mc_filters < p_req->num_mc_filters) { 13461cf2b1a9SYuval Mintz DP_VERBOSE(p_hwfn, 13471cf2b1a9SYuval Mintz QED_MSG_IOV, 13481cf2b1a9SYuval Mintz "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", 13491cf2b1a9SYuval Mintz p_vf->abs_vf_id, 13501cf2b1a9SYuval Mintz p_req->num_rxqs, 13511cf2b1a9SYuval Mintz p_resp->num_rxqs, 13521cf2b1a9SYuval Mintz p_req->num_rxqs, 13531cf2b1a9SYuval Mintz p_resp->num_txqs, 13541cf2b1a9SYuval Mintz p_req->num_sbs, 13551cf2b1a9SYuval Mintz 
p_resp->num_sbs, 13561cf2b1a9SYuval Mintz p_req->num_mac_filters, 13571cf2b1a9SYuval Mintz p_resp->num_mac_filters, 13581cf2b1a9SYuval Mintz p_req->num_vlan_filters, 13591cf2b1a9SYuval Mintz p_resp->num_vlan_filters, 13601cf2b1a9SYuval Mintz p_req->num_mc_filters, p_resp->num_mc_filters); 1361a044df83SYuval Mintz 1362a044df83SYuval Mintz /* Some legacy OSes are incapable of correctly handling this 1363a044df83SYuval Mintz * failure. 1364a044df83SYuval Mintz */ 1365a044df83SYuval Mintz if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 1366a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) && 1367a044df83SYuval Mintz (p_vf->acquire.vfdev_info.os_type == 1368a044df83SYuval Mintz VFPF_ACQUIRE_OS_WINDOWS)) 1369a044df83SYuval Mintz return PFVF_STATUS_SUCCESS; 1370a044df83SYuval Mintz 13711cf2b1a9SYuval Mintz return PFVF_STATUS_NO_RESOURCE; 13721cf2b1a9SYuval Mintz } 13731cf2b1a9SYuval Mintz 13741cf2b1a9SYuval Mintz return PFVF_STATUS_SUCCESS; 13751cf2b1a9SYuval Mintz } 13761cf2b1a9SYuval Mintz 13771cf2b1a9SYuval Mintz static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, 13781cf2b1a9SYuval Mintz struct pfvf_stats_info *p_stats) 13791cf2b1a9SYuval Mintz { 13801cf2b1a9SYuval Mintz p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + 13811cf2b1a9SYuval Mintz offsetof(struct mstorm_vf_zone, 13821cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 13831cf2b1a9SYuval Mintz p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); 13841cf2b1a9SYuval Mintz p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + 13851cf2b1a9SYuval Mintz offsetof(struct ustorm_vf_zone, 13861cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 13871cf2b1a9SYuval Mintz p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); 13881cf2b1a9SYuval Mintz p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + 13891cf2b1a9SYuval Mintz offsetof(struct pstorm_vf_zone, 13901cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 13911cf2b1a9SYuval Mintz p_stats->pstats.len = sizeof(struct 
eth_pstorm_per_queue_stat); 13921cf2b1a9SYuval Mintz p_stats->tstats.address = 0; 13931cf2b1a9SYuval Mintz p_stats->tstats.len = 0; 13941cf2b1a9SYuval Mintz } 13951cf2b1a9SYuval Mintz 13961408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, 139737bff2b9SYuval Mintz struct qed_ptt *p_ptt, 13981408cc1fSYuval Mintz struct qed_vf_info *vf) 139937bff2b9SYuval Mintz { 14001408cc1fSYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 14011408cc1fSYuval Mintz struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 14021408cc1fSYuval Mintz struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 14031408cc1fSYuval Mintz struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 14041cf2b1a9SYuval Mintz u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 14051408cc1fSYuval Mintz struct pf_vf_resc *resc = &resp->resc; 14061fe614d1SYuval Mintz int rc; 14071fe614d1SYuval Mintz 14081fe614d1SYuval Mintz memset(resp, 0, sizeof(*resp)); 14091408cc1fSYuval Mintz 141005fafbfbSYuval Mintz /* Write the PF version so that VF would know which version 141105fafbfbSYuval Mintz * is supported - might be later overriden. This guarantees that 141205fafbfbSYuval Mintz * VF could recognize legacy PF based on lack of versions in reply. 
141305fafbfbSYuval Mintz */ 141405fafbfbSYuval Mintz pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; 141505fafbfbSYuval Mintz pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; 141605fafbfbSYuval Mintz 1417a044df83SYuval Mintz if (vf->state != VF_FREE && vf->state != VF_STOPPED) { 1418a044df83SYuval Mintz DP_VERBOSE(p_hwfn, 1419a044df83SYuval Mintz QED_MSG_IOV, 1420a044df83SYuval Mintz "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", 1421a044df83SYuval Mintz vf->abs_vf_id, vf->state); 1422a044df83SYuval Mintz goto out; 1423a044df83SYuval Mintz } 1424a044df83SYuval Mintz 14251408cc1fSYuval Mintz /* Validate FW compatibility */ 14261fe614d1SYuval Mintz if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { 1427a044df83SYuval Mintz if (req->vfdev_info.capabilities & 1428a044df83SYuval Mintz VFPF_ACQUIRE_CAP_PRE_FP_HSI) { 1429a044df83SYuval Mintz struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; 1430a044df83SYuval Mintz 1431a044df83SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1432a044df83SYuval Mintz "VF[%d] is pre-fastpath HSI\n", 1433a044df83SYuval Mintz vf->abs_vf_id); 1434a044df83SYuval Mintz p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; 1435a044df83SYuval Mintz p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; 1436a044df83SYuval Mintz } else { 14371408cc1fSYuval Mintz DP_INFO(p_hwfn, 14381fe614d1SYuval Mintz "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", 14391408cc1fSYuval Mintz vf->abs_vf_id, 14401fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_major, 14411fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor, 14421fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 14431fe614d1SYuval Mintz 14441408cc1fSYuval Mintz goto out; 14451408cc1fSYuval Mintz } 1446a044df83SYuval Mintz } 14471408cc1fSYuval Mintz 14481408cc1fSYuval Mintz /* On 100g PFs, prevent old VFs from loading */ 14491408cc1fSYuval Mintz if ((p_hwfn->cdev->num_hwfns > 1) && 14501408cc1fSYuval Mintz 
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { 14511408cc1fSYuval Mintz DP_INFO(p_hwfn, 14521408cc1fSYuval Mintz "VF[%d] is running an old driver that doesn't support 100g\n", 14531408cc1fSYuval Mintz vf->abs_vf_id); 14541408cc1fSYuval Mintz goto out; 14551408cc1fSYuval Mintz } 14561408cc1fSYuval Mintz 14571fe614d1SYuval Mintz /* Store the acquire message */ 14581fe614d1SYuval Mintz memcpy(&vf->acquire, req, sizeof(vf->acquire)); 14591408cc1fSYuval Mintz 14601408cc1fSYuval Mintz vf->opaque_fid = req->vfdev_info.opaque_fid; 14611408cc1fSYuval Mintz 14621408cc1fSYuval Mintz vf->vf_bulletin = req->bulletin_addr; 14631408cc1fSYuval Mintz vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 14641408cc1fSYuval Mintz vf->bulletin.size : req->bulletin_size; 14651408cc1fSYuval Mintz 14661408cc1fSYuval Mintz /* fill in pfdev info */ 14671408cc1fSYuval Mintz pfdev_info->chip_num = p_hwfn->cdev->chip_num; 14681408cc1fSYuval Mintz pfdev_info->db_size = 0; 14691408cc1fSYuval Mintz pfdev_info->indices_per_sb = PIS_PER_SB; 14701408cc1fSYuval Mintz 14711408cc1fSYuval Mintz pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | 14721408cc1fSYuval Mintz PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; 14731408cc1fSYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 14741408cc1fSYuval Mintz pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 14751408cc1fSYuval Mintz 14761cf2b1a9SYuval Mintz qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); 14771408cc1fSYuval Mintz 14781408cc1fSYuval Mintz memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 14791408cc1fSYuval Mintz 14801408cc1fSYuval Mintz pfdev_info->fw_major = FW_MAJOR_VERSION; 14811408cc1fSYuval Mintz pfdev_info->fw_minor = FW_MINOR_VERSION; 14821408cc1fSYuval Mintz pfdev_info->fw_rev = FW_REVISION_VERSION; 14831408cc1fSYuval Mintz pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 1484a044df83SYuval Mintz 1485a044df83SYuval Mintz /* Incorrect when legacy, but doesn't matter as legacy isn't reading 
1486a044df83SYuval Mintz * this field. 1487a044df83SYuval Mintz */ 14881a635e48SYuval Mintz pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, 14891fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor); 14901408cc1fSYuval Mintz pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; 14911408cc1fSYuval Mintz qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); 14921408cc1fSYuval Mintz 14931408cc1fSYuval Mintz pfdev_info->dev_type = p_hwfn->cdev->type; 14941408cc1fSYuval Mintz pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; 14951408cc1fSYuval Mintz 14961cf2b1a9SYuval Mintz /* Fill resources available to VF; Make sure there are enough to 14971cf2b1a9SYuval Mintz * satisfy the VF's request. 14981408cc1fSYuval Mintz */ 14991cf2b1a9SYuval Mintz vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, 15001cf2b1a9SYuval Mintz &req->resc_request, resc); 15011cf2b1a9SYuval Mintz if (vfpf_status != PFVF_STATUS_SUCCESS) 15021cf2b1a9SYuval Mintz goto out; 15031408cc1fSYuval Mintz 15041fe614d1SYuval Mintz /* Start the VF in FW */ 15051fe614d1SYuval Mintz rc = qed_sp_vf_start(p_hwfn, vf); 15061fe614d1SYuval Mintz if (rc) { 15071fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); 15081fe614d1SYuval Mintz vfpf_status = PFVF_STATUS_FAILURE; 15091fe614d1SYuval Mintz goto out; 15101fe614d1SYuval Mintz } 15111fe614d1SYuval Mintz 15121408cc1fSYuval Mintz /* Fill agreed size of bulletin board in response */ 15131408cc1fSYuval Mintz resp->bulletin_size = vf->bulletin.size; 151436558c3dSYuval Mintz qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 15151408cc1fSYuval Mintz 15161408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 15171408cc1fSYuval Mintz QED_MSG_IOV, 15181408cc1fSYuval Mintz "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" 15191408cc1fSYuval Mintz "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", 15201408cc1fSYuval Mintz vf->abs_vf_id, 15211408cc1fSYuval Mintz 
resp->pfdev_info.chip_num, 15221408cc1fSYuval Mintz resp->pfdev_info.db_size, 15231408cc1fSYuval Mintz resp->pfdev_info.indices_per_sb, 15241408cc1fSYuval Mintz resp->pfdev_info.capabilities, 15251408cc1fSYuval Mintz resc->num_rxqs, 15261408cc1fSYuval Mintz resc->num_txqs, 15271408cc1fSYuval Mintz resc->num_sbs, 15281408cc1fSYuval Mintz resc->num_mac_filters, 15291408cc1fSYuval Mintz resc->num_vlan_filters); 15301408cc1fSYuval Mintz vf->state = VF_ACQUIRED; 15311408cc1fSYuval Mintz 15321408cc1fSYuval Mintz /* Prepare Response */ 15331408cc1fSYuval Mintz out: 15341408cc1fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, 15351408cc1fSYuval Mintz sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); 153637bff2b9SYuval Mintz } 153737bff2b9SYuval Mintz 15386ddc7608SYuval Mintz static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, 15396ddc7608SYuval Mintz struct qed_vf_info *p_vf, bool val) 15406ddc7608SYuval Mintz { 15416ddc7608SYuval Mintz struct qed_sp_vport_update_params params; 15426ddc7608SYuval Mintz int rc; 15436ddc7608SYuval Mintz 15446ddc7608SYuval Mintz if (val == p_vf->spoof_chk) { 15456ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 15466ddc7608SYuval Mintz "Spoofchk value[%d] is already configured\n", val); 15476ddc7608SYuval Mintz return 0; 15486ddc7608SYuval Mintz } 15496ddc7608SYuval Mintz 15506ddc7608SYuval Mintz memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); 15516ddc7608SYuval Mintz params.opaque_fid = p_vf->opaque_fid; 15526ddc7608SYuval Mintz params.vport_id = p_vf->vport_id; 15536ddc7608SYuval Mintz params.update_anti_spoofing_en_flg = 1; 15546ddc7608SYuval Mintz params.anti_spoofing_en = val; 15556ddc7608SYuval Mintz 15566ddc7608SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 1557cb1fa088SYuval Mintz if (!rc) { 15586ddc7608SYuval Mintz p_vf->spoof_chk = val; 15596ddc7608SYuval Mintz p_vf->req_spoofchk_val = p_vf->spoof_chk; 15606ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, 
QED_MSG_IOV, 15616ddc7608SYuval Mintz "Spoofchk val[%d] configured\n", val); 15626ddc7608SYuval Mintz } else { 15636ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 15646ddc7608SYuval Mintz "Spoofchk configuration[val:%d] failed for VF[%d]\n", 15656ddc7608SYuval Mintz val, p_vf->relative_vf_id); 15666ddc7608SYuval Mintz } 15676ddc7608SYuval Mintz 15686ddc7608SYuval Mintz return rc; 15696ddc7608SYuval Mintz } 15706ddc7608SYuval Mintz 157108feecd7SYuval Mintz static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, 157208feecd7SYuval Mintz struct qed_vf_info *p_vf) 157308feecd7SYuval Mintz { 157408feecd7SYuval Mintz struct qed_filter_ucast filter; 157508feecd7SYuval Mintz int rc = 0; 157608feecd7SYuval Mintz int i; 157708feecd7SYuval Mintz 157808feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 157908feecd7SYuval Mintz filter.is_rx_filter = 1; 158008feecd7SYuval Mintz filter.is_tx_filter = 1; 158108feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 158208feecd7SYuval Mintz filter.opcode = QED_FILTER_ADD; 158308feecd7SYuval Mintz 158408feecd7SYuval Mintz /* Reconfigure vlans */ 158508feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 158608feecd7SYuval Mintz if (!p_vf->shadow_config.vlans[i].used) 158708feecd7SYuval Mintz continue; 158808feecd7SYuval Mintz 158908feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 159008feecd7SYuval Mintz filter.vlan = p_vf->shadow_config.vlans[i].vid; 15911a635e48SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 159208feecd7SYuval Mintz "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", 159308feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 15941a635e48SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 15951a635e48SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 159608feecd7SYuval Mintz if (rc) { 159708feecd7SYuval Mintz DP_NOTICE(p_hwfn, 159808feecd7SYuval Mintz "Failed to configure VLAN [%04x] to VF [%04x]\n", 159908feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 
160008feecd7SYuval Mintz break; 160108feecd7SYuval Mintz } 160208feecd7SYuval Mintz } 160308feecd7SYuval Mintz 160408feecd7SYuval Mintz return rc; 160508feecd7SYuval Mintz } 160608feecd7SYuval Mintz 160708feecd7SYuval Mintz static int 160808feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, 160908feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 161008feecd7SYuval Mintz { 161108feecd7SYuval Mintz int rc = 0; 161208feecd7SYuval Mintz 16131a635e48SYuval Mintz if ((events & BIT(VLAN_ADDR_FORCED)) && 161408feecd7SYuval Mintz !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) 161508feecd7SYuval Mintz rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); 161608feecd7SYuval Mintz 161708feecd7SYuval Mintz return rc; 161808feecd7SYuval Mintz } 161908feecd7SYuval Mintz 162008feecd7SYuval Mintz static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, 162108feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 162208feecd7SYuval Mintz { 162308feecd7SYuval Mintz int rc = 0; 162408feecd7SYuval Mintz struct qed_filter_ucast filter; 162508feecd7SYuval Mintz 162608feecd7SYuval Mintz if (!p_vf->vport_instance) 162708feecd7SYuval Mintz return -EINVAL; 162808feecd7SYuval Mintz 16291a635e48SYuval Mintz if (events & BIT(MAC_ADDR_FORCED)) { 1630eff16960SYuval Mintz /* Since there's no way [currently] of removing the MAC, 1631eff16960SYuval Mintz * we can always assume this means we need to force it. 
1632eff16960SYuval Mintz */ 1633eff16960SYuval Mintz memset(&filter, 0, sizeof(filter)); 1634eff16960SYuval Mintz filter.type = QED_FILTER_MAC; 1635eff16960SYuval Mintz filter.opcode = QED_FILTER_REPLACE; 1636eff16960SYuval Mintz filter.is_rx_filter = 1; 1637eff16960SYuval Mintz filter.is_tx_filter = 1; 1638eff16960SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 1639eff16960SYuval Mintz ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); 1640eff16960SYuval Mintz 1641eff16960SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 1642eff16960SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 1643eff16960SYuval Mintz if (rc) { 1644eff16960SYuval Mintz DP_NOTICE(p_hwfn, 1645eff16960SYuval Mintz "PF failed to configure MAC for VF\n"); 1646eff16960SYuval Mintz return rc; 1647eff16960SYuval Mintz } 1648eff16960SYuval Mintz 1649eff16960SYuval Mintz p_vf->configured_features |= 1 << MAC_ADDR_FORCED; 1650eff16960SYuval Mintz } 1651eff16960SYuval Mintz 16521a635e48SYuval Mintz if (events & BIT(VLAN_ADDR_FORCED)) { 165308feecd7SYuval Mintz struct qed_sp_vport_update_params vport_update; 165408feecd7SYuval Mintz u8 removal; 165508feecd7SYuval Mintz int i; 165608feecd7SYuval Mintz 165708feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 165808feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 165908feecd7SYuval Mintz filter.is_rx_filter = 1; 166008feecd7SYuval Mintz filter.is_tx_filter = 1; 166108feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 166208feecd7SYuval Mintz filter.vlan = p_vf->bulletin.p_virt->pvid; 166308feecd7SYuval Mintz filter.opcode = filter.vlan ? 
QED_FILTER_REPLACE : 166408feecd7SYuval Mintz QED_FILTER_FLUSH; 166508feecd7SYuval Mintz 166608feecd7SYuval Mintz /* Send the ramrod */ 166708feecd7SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 166808feecd7SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 166908feecd7SYuval Mintz if (rc) { 167008feecd7SYuval Mintz DP_NOTICE(p_hwfn, 167108feecd7SYuval Mintz "PF failed to configure VLAN for VF\n"); 167208feecd7SYuval Mintz return rc; 167308feecd7SYuval Mintz } 167408feecd7SYuval Mintz 167508feecd7SYuval Mintz /* Update the default-vlan & silent vlan stripping */ 167608feecd7SYuval Mintz memset(&vport_update, 0, sizeof(vport_update)); 167708feecd7SYuval Mintz vport_update.opaque_fid = p_vf->opaque_fid; 167808feecd7SYuval Mintz vport_update.vport_id = p_vf->vport_id; 167908feecd7SYuval Mintz vport_update.update_default_vlan_enable_flg = 1; 168008feecd7SYuval Mintz vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; 168108feecd7SYuval Mintz vport_update.update_default_vlan_flg = 1; 168208feecd7SYuval Mintz vport_update.default_vlan = filter.vlan; 168308feecd7SYuval Mintz 168408feecd7SYuval Mintz vport_update.update_inner_vlan_removal_flg = 1; 168508feecd7SYuval Mintz removal = filter.vlan ? 1 168608feecd7SYuval Mintz : p_vf->shadow_config.inner_vlan_removal; 168708feecd7SYuval Mintz vport_update.inner_vlan_removal_flg = removal; 168808feecd7SYuval Mintz vport_update.silent_vlan_removal_flg = filter.vlan ? 
1 : 0; 168908feecd7SYuval Mintz rc = qed_sp_vport_update(p_hwfn, 169008feecd7SYuval Mintz &vport_update, 169108feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 169208feecd7SYuval Mintz if (rc) { 169308feecd7SYuval Mintz DP_NOTICE(p_hwfn, 169408feecd7SYuval Mintz "PF failed to configure VF vport for vlan\n"); 169508feecd7SYuval Mintz return rc; 169608feecd7SYuval Mintz } 169708feecd7SYuval Mintz 169808feecd7SYuval Mintz /* Update all the Rx queues */ 169908feecd7SYuval Mintz for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 17003da7a37aSMintz, Yuval struct qed_queue_cid *p_cid; 170108feecd7SYuval Mintz 17023da7a37aSMintz, Yuval p_cid = p_vf->vf_queues[i].p_rx_cid; 17033da7a37aSMintz, Yuval if (!p_cid) 170408feecd7SYuval Mintz continue; 170508feecd7SYuval Mintz 17063da7a37aSMintz, Yuval rc = qed_sp_eth_rx_queues_update(p_hwfn, 17073da7a37aSMintz, Yuval (void **)&p_cid, 170808feecd7SYuval Mintz 1, 0, 1, 170908feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, 171008feecd7SYuval Mintz NULL); 171108feecd7SYuval Mintz if (rc) { 171208feecd7SYuval Mintz DP_NOTICE(p_hwfn, 171308feecd7SYuval Mintz "Failed to send Rx update fo queue[0x%04x]\n", 17143da7a37aSMintz, Yuval p_cid->rel.queue_id); 171508feecd7SYuval Mintz return rc; 171608feecd7SYuval Mintz } 171708feecd7SYuval Mintz } 171808feecd7SYuval Mintz 171908feecd7SYuval Mintz if (filter.vlan) 172008feecd7SYuval Mintz p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; 172108feecd7SYuval Mintz else 17221a635e48SYuval Mintz p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); 172308feecd7SYuval Mintz } 172408feecd7SYuval Mintz 172508feecd7SYuval Mintz /* If forced features are terminated, we need to configure the shadow 172608feecd7SYuval Mintz * configuration back again. 
172708feecd7SYuval Mintz */ 172808feecd7SYuval Mintz if (events) 172908feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); 173008feecd7SYuval Mintz 173108feecd7SYuval Mintz return rc; 173208feecd7SYuval Mintz } 173308feecd7SYuval Mintz 1734dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, 1735dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1736dacd88d6SYuval Mintz struct qed_vf_info *vf) 1737dacd88d6SYuval Mintz { 1738dacd88d6SYuval Mintz struct qed_sp_vport_start_params params = { 0 }; 1739dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1740dacd88d6SYuval Mintz struct vfpf_vport_start_tlv *start; 1741dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1742dacd88d6SYuval Mintz struct qed_vf_info *vf_info; 174308feecd7SYuval Mintz u64 *p_bitmap; 1744dacd88d6SYuval Mintz int sb_id; 1745dacd88d6SYuval Mintz int rc; 1746dacd88d6SYuval Mintz 1747dacd88d6SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); 1748dacd88d6SYuval Mintz if (!vf_info) { 1749dacd88d6SYuval Mintz DP_NOTICE(p_hwfn->cdev, 1750dacd88d6SYuval Mintz "Failed to get VF info, invalid vfid [%d]\n", 1751dacd88d6SYuval Mintz vf->relative_vf_id); 1752dacd88d6SYuval Mintz return; 1753dacd88d6SYuval Mintz } 1754dacd88d6SYuval Mintz 1755dacd88d6SYuval Mintz vf->state = VF_ENABLED; 1756dacd88d6SYuval Mintz start = &mbx->req_virt->start_vport; 1757dacd88d6SYuval Mintz 1758dacd88d6SYuval Mintz /* Initialize Status block in CAU */ 1759dacd88d6SYuval Mintz for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { 1760dacd88d6SYuval Mintz if (!start->sb_addr[sb_id]) { 1761dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1762dacd88d6SYuval Mintz "VF[%d] did not fill the address of SB %d\n", 1763dacd88d6SYuval Mintz vf->relative_vf_id, sb_id); 1764dacd88d6SYuval Mintz break; 1765dacd88d6SYuval Mintz } 1766dacd88d6SYuval Mintz 1767dacd88d6SYuval Mintz qed_int_cau_conf_sb(p_hwfn, p_ptt, 1768dacd88d6SYuval Mintz 
start->sb_addr[sb_id], 17691a635e48SYuval Mintz vf->igu_sbs[sb_id], vf->abs_vf_id, 1); 1770dacd88d6SYuval Mintz } 1771dacd88d6SYuval Mintz qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); 1772dacd88d6SYuval Mintz 1773dacd88d6SYuval Mintz vf->mtu = start->mtu; 177408feecd7SYuval Mintz vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; 177508feecd7SYuval Mintz 177608feecd7SYuval Mintz /* Take into consideration configuration forced by hypervisor; 177708feecd7SYuval Mintz * If none is configured, use the supplied VF values [for old 177808feecd7SYuval Mintz * vfs that would still be fine, since they passed '0' as padding]. 177908feecd7SYuval Mintz */ 178008feecd7SYuval Mintz p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 17811a635e48SYuval Mintz if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 178208feecd7SYuval Mintz u8 vf_req = start->only_untagged; 178308feecd7SYuval Mintz 178408feecd7SYuval Mintz vf_info->bulletin.p_virt->default_only_untagged = vf_req; 178508feecd7SYuval Mintz *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 178608feecd7SYuval Mintz } 1787dacd88d6SYuval Mintz 1788dacd88d6SYuval Mintz params.tpa_mode = start->tpa_mode; 1789dacd88d6SYuval Mintz params.remove_inner_vlan = start->inner_vlan_removal; 1790831bfb0eSYuval Mintz params.tx_switching = true; 1791dacd88d6SYuval Mintz 179208feecd7SYuval Mintz params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 1793dacd88d6SYuval Mintz params.drop_ttl0 = false; 1794dacd88d6SYuval Mintz params.concrete_fid = vf->concrete_fid; 1795dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 1796dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1797dacd88d6SYuval Mintz params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1798dacd88d6SYuval Mintz params.mtu = vf->mtu; 179911a85d75SYuval Mintz params.check_mac = true; 1800dacd88d6SYuval Mintz 1801dacd88d6SYuval Mintz rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); 18021a635e48SYuval Mintz if (rc) { 
1803dacd88d6SYuval Mintz DP_ERR(p_hwfn, 1804dacd88d6SYuval Mintz "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1805dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1806dacd88d6SYuval Mintz } else { 1807dacd88d6SYuval Mintz vf->vport_instance++; 180808feecd7SYuval Mintz 180908feecd7SYuval Mintz /* Force configuration if needed on the newly opened vport */ 181008feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 18116ddc7608SYuval Mintz 18126ddc7608SYuval Mintz __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 1813dacd88d6SYuval Mintz } 1814dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 1815dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1816dacd88d6SYuval Mintz } 1817dacd88d6SYuval Mintz 1818dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, 1819dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1820dacd88d6SYuval Mintz struct qed_vf_info *vf) 1821dacd88d6SYuval Mintz { 1822dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1823dacd88d6SYuval Mintz int rc; 1824dacd88d6SYuval Mintz 1825dacd88d6SYuval Mintz vf->vport_instance--; 18266ddc7608SYuval Mintz vf->spoof_chk = false; 1827dacd88d6SYuval Mintz 1828dacd88d6SYuval Mintz rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 18291a635e48SYuval Mintz if (rc) { 1830dacd88d6SYuval Mintz DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 1831dacd88d6SYuval Mintz rc); 1832dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1833dacd88d6SYuval Mintz } 1834dacd88d6SYuval Mintz 183508feecd7SYuval Mintz /* Forget the configuration on the vport */ 183608feecd7SYuval Mintz vf->configured_features = 0; 183708feecd7SYuval Mintz memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 183808feecd7SYuval Mintz 1839dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 1840dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1841dacd88d6SYuval 
Mintz } 1842dacd88d6SYuval Mintz 1843dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 1844dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1845a044df83SYuval Mintz struct qed_vf_info *vf, 1846a044df83SYuval Mintz u8 status, bool b_legacy) 1847dacd88d6SYuval Mintz { 1848dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1849dacd88d6SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 1850dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 1851a044df83SYuval Mintz u16 length; 1852dacd88d6SYuval Mintz 1853dacd88d6SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 1854dacd88d6SYuval Mintz 1855a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 1856a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 1857a044df83SYuval Mintz * clients assume the size of the previous response. 1858a044df83SYuval Mintz */ 1859a044df83SYuval Mintz if (!b_legacy) 1860a044df83SYuval Mintz length = sizeof(*p_tlv); 1861a044df83SYuval Mintz else 1862a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 1863a044df83SYuval Mintz 1864dacd88d6SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, 1865a044df83SYuval Mintz length); 1866dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 1867dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1868dacd88d6SYuval Mintz 1869dacd88d6SYuval Mintz /* Update the TLV with the response */ 1870a044df83SYuval Mintz if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 1871dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 1872351a4dedSYuval Mintz p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 1873351a4dedSYuval Mintz offsetof(struct mstorm_vf_zone, 1874351a4dedSYuval Mintz non_trigger.eth_rx_queue_producers) + 1875351a4dedSYuval Mintz sizeof(struct eth_rx_prod_data) * req->rx_qid; 1876dacd88d6SYuval Mintz } 1877dacd88d6SYuval Mintz 1878a044df83SYuval Mintz qed_iov_send_response(p_hwfn, 
p_ptt, vf, length, status); 1879dacd88d6SYuval Mintz } 1880dacd88d6SYuval Mintz 1881dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, 1882dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1883dacd88d6SYuval Mintz struct qed_vf_info *vf) 1884dacd88d6SYuval Mintz { 1885dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 1886dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 188741086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 18883da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue; 1889dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 1890a044df83SYuval Mintz bool b_legacy_vf = false; 1891dacd88d6SYuval Mintz int rc; 1892dacd88d6SYuval Mintz 1893dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 189441086467SYuval Mintz 189541086467SYuval Mintz if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || 189641086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 189741086467SYuval Mintz goto out; 189841086467SYuval Mintz 18993da7a37aSMintz, Yuval /* Acquire a new queue-cid */ 19003da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->rx_qid]; 19013da7a37aSMintz, Yuval 19023da7a37aSMintz, Yuval memset(¶ms, 0, sizeof(params)); 19033da7a37aSMintz, Yuval params.queue_id = p_queue->fw_rx_qid; 1904dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 19053da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 1906dacd88d6SYuval Mintz params.sb = req->hw_sb; 1907dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 1908dacd88d6SYuval Mintz 19093da7a37aSMintz, Yuval p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, 19103da7a37aSMintz, Yuval vf->opaque_fid, 19113da7a37aSMintz, Yuval p_queue->fw_cid, 19123da7a37aSMintz, Yuval req->rx_qid, ¶ms); 19133da7a37aSMintz, Yuval if (!p_queue->p_rx_cid) 19143da7a37aSMintz, Yuval goto out; 19153da7a37aSMintz, Yuval 1916a044df83SYuval Mintz /* Legacy VFs have their Producers in a different location, which they 1917a044df83SYuval Mintz * calculate on their own and 
clean the producer prior to this. 1918a044df83SYuval Mintz */ 1919a044df83SYuval Mintz if (vf->acquire.vfdev_info.eth_fp_hsi_minor == 1920a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) { 1921a044df83SYuval Mintz b_legacy_vf = true; 1922a044df83SYuval Mintz } else { 1923a044df83SYuval Mintz REG_WR(p_hwfn, 1924a044df83SYuval Mintz GTT_BAR0_MAP_REG_MSDM_RAM + 1925a044df83SYuval Mintz MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 1926a044df83SYuval Mintz 0); 1927a044df83SYuval Mintz } 19283da7a37aSMintz, Yuval p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf; 1929a044df83SYuval Mintz 19303da7a37aSMintz, Yuval rc = qed_eth_rxq_start_ramrod(p_hwfn, 19313da7a37aSMintz, Yuval p_queue->p_rx_cid, 1932dacd88d6SYuval Mintz req->bd_max_bytes, 1933dacd88d6SYuval Mintz req->rxq_addr, 19343da7a37aSMintz, Yuval req->cqe_pbl_addr, req->cqe_pbl_size); 1935dacd88d6SYuval Mintz if (rc) { 1936dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 19373da7a37aSMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); 19383da7a37aSMintz, Yuval p_queue->p_rx_cid = NULL; 1939dacd88d6SYuval Mintz } else { 194041086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 1941dacd88d6SYuval Mintz vf->num_active_rxqs++; 1942dacd88d6SYuval Mintz } 1943dacd88d6SYuval Mintz 194441086467SYuval Mintz out: 1945a044df83SYuval Mintz qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); 1946dacd88d6SYuval Mintz } 1947dacd88d6SYuval Mintz 19485040acf5SYuval Mintz static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, 19495040acf5SYuval Mintz struct qed_ptt *p_ptt, 19505040acf5SYuval Mintz struct qed_vf_info *p_vf, u8 status) 19515040acf5SYuval Mintz { 19525040acf5SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 19535040acf5SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 1954a044df83SYuval Mintz bool b_legacy = false; 1955a044df83SYuval Mintz u16 length; 19565040acf5SYuval Mintz 19575040acf5SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 19585040acf5SYuval 
Mintz 1959a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 1960a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 1961a044df83SYuval Mintz * clients assume the size of the previous response. 1962a044df83SYuval Mintz */ 1963a044df83SYuval Mintz if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 1964a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) 1965a044df83SYuval Mintz b_legacy = true; 1966a044df83SYuval Mintz 1967a044df83SYuval Mintz if (!b_legacy) 1968a044df83SYuval Mintz length = sizeof(*p_tlv); 1969a044df83SYuval Mintz else 1970a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 1971a044df83SYuval Mintz 19725040acf5SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, 1973a044df83SYuval Mintz length); 19745040acf5SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 19755040acf5SYuval Mintz sizeof(struct channel_list_end_tlv)); 19765040acf5SYuval Mintz 19775040acf5SYuval Mintz /* Update the TLV with the response */ 1978a044df83SYuval Mintz if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 19795040acf5SYuval Mintz u16 qid = mbx->req_virt->start_txq.tx_qid; 19805040acf5SYuval Mintz 198151ff1725SRam Amrani p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid, 19825040acf5SYuval Mintz DQ_DEMS_LEGACY); 19835040acf5SYuval Mintz } 19845040acf5SYuval Mintz 1985a044df83SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 19865040acf5SYuval Mintz } 19875040acf5SYuval Mintz 1988dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 1989dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1990dacd88d6SYuval Mintz struct qed_vf_info *vf) 1991dacd88d6SYuval Mintz { 1992dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 1993dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 199441086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 1995dacd88d6SYuval Mintz union qed_qm_pq_params 
pq_params; 1996dacd88d6SYuval Mintz struct vfpf_start_txq_tlv *req; 19973da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue; 1998dacd88d6SYuval Mintz int rc; 19993da7a37aSMintz, Yuval u16 pq; 2000dacd88d6SYuval Mintz 2001dacd88d6SYuval Mintz /* Prepare the parameters which would choose the right PQ */ 2002dacd88d6SYuval Mintz memset(&pq_params, 0, sizeof(pq_params)); 2003dacd88d6SYuval Mintz pq_params.eth.is_vf = 1; 2004dacd88d6SYuval Mintz pq_params.eth.vf_id = vf->relative_vf_id; 2005dacd88d6SYuval Mintz 2006dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 2007dacd88d6SYuval Mintz req = &mbx->req_virt->start_txq; 200841086467SYuval Mintz 200941086467SYuval Mintz if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) || 201041086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 201141086467SYuval Mintz goto out; 201241086467SYuval Mintz 20133da7a37aSMintz, Yuval /* Acquire a new queue-cid */ 20143da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->tx_qid]; 20153da7a37aSMintz, Yuval 20163da7a37aSMintz, Yuval params.queue_id = p_queue->fw_tx_qid; 2017dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 20183da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 2019dacd88d6SYuval Mintz params.sb = req->hw_sb; 2020dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 2021dacd88d6SYuval Mintz 20223da7a37aSMintz, Yuval p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, 2023dacd88d6SYuval Mintz vf->opaque_fid, 20243da7a37aSMintz, Yuval p_queue->fw_cid, 20253da7a37aSMintz, Yuval req->tx_qid, ¶ms); 20263da7a37aSMintz, Yuval if (!p_queue->p_tx_cid) 20273da7a37aSMintz, Yuval goto out; 2028dacd88d6SYuval Mintz 20293da7a37aSMintz, Yuval pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params); 20303da7a37aSMintz, Yuval rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, 20313da7a37aSMintz, Yuval req->pbl_addr, req->pbl_size, pq); 203241086467SYuval Mintz if (rc) { 2033dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 20343da7a37aSMintz, Yuval 
qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); 20353da7a37aSMintz, Yuval p_queue->p_tx_cid = NULL; 203641086467SYuval Mintz } else { 203741086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 203841086467SYuval Mintz } 2039dacd88d6SYuval Mintz 204041086467SYuval Mintz out: 20415040acf5SYuval Mintz qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status); 2042dacd88d6SYuval Mintz } 2043dacd88d6SYuval Mintz 2044dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 2045dacd88d6SYuval Mintz struct qed_vf_info *vf, 2046dacd88d6SYuval Mintz u16 rxq_id, u8 num_rxqs, bool cqe_completion) 2047dacd88d6SYuval Mintz { 20483da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue; 2049dacd88d6SYuval Mintz int rc = 0; 2050dacd88d6SYuval Mintz int qid; 2051dacd88d6SYuval Mintz 2052dacd88d6SYuval Mintz if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues)) 2053dacd88d6SYuval Mintz return -EINVAL; 2054dacd88d6SYuval Mintz 2055dacd88d6SYuval Mintz for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { 20563da7a37aSMintz, Yuval p_queue = &vf->vf_queues[qid]; 2057dacd88d6SYuval Mintz 20583da7a37aSMintz, Yuval if (!p_queue->p_rx_cid) 20593da7a37aSMintz, Yuval continue; 20603da7a37aSMintz, Yuval 20613da7a37aSMintz, Yuval rc = qed_eth_rx_queue_stop(p_hwfn, 20623da7a37aSMintz, Yuval p_queue->p_rx_cid, 20633da7a37aSMintz, Yuval false, cqe_completion); 2064dacd88d6SYuval Mintz if (rc) 2065dacd88d6SYuval Mintz return rc; 20663da7a37aSMintz, Yuval 20673da7a37aSMintz, Yuval vf->vf_queues[qid].p_rx_cid = NULL; 2068dacd88d6SYuval Mintz vf->num_active_rxqs--; 2069dacd88d6SYuval Mintz } 2070dacd88d6SYuval Mintz 2071dacd88d6SYuval Mintz return rc; 2072dacd88d6SYuval Mintz } 2073dacd88d6SYuval Mintz 2074dacd88d6SYuval Mintz static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, 2075dacd88d6SYuval Mintz struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) 2076dacd88d6SYuval Mintz { 2077dacd88d6SYuval Mintz int rc = 0; 20783da7a37aSMintz, Yuval struct qed_vf_q_info *p_queue; 
2079dacd88d6SYuval Mintz int qid; 2080dacd88d6SYuval Mintz 2081dacd88d6SYuval Mintz if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) 2082dacd88d6SYuval Mintz return -EINVAL; 2083dacd88d6SYuval Mintz 2084dacd88d6SYuval Mintz for (qid = txq_id; qid < txq_id + num_txqs; qid++) { 20853da7a37aSMintz, Yuval p_queue = &vf->vf_queues[qid]; 20863da7a37aSMintz, Yuval if (!p_queue->p_tx_cid) 20873da7a37aSMintz, Yuval continue; 2088dacd88d6SYuval Mintz 20893da7a37aSMintz, Yuval rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); 2090dacd88d6SYuval Mintz if (rc) 2091dacd88d6SYuval Mintz return rc; 20923da7a37aSMintz, Yuval 20933da7a37aSMintz, Yuval p_queue->p_tx_cid = NULL; 2094dacd88d6SYuval Mintz } 20953da7a37aSMintz, Yuval 2096dacd88d6SYuval Mintz return rc; 2097dacd88d6SYuval Mintz } 2098dacd88d6SYuval Mintz 2099dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, 2100dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2101dacd88d6SYuval Mintz struct qed_vf_info *vf) 2102dacd88d6SYuval Mintz { 2103dacd88d6SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 2104dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2105dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2106dacd88d6SYuval Mintz struct vfpf_stop_rxqs_tlv *req; 2107dacd88d6SYuval Mintz int rc; 2108dacd88d6SYuval Mintz 2109dacd88d6SYuval Mintz /* We give the option of starting from qid != 0, in this case we 2110dacd88d6SYuval Mintz * need to make sure that qid + num_qs doesn't exceed the actual 2111dacd88d6SYuval Mintz * amount of queues that exist. 
2112dacd88d6SYuval Mintz */ 2113dacd88d6SYuval Mintz req = &mbx->req_virt->stop_rxqs; 2114dacd88d6SYuval Mintz rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 2115dacd88d6SYuval Mintz req->num_rxqs, req->cqe_completion); 2116dacd88d6SYuval Mintz if (rc) 2117dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2118dacd88d6SYuval Mintz 2119dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 2120dacd88d6SYuval Mintz length, status); 2121dacd88d6SYuval Mintz } 2122dacd88d6SYuval Mintz 2123dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, 2124dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2125dacd88d6SYuval Mintz struct qed_vf_info *vf) 2126dacd88d6SYuval Mintz { 2127dacd88d6SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 2128dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2129dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2130dacd88d6SYuval Mintz struct vfpf_stop_txqs_tlv *req; 2131dacd88d6SYuval Mintz int rc; 2132dacd88d6SYuval Mintz 2133dacd88d6SYuval Mintz /* We give the option of starting from qid != 0, in this case we 2134dacd88d6SYuval Mintz * need to make sure that qid + num_qs doesn't exceed the actual 2135dacd88d6SYuval Mintz * amount of queues that exist. 
2136dacd88d6SYuval Mintz */ 2137dacd88d6SYuval Mintz req = &mbx->req_virt->stop_txqs; 2138dacd88d6SYuval Mintz rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); 2139dacd88d6SYuval Mintz if (rc) 2140dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2141dacd88d6SYuval Mintz 2142dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 2143dacd88d6SYuval Mintz length, status); 2144dacd88d6SYuval Mintz } 2145dacd88d6SYuval Mintz 214617b235c1SYuval Mintz static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, 214717b235c1SYuval Mintz struct qed_ptt *p_ptt, 214817b235c1SYuval Mintz struct qed_vf_info *vf) 214917b235c1SYuval Mintz { 21503da7a37aSMintz, Yuval struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; 215117b235c1SYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 215217b235c1SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 215317b235c1SYuval Mintz struct vfpf_update_rxq_tlv *req; 21543da7a37aSMintz, Yuval u8 status = PFVF_STATUS_FAILURE; 215517b235c1SYuval Mintz u8 complete_event_flg; 215617b235c1SYuval Mintz u8 complete_cqe_flg; 215717b235c1SYuval Mintz u16 qid; 215817b235c1SYuval Mintz int rc; 215917b235c1SYuval Mintz u8 i; 216017b235c1SYuval Mintz 216117b235c1SYuval Mintz req = &mbx->req_virt->update_rxq; 216217b235c1SYuval Mintz complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 216317b235c1SYuval Mintz complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 216417b235c1SYuval Mintz 21653da7a37aSMintz, Yuval /* Validate inputs */ 21663da7a37aSMintz, Yuval if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF || 21673da7a37aSMintz, Yuval !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) { 21683da7a37aSMintz, Yuval DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", 21693da7a37aSMintz, Yuval vf->relative_vf_id, req->rx_qid, req->num_rxqs); 21703da7a37aSMintz, Yuval goto out; 217117b235c1SYuval Mintz } 217217b235c1SYuval Mintz 21733da7a37aSMintz, 
Yuval for (i = 0; i < req->num_rxqs; i++) { 21743da7a37aSMintz, Yuval qid = req->rx_qid + i; 21753da7a37aSMintz, Yuval if (!vf->vf_queues[qid].p_rx_cid) { 21763da7a37aSMintz, Yuval DP_INFO(p_hwfn, 21773da7a37aSMintz, Yuval "VF[%d] rx_qid = %d isn`t active!\n", 21783da7a37aSMintz, Yuval vf->relative_vf_id, qid); 21793da7a37aSMintz, Yuval goto out; 21803da7a37aSMintz, Yuval } 21813da7a37aSMintz, Yuval 21823da7a37aSMintz, Yuval handlers[i] = vf->vf_queues[qid].p_rx_cid; 21833da7a37aSMintz, Yuval } 21843da7a37aSMintz, Yuval 21853da7a37aSMintz, Yuval rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, 21863da7a37aSMintz, Yuval req->num_rxqs, 218717b235c1SYuval Mintz complete_cqe_flg, 218817b235c1SYuval Mintz complete_event_flg, 218917b235c1SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 21903da7a37aSMintz, Yuval if (rc) 21913da7a37aSMintz, Yuval goto out; 219217b235c1SYuval Mintz 21933da7a37aSMintz, Yuval status = PFVF_STATUS_SUCCESS; 21943da7a37aSMintz, Yuval out: 219517b235c1SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, 219617b235c1SYuval Mintz length, status); 219717b235c1SYuval Mintz } 219817b235c1SYuval Mintz 2199dacd88d6SYuval Mintz void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, 2200dacd88d6SYuval Mintz void *p_tlvs_list, u16 req_type) 2201dacd88d6SYuval Mintz { 2202dacd88d6SYuval Mintz struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; 2203dacd88d6SYuval Mintz int len = 0; 2204dacd88d6SYuval Mintz 2205dacd88d6SYuval Mintz do { 2206dacd88d6SYuval Mintz if (!p_tlv->length) { 2207dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, "Zero length TLV found\n"); 2208dacd88d6SYuval Mintz return NULL; 2209dacd88d6SYuval Mintz } 2210dacd88d6SYuval Mintz 2211dacd88d6SYuval Mintz if (p_tlv->type == req_type) { 2212dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2213dacd88d6SYuval Mintz "Extended tlv type %d, length %d found\n", 2214dacd88d6SYuval Mintz p_tlv->type, p_tlv->length); 2215dacd88d6SYuval Mintz return p_tlv; 
2216dacd88d6SYuval Mintz } 2217dacd88d6SYuval Mintz 2218dacd88d6SYuval Mintz len += p_tlv->length; 2219dacd88d6SYuval Mintz p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); 2220dacd88d6SYuval Mintz 2221dacd88d6SYuval Mintz if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { 2222dacd88d6SYuval Mintz DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); 2223dacd88d6SYuval Mintz return NULL; 2224dacd88d6SYuval Mintz } 2225dacd88d6SYuval Mintz } while (p_tlv->type != CHANNEL_TLV_LIST_END); 2226dacd88d6SYuval Mintz 2227dacd88d6SYuval Mintz return NULL; 2228dacd88d6SYuval Mintz } 2229dacd88d6SYuval Mintz 2230dacd88d6SYuval Mintz static void 2231dacd88d6SYuval Mintz qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, 2232dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 2233dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2234dacd88d6SYuval Mintz { 2235dacd88d6SYuval Mintz struct vfpf_vport_update_activate_tlv *p_act_tlv; 2236dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 2237dacd88d6SYuval Mintz 2238dacd88d6SYuval Mintz p_act_tlv = (struct vfpf_vport_update_activate_tlv *) 2239dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2240dacd88d6SYuval Mintz if (!p_act_tlv) 2241dacd88d6SYuval Mintz return; 2242dacd88d6SYuval Mintz 2243dacd88d6SYuval Mintz p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; 2244dacd88d6SYuval Mintz p_data->vport_active_rx_flg = p_act_tlv->active_rx; 2245dacd88d6SYuval Mintz p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; 2246dacd88d6SYuval Mintz p_data->vport_active_tx_flg = p_act_tlv->active_tx; 2247dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; 2248dacd88d6SYuval Mintz } 2249dacd88d6SYuval Mintz 2250dacd88d6SYuval Mintz static void 225117b235c1SYuval Mintz qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, 225217b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 225317b235c1SYuval Mintz struct qed_vf_info 
*p_vf, 225417b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 225517b235c1SYuval Mintz { 225617b235c1SYuval Mintz struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 225717b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 225817b235c1SYuval Mintz 225917b235c1SYuval Mintz p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) 226017b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 226117b235c1SYuval Mintz if (!p_vlan_tlv) 226217b235c1SYuval Mintz return; 226317b235c1SYuval Mintz 226408feecd7SYuval Mintz p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; 226508feecd7SYuval Mintz 226608feecd7SYuval Mintz /* Ignore the VF request if we're forcing a vlan */ 22671a635e48SYuval Mintz if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { 226817b235c1SYuval Mintz p_data->update_inner_vlan_removal_flg = 1; 226917b235c1SYuval Mintz p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; 227008feecd7SYuval Mintz } 227117b235c1SYuval Mintz 227217b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; 227317b235c1SYuval Mintz } 227417b235c1SYuval Mintz 227517b235c1SYuval Mintz static void 227617b235c1SYuval Mintz qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, 227717b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 227817b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 227917b235c1SYuval Mintz { 228017b235c1SYuval Mintz struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 228117b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 228217b235c1SYuval Mintz 228317b235c1SYuval Mintz p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 228417b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 228517b235c1SYuval Mintz tlv); 228617b235c1SYuval Mintz if (!p_tx_switch_tlv) 228717b235c1SYuval Mintz return; 228817b235c1SYuval Mintz 228917b235c1SYuval Mintz p_data->update_tx_switching_flg = 1; 229017b235c1SYuval Mintz 
p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 229117b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; 229217b235c1SYuval Mintz } 229317b235c1SYuval Mintz 229417b235c1SYuval Mintz static void 2295dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, 2296dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 2297dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2298dacd88d6SYuval Mintz { 2299dacd88d6SYuval Mintz struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 2300dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 2301dacd88d6SYuval Mintz 2302dacd88d6SYuval Mintz p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 2303dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2304dacd88d6SYuval Mintz if (!p_mcast_tlv) 2305dacd88d6SYuval Mintz return; 2306dacd88d6SYuval Mintz 2307dacd88d6SYuval Mintz p_data->update_approx_mcast_flg = 1; 2308dacd88d6SYuval Mintz memcpy(p_data->bins, p_mcast_tlv->bins, 2309dacd88d6SYuval Mintz sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2310dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2311dacd88d6SYuval Mintz } 2312dacd88d6SYuval Mintz 2313dacd88d6SYuval Mintz static void 2314dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, 2315dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 2316dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2317dacd88d6SYuval Mintz { 2318dacd88d6SYuval Mintz struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; 2319dacd88d6SYuval Mintz struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 2320dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 2321dacd88d6SYuval Mintz 2322dacd88d6SYuval Mintz p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) 2323dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2324dacd88d6SYuval Mintz if (!p_accept_tlv) 
2325dacd88d6SYuval Mintz return; 2326dacd88d6SYuval Mintz 2327dacd88d6SYuval Mintz p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 2328dacd88d6SYuval Mintz p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 2329dacd88d6SYuval Mintz p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 2330dacd88d6SYuval Mintz p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 2331dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; 2332dacd88d6SYuval Mintz } 2333dacd88d6SYuval Mintz 2334dacd88d6SYuval Mintz static void 233517b235c1SYuval Mintz qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, 233617b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 233717b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 233817b235c1SYuval Mintz { 233917b235c1SYuval Mintz struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 234017b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 234117b235c1SYuval Mintz 234217b235c1SYuval Mintz p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 234317b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 234417b235c1SYuval Mintz tlv); 234517b235c1SYuval Mintz if (!p_accept_any_vlan) 234617b235c1SYuval Mintz return; 234717b235c1SYuval Mintz 234817b235c1SYuval Mintz p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 234917b235c1SYuval Mintz p_data->update_accept_any_vlan_flg = 235017b235c1SYuval Mintz p_accept_any_vlan->update_accept_any_vlan_flg; 235117b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 235217b235c1SYuval Mintz } 235317b235c1SYuval Mintz 235417b235c1SYuval Mintz static void 2355dacd88d6SYuval Mintz qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, 2356dacd88d6SYuval Mintz struct qed_vf_info *vf, 2357dacd88d6SYuval Mintz struct qed_sp_vport_update_params *p_data, 2358dacd88d6SYuval Mintz struct qed_rss_params *p_rss, 2359f29ffdb6SMintz, Yuval struct 
qed_iov_vf_mbx *p_mbx, 2360f29ffdb6SMintz, Yuval u16 *tlvs_mask, u16 *tlvs_accepted) 2361dacd88d6SYuval Mintz { 2362dacd88d6SYuval Mintz struct vfpf_vport_update_rss_tlv *p_rss_tlv; 2363dacd88d6SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; 2364f29ffdb6SMintz, Yuval bool b_reject = false; 2365dacd88d6SYuval Mintz u16 table_size; 2366f29ffdb6SMintz, Yuval u16 i, q_idx; 2367dacd88d6SYuval Mintz 2368dacd88d6SYuval Mintz p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 2369dacd88d6SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2370dacd88d6SYuval Mintz if (!p_rss_tlv) { 2371dacd88d6SYuval Mintz p_data->rss_params = NULL; 2372dacd88d6SYuval Mintz return; 2373dacd88d6SYuval Mintz } 2374dacd88d6SYuval Mintz 2375dacd88d6SYuval Mintz memset(p_rss, 0, sizeof(struct qed_rss_params)); 2376dacd88d6SYuval Mintz 2377dacd88d6SYuval Mintz p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & 2378dacd88d6SYuval Mintz VFPF_UPDATE_RSS_CONFIG_FLAG); 2379dacd88d6SYuval Mintz p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 2380dacd88d6SYuval Mintz VFPF_UPDATE_RSS_CAPS_FLAG); 2381dacd88d6SYuval Mintz p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 2382dacd88d6SYuval Mintz VFPF_UPDATE_RSS_IND_TABLE_FLAG); 2383dacd88d6SYuval Mintz p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 2384dacd88d6SYuval Mintz VFPF_UPDATE_RSS_KEY_FLAG); 2385dacd88d6SYuval Mintz 2386dacd88d6SYuval Mintz p_rss->rss_enable = p_rss_tlv->rss_enable; 2387dacd88d6SYuval Mintz p_rss->rss_eng_id = vf->relative_vf_id + 1; 2388dacd88d6SYuval Mintz p_rss->rss_caps = p_rss_tlv->rss_caps; 2389dacd88d6SYuval Mintz p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 2390dacd88d6SYuval Mintz memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); 2391dacd88d6SYuval Mintz 2392dacd88d6SYuval Mintz table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), 2393dacd88d6SYuval Mintz (1 << p_rss_tlv->rss_table_size_log)); 2394dacd88d6SYuval Mintz 
2395dacd88d6SYuval Mintz for (i = 0; i < table_size; i++) { 2396f29ffdb6SMintz, Yuval q_idx = p_rss_tlv->rss_ind_table[i]; 2397f29ffdb6SMintz, Yuval if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) { 2398f29ffdb6SMintz, Yuval DP_VERBOSE(p_hwfn, 2399f29ffdb6SMintz, Yuval QED_MSG_IOV, 2400f29ffdb6SMintz, Yuval "VF[%d]: Omitting RSS due to wrong queue %04x\n", 2401f29ffdb6SMintz, Yuval vf->relative_vf_id, q_idx); 2402f29ffdb6SMintz, Yuval b_reject = true; 2403f29ffdb6SMintz, Yuval goto out; 2404f29ffdb6SMintz, Yuval } 2405dacd88d6SYuval Mintz 2406f29ffdb6SMintz, Yuval if (!vf->vf_queues[q_idx].p_rx_cid) { 2407f29ffdb6SMintz, Yuval DP_VERBOSE(p_hwfn, 2408f29ffdb6SMintz, Yuval QED_MSG_IOV, 2409f29ffdb6SMintz, Yuval "VF[%d]: Omitting RSS due to inactive queue %08x\n", 2410f29ffdb6SMintz, Yuval vf->relative_vf_id, q_idx); 2411f29ffdb6SMintz, Yuval b_reject = true; 2412f29ffdb6SMintz, Yuval goto out; 2413f29ffdb6SMintz, Yuval } 2414f29ffdb6SMintz, Yuval 2415f29ffdb6SMintz, Yuval p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; 2416dacd88d6SYuval Mintz } 2417dacd88d6SYuval Mintz 2418dacd88d6SYuval Mintz p_data->rss_params = p_rss; 2419f29ffdb6SMintz, Yuval out: 2420dacd88d6SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; 2421f29ffdb6SMintz, Yuval if (!b_reject) 2422f29ffdb6SMintz, Yuval *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; 2423dacd88d6SYuval Mintz } 2424dacd88d6SYuval Mintz 242517b235c1SYuval Mintz static void 242617b235c1SYuval Mintz qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, 242717b235c1SYuval Mintz struct qed_vf_info *vf, 242817b235c1SYuval Mintz struct qed_sp_vport_update_params *p_data, 242917b235c1SYuval Mintz struct qed_sge_tpa_params *p_sge_tpa, 243017b235c1SYuval Mintz struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 243117b235c1SYuval Mintz { 243217b235c1SYuval Mintz struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 243317b235c1SYuval Mintz u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 243417b235c1SYuval Mintz 243517b235c1SYuval 
Mintz p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) 243617b235c1SYuval Mintz qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 243717b235c1SYuval Mintz 243817b235c1SYuval Mintz if (!p_sge_tpa_tlv) { 243917b235c1SYuval Mintz p_data->sge_tpa_params = NULL; 244017b235c1SYuval Mintz return; 244117b235c1SYuval Mintz } 244217b235c1SYuval Mintz 244317b235c1SYuval Mintz memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); 244417b235c1SYuval Mintz 244517b235c1SYuval Mintz p_sge_tpa->update_tpa_en_flg = 244617b235c1SYuval Mintz !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); 244717b235c1SYuval Mintz p_sge_tpa->update_tpa_param_flg = 244817b235c1SYuval Mintz !!(p_sge_tpa_tlv->update_sge_tpa_flags & 244917b235c1SYuval Mintz VFPF_UPDATE_TPA_PARAM_FLAG); 245017b235c1SYuval Mintz 245117b235c1SYuval Mintz p_sge_tpa->tpa_ipv4_en_flg = 245217b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); 245317b235c1SYuval Mintz p_sge_tpa->tpa_ipv6_en_flg = 245417b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); 245517b235c1SYuval Mintz p_sge_tpa->tpa_pkt_split_flg = 245617b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); 245717b235c1SYuval Mintz p_sge_tpa->tpa_hdr_data_split_flg = 245817b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); 245917b235c1SYuval Mintz p_sge_tpa->tpa_gro_consistent_flg = 246017b235c1SYuval Mintz !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); 246117b235c1SYuval Mintz 246217b235c1SYuval Mintz p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; 246317b235c1SYuval Mintz p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; 246417b235c1SYuval Mintz p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; 246517b235c1SYuval Mintz p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; 246617b235c1SYuval Mintz p_sge_tpa->max_buffers_per_cqe = 
p_sge_tpa_tlv->max_buffers_per_cqe; 246717b235c1SYuval Mintz 246817b235c1SYuval Mintz p_data->sge_tpa_params = p_sge_tpa; 246917b235c1SYuval Mintz 247017b235c1SYuval Mintz *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; 247117b235c1SYuval Mintz } 247217b235c1SYuval Mintz 2473f990c82cSMintz, Yuval static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, 2474f990c82cSMintz, Yuval u8 vfid, 2475f990c82cSMintz, Yuval struct qed_sp_vport_update_params *params, 2476f990c82cSMintz, Yuval u16 *tlvs) 2477f990c82cSMintz, Yuval { 2478f990c82cSMintz, Yuval u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 2479f990c82cSMintz, Yuval struct qed_filter_accept_flags *flags = ¶ms->accept_flags; 2480f990c82cSMintz, Yuval struct qed_public_vf_info *vf_info; 2481f990c82cSMintz, Yuval 2482f990c82cSMintz, Yuval /* Untrusted VFs can't even be trusted to know that fact. 2483f990c82cSMintz, Yuval * Simply indicate everything is configured fine, and trace 2484f990c82cSMintz, Yuval * configuration 'behind their back'. 
2485f990c82cSMintz, Yuval */ 2486f990c82cSMintz, Yuval if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) 2487f990c82cSMintz, Yuval return 0; 2488f990c82cSMintz, Yuval 2489f990c82cSMintz, Yuval vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 2490f990c82cSMintz, Yuval 2491f990c82cSMintz, Yuval if (flags->update_rx_mode_config) { 2492f990c82cSMintz, Yuval vf_info->rx_accept_mode = flags->rx_accept_filter; 2493f990c82cSMintz, Yuval if (!vf_info->is_trusted_configured) 2494f990c82cSMintz, Yuval flags->rx_accept_filter &= ~mask; 2495f990c82cSMintz, Yuval } 2496f990c82cSMintz, Yuval 2497f990c82cSMintz, Yuval if (flags->update_tx_mode_config) { 2498f990c82cSMintz, Yuval vf_info->tx_accept_mode = flags->tx_accept_filter; 2499f990c82cSMintz, Yuval if (!vf_info->is_trusted_configured) 2500f990c82cSMintz, Yuval flags->tx_accept_filter &= ~mask; 2501f990c82cSMintz, Yuval } 2502f990c82cSMintz, Yuval 2503f990c82cSMintz, Yuval return 0; 2504f990c82cSMintz, Yuval } 2505f990c82cSMintz, Yuval 2506dacd88d6SYuval Mintz static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, 2507dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2508dacd88d6SYuval Mintz struct qed_vf_info *vf) 2509dacd88d6SYuval Mintz { 2510f29ffdb6SMintz, Yuval struct qed_rss_params *p_rss_params = NULL; 2511dacd88d6SYuval Mintz struct qed_sp_vport_update_params params; 2512dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 251317b235c1SYuval Mintz struct qed_sge_tpa_params sge_tpa_params; 2514f29ffdb6SMintz, Yuval u16 tlvs_mask = 0, tlvs_accepted = 0; 2515dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2516dacd88d6SYuval Mintz u16 length; 2517dacd88d6SYuval Mintz int rc; 2518dacd88d6SYuval Mintz 251941086467SYuval Mintz /* Valiate PF can send such a request */ 252041086467SYuval Mintz if (!vf->vport_instance) { 252141086467SYuval Mintz DP_VERBOSE(p_hwfn, 252241086467SYuval Mintz QED_MSG_IOV, 252341086467SYuval Mintz "No VPORT instance available for VF[%d], failing vport update\n", 
252441086467SYuval Mintz vf->abs_vf_id); 252541086467SYuval Mintz status = PFVF_STATUS_FAILURE; 252641086467SYuval Mintz goto out; 252741086467SYuval Mintz } 2528f29ffdb6SMintz, Yuval p_rss_params = vzalloc(sizeof(*p_rss_params)); 2529f29ffdb6SMintz, Yuval if (p_rss_params == NULL) { 2530f29ffdb6SMintz, Yuval status = PFVF_STATUS_FAILURE; 2531f29ffdb6SMintz, Yuval goto out; 2532f29ffdb6SMintz, Yuval } 253341086467SYuval Mintz 2534dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 2535dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 2536dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 2537dacd88d6SYuval Mintz params.rss_params = NULL; 2538dacd88d6SYuval Mintz 2539dacd88d6SYuval Mintz /* Search for extended tlvs list and update values 2540dacd88d6SYuval Mintz * from VF in struct qed_sp_vport_update_params. 2541dacd88d6SYuval Mintz */ 2542dacd88d6SYuval Mintz qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); 254317b235c1SYuval Mintz qed_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); 254417b235c1SYuval Mintz qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); 2545dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); 2546dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); 254717b235c1SYuval Mintz qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); 254817b235c1SYuval Mintz qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, 254917b235c1SYuval Mintz &sge_tpa_params, mbx, &tlvs_mask); 2550dacd88d6SYuval Mintz 2551f29ffdb6SMintz, Yuval tlvs_accepted = tlvs_mask; 2552f29ffdb6SMintz, Yuval 2553f29ffdb6SMintz, Yuval /* Some of the extended TLVs need to be validated first; In that case, 2554f29ffdb6SMintz, Yuval * they can update the mask without updating the accepted [so that 2555f29ffdb6SMintz, Yuval * PF could communicate to VF it has rejected request]. 
2556dacd88d6SYuval Mintz */ 2557f29ffdb6SMintz, Yuval qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, 2558f29ffdb6SMintz, Yuval mbx, &tlvs_mask, &tlvs_accepted); 2559f29ffdb6SMintz, Yuval 2560f990c82cSMintz, Yuval if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, 2561f990c82cSMintz, Yuval ¶ms, &tlvs_accepted)) { 2562f990c82cSMintz, Yuval tlvs_accepted = 0; 2563f990c82cSMintz, Yuval status = PFVF_STATUS_NOT_SUPPORTED; 2564f990c82cSMintz, Yuval goto out; 2565f990c82cSMintz, Yuval } 2566f990c82cSMintz, Yuval 2567f29ffdb6SMintz, Yuval if (!tlvs_accepted) { 2568f29ffdb6SMintz, Yuval if (tlvs_mask) 2569f29ffdb6SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2570f29ffdb6SMintz, Yuval "Upper-layer prevents VF vport configuration\n"); 2571f29ffdb6SMintz, Yuval else 2572f29ffdb6SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2573dacd88d6SYuval Mintz "No feature tlvs found for vport update\n"); 2574dacd88d6SYuval Mintz status = PFVF_STATUS_NOT_SUPPORTED; 2575dacd88d6SYuval Mintz goto out; 2576dacd88d6SYuval Mintz } 2577dacd88d6SYuval Mintz 2578dacd88d6SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 2579dacd88d6SYuval Mintz 2580dacd88d6SYuval Mintz if (rc) 2581dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2582dacd88d6SYuval Mintz 2583dacd88d6SYuval Mintz out: 2584f29ffdb6SMintz, Yuval vfree(p_rss_params); 2585dacd88d6SYuval Mintz length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, 2586f29ffdb6SMintz, Yuval tlvs_mask, tlvs_accepted); 2587dacd88d6SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2588dacd88d6SYuval Mintz } 2589dacd88d6SYuval Mintz 25908246d0b4SYuval Mintz static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, 259108feecd7SYuval Mintz struct qed_vf_info *p_vf, 259208feecd7SYuval Mintz struct qed_filter_ucast *p_params) 259308feecd7SYuval Mintz { 259408feecd7SYuval Mintz int i; 259508feecd7SYuval Mintz 259608feecd7SYuval Mintz /* First remove entries and then add new 
ones */ 259708feecd7SYuval Mintz if (p_params->opcode == QED_FILTER_REMOVE) { 259808feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 259908feecd7SYuval Mintz if (p_vf->shadow_config.vlans[i].used && 260008feecd7SYuval Mintz p_vf->shadow_config.vlans[i].vid == 260108feecd7SYuval Mintz p_params->vlan) { 260208feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = false; 260308feecd7SYuval Mintz break; 260408feecd7SYuval Mintz } 260508feecd7SYuval Mintz if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 260608feecd7SYuval Mintz DP_VERBOSE(p_hwfn, 260708feecd7SYuval Mintz QED_MSG_IOV, 260808feecd7SYuval Mintz "VF [%d] - Tries to remove a non-existing vlan\n", 260908feecd7SYuval Mintz p_vf->relative_vf_id); 261008feecd7SYuval Mintz return -EINVAL; 261108feecd7SYuval Mintz } 261208feecd7SYuval Mintz } else if (p_params->opcode == QED_FILTER_REPLACE || 261308feecd7SYuval Mintz p_params->opcode == QED_FILTER_FLUSH) { 261408feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 261508feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = false; 261608feecd7SYuval Mintz } 261708feecd7SYuval Mintz 261808feecd7SYuval Mintz /* In forced mode, we're willing to remove entries - but we don't add 261908feecd7SYuval Mintz * new ones. 
262008feecd7SYuval Mintz */ 26211a635e48SYuval Mintz if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) 262208feecd7SYuval Mintz return 0; 262308feecd7SYuval Mintz 262408feecd7SYuval Mintz if (p_params->opcode == QED_FILTER_ADD || 262508feecd7SYuval Mintz p_params->opcode == QED_FILTER_REPLACE) { 262608feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 262708feecd7SYuval Mintz if (p_vf->shadow_config.vlans[i].used) 262808feecd7SYuval Mintz continue; 262908feecd7SYuval Mintz 263008feecd7SYuval Mintz p_vf->shadow_config.vlans[i].used = true; 263108feecd7SYuval Mintz p_vf->shadow_config.vlans[i].vid = p_params->vlan; 263208feecd7SYuval Mintz break; 263308feecd7SYuval Mintz } 263408feecd7SYuval Mintz 263508feecd7SYuval Mintz if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 263608feecd7SYuval Mintz DP_VERBOSE(p_hwfn, 263708feecd7SYuval Mintz QED_MSG_IOV, 263808feecd7SYuval Mintz "VF [%d] - Tries to configure more than %d vlan filters\n", 263908feecd7SYuval Mintz p_vf->relative_vf_id, 264008feecd7SYuval Mintz QED_ETH_VF_NUM_VLAN_FILTERS + 1); 264108feecd7SYuval Mintz return -EINVAL; 264208feecd7SYuval Mintz } 264308feecd7SYuval Mintz } 264408feecd7SYuval Mintz 264508feecd7SYuval Mintz return 0; 264608feecd7SYuval Mintz } 264708feecd7SYuval Mintz 26488246d0b4SYuval Mintz static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, 26498246d0b4SYuval Mintz struct qed_vf_info *p_vf, 26508246d0b4SYuval Mintz struct qed_filter_ucast *p_params) 26518246d0b4SYuval Mintz { 26528246d0b4SYuval Mintz int i; 26538246d0b4SYuval Mintz 26548246d0b4SYuval Mintz /* If we're in forced-mode, we don't allow any change */ 26551a635e48SYuval Mintz if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) 26568246d0b4SYuval Mintz return 0; 26578246d0b4SYuval Mintz 26588246d0b4SYuval Mintz /* First remove entries and then add new ones */ 26598246d0b4SYuval Mintz if (p_params->opcode == QED_FILTER_REMOVE) { 26608246d0b4SYuval Mintz for (i = 0; i < 
QED_ETH_VF_NUM_MAC_FILTERS; i++) { 26618246d0b4SYuval Mintz if (ether_addr_equal(p_vf->shadow_config.macs[i], 26628246d0b4SYuval Mintz p_params->mac)) { 26630ee28e31SShyam Saini eth_zero_addr(p_vf->shadow_config.macs[i]); 26648246d0b4SYuval Mintz break; 26658246d0b4SYuval Mintz } 26668246d0b4SYuval Mintz } 26678246d0b4SYuval Mintz 26688246d0b4SYuval Mintz if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 26698246d0b4SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 26708246d0b4SYuval Mintz "MAC isn't configured\n"); 26718246d0b4SYuval Mintz return -EINVAL; 26728246d0b4SYuval Mintz } 26738246d0b4SYuval Mintz } else if (p_params->opcode == QED_FILTER_REPLACE || 26748246d0b4SYuval Mintz p_params->opcode == QED_FILTER_FLUSH) { 26758246d0b4SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) 26760ee28e31SShyam Saini eth_zero_addr(p_vf->shadow_config.macs[i]); 26778246d0b4SYuval Mintz } 26788246d0b4SYuval Mintz 26798246d0b4SYuval Mintz /* List the new MAC address */ 26808246d0b4SYuval Mintz if (p_params->opcode != QED_FILTER_ADD && 26818246d0b4SYuval Mintz p_params->opcode != QED_FILTER_REPLACE) 26828246d0b4SYuval Mintz return 0; 26838246d0b4SYuval Mintz 26848246d0b4SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 26858246d0b4SYuval Mintz if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { 26868246d0b4SYuval Mintz ether_addr_copy(p_vf->shadow_config.macs[i], 26878246d0b4SYuval Mintz p_params->mac); 26888246d0b4SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 26898246d0b4SYuval Mintz "Added MAC at %d entry in shadow\n", i); 26908246d0b4SYuval Mintz break; 26918246d0b4SYuval Mintz } 26928246d0b4SYuval Mintz } 26938246d0b4SYuval Mintz 26948246d0b4SYuval Mintz if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 26958246d0b4SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); 26968246d0b4SYuval Mintz return -EINVAL; 26978246d0b4SYuval Mintz } 26988246d0b4SYuval Mintz 26998246d0b4SYuval Mintz return 0; 27008246d0b4SYuval Mintz } 27018246d0b4SYuval Mintz 
27028246d0b4SYuval Mintz static int 27038246d0b4SYuval Mintz qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 27048246d0b4SYuval Mintz struct qed_vf_info *p_vf, 27058246d0b4SYuval Mintz struct qed_filter_ucast *p_params) 27068246d0b4SYuval Mintz { 27078246d0b4SYuval Mintz int rc = 0; 27088246d0b4SYuval Mintz 27098246d0b4SYuval Mintz if (p_params->type == QED_FILTER_MAC) { 27108246d0b4SYuval Mintz rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); 27118246d0b4SYuval Mintz if (rc) 27128246d0b4SYuval Mintz return rc; 27138246d0b4SYuval Mintz } 27148246d0b4SYuval Mintz 27158246d0b4SYuval Mintz if (p_params->type == QED_FILTER_VLAN) 27168246d0b4SYuval Mintz rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); 27178246d0b4SYuval Mintz 27188246d0b4SYuval Mintz return rc; 27198246d0b4SYuval Mintz } 27208246d0b4SYuval Mintz 2721ba56947aSBaoyou Xie static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 2722dacd88d6SYuval Mintz int vfid, struct qed_filter_ucast *params) 2723dacd88d6SYuval Mintz { 2724dacd88d6SYuval Mintz struct qed_public_vf_info *vf; 2725dacd88d6SYuval Mintz 2726dacd88d6SYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 2727dacd88d6SYuval Mintz if (!vf) 2728dacd88d6SYuval Mintz return -EINVAL; 2729dacd88d6SYuval Mintz 2730dacd88d6SYuval Mintz /* No real decision to make; Store the configured MAC */ 2731dacd88d6SYuval Mintz if (params->type == QED_FILTER_MAC || 2732dacd88d6SYuval Mintz params->type == QED_FILTER_MAC_VLAN) 2733dacd88d6SYuval Mintz ether_addr_copy(vf->mac, params->mac); 2734dacd88d6SYuval Mintz 2735dacd88d6SYuval Mintz return 0; 2736dacd88d6SYuval Mintz } 2737dacd88d6SYuval Mintz 2738dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 2739dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2740dacd88d6SYuval Mintz struct qed_vf_info *vf) 2741dacd88d6SYuval Mintz { 274208feecd7SYuval Mintz struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 2743dacd88d6SYuval Mintz struct 
qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2744dacd88d6SYuval Mintz struct vfpf_ucast_filter_tlv *req; 2745dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 2746dacd88d6SYuval Mintz struct qed_filter_ucast params; 2747dacd88d6SYuval Mintz int rc; 2748dacd88d6SYuval Mintz 2749dacd88d6SYuval Mintz /* Prepare the unicast filter params */ 2750dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(struct qed_filter_ucast)); 2751dacd88d6SYuval Mintz req = &mbx->req_virt->ucast_filter; 2752dacd88d6SYuval Mintz params.opcode = (enum qed_filter_opcode)req->opcode; 2753dacd88d6SYuval Mintz params.type = (enum qed_filter_ucast_type)req->type; 2754dacd88d6SYuval Mintz 2755dacd88d6SYuval Mintz params.is_rx_filter = 1; 2756dacd88d6SYuval Mintz params.is_tx_filter = 1; 2757dacd88d6SYuval Mintz params.vport_to_remove_from = vf->vport_id; 2758dacd88d6SYuval Mintz params.vport_to_add_to = vf->vport_id; 2759dacd88d6SYuval Mintz memcpy(params.mac, req->mac, ETH_ALEN); 2760dacd88d6SYuval Mintz params.vlan = req->vlan; 2761dacd88d6SYuval Mintz 2762dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 2763dacd88d6SYuval Mintz QED_MSG_IOV, 2764dacd88d6SYuval Mintz "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 2765dacd88d6SYuval Mintz vf->abs_vf_id, params.opcode, params.type, 2766dacd88d6SYuval Mintz params.is_rx_filter ? "RX" : "", 2767dacd88d6SYuval Mintz params.is_tx_filter ? 
"TX" : "", 2768dacd88d6SYuval Mintz params.vport_to_add_to, 2769dacd88d6SYuval Mintz params.mac[0], params.mac[1], 2770dacd88d6SYuval Mintz params.mac[2], params.mac[3], 2771dacd88d6SYuval Mintz params.mac[4], params.mac[5], params.vlan); 2772dacd88d6SYuval Mintz 2773dacd88d6SYuval Mintz if (!vf->vport_instance) { 2774dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 2775dacd88d6SYuval Mintz QED_MSG_IOV, 2776dacd88d6SYuval Mintz "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 2777dacd88d6SYuval Mintz vf->abs_vf_id); 2778dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2779dacd88d6SYuval Mintz goto out; 2780dacd88d6SYuval Mintz } 2781dacd88d6SYuval Mintz 278208feecd7SYuval Mintz /* Update shadow copy of the VF configuration */ 278308feecd7SYuval Mintz if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { 278408feecd7SYuval Mintz status = PFVF_STATUS_FAILURE; 278508feecd7SYuval Mintz goto out; 278608feecd7SYuval Mintz } 278708feecd7SYuval Mintz 278808feecd7SYuval Mintz /* Determine if the unicast filtering is acceptible by PF */ 27891a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && 279008feecd7SYuval Mintz (params.type == QED_FILTER_VLAN || 279108feecd7SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 279208feecd7SYuval Mintz /* Once VLAN is forced or PVID is set, do not allow 279308feecd7SYuval Mintz * to add/replace any further VLANs. 
279408feecd7SYuval Mintz */ 279508feecd7SYuval Mintz if (params.opcode == QED_FILTER_ADD || 279608feecd7SYuval Mintz params.opcode == QED_FILTER_REPLACE) 279708feecd7SYuval Mintz status = PFVF_STATUS_FORCED; 279808feecd7SYuval Mintz goto out; 279908feecd7SYuval Mintz } 280008feecd7SYuval Mintz 28011a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && 2802eff16960SYuval Mintz (params.type == QED_FILTER_MAC || 2803eff16960SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 2804eff16960SYuval Mintz if (!ether_addr_equal(p_bulletin->mac, params.mac) || 2805eff16960SYuval Mintz (params.opcode != QED_FILTER_ADD && 2806eff16960SYuval Mintz params.opcode != QED_FILTER_REPLACE)) 2807eff16960SYuval Mintz status = PFVF_STATUS_FORCED; 2808eff16960SYuval Mintz goto out; 2809eff16960SYuval Mintz } 2810eff16960SYuval Mintz 2811dacd88d6SYuval Mintz rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); 2812dacd88d6SYuval Mintz if (rc) { 2813dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2814dacd88d6SYuval Mintz goto out; 2815dacd88d6SYuval Mintz } 2816dacd88d6SYuval Mintz 2817dacd88d6SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, 2818dacd88d6SYuval Mintz QED_SPQ_MODE_CB, NULL); 2819dacd88d6SYuval Mintz if (rc) 2820dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2821dacd88d6SYuval Mintz 2822dacd88d6SYuval Mintz out: 2823dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 2824dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 2825dacd88d6SYuval Mintz } 2826dacd88d6SYuval Mintz 28270b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 28280b55e27dSYuval Mintz struct qed_ptt *p_ptt, 28290b55e27dSYuval Mintz struct qed_vf_info *vf) 28300b55e27dSYuval Mintz { 28310b55e27dSYuval Mintz int i; 28320b55e27dSYuval Mintz 28330b55e27dSYuval Mintz /* Reset the SBs */ 28340b55e27dSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 28350b55e27dSYuval Mintz 
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 28360b55e27dSYuval Mintz vf->igu_sbs[i], 28370b55e27dSYuval Mintz vf->opaque_fid, false); 28380b55e27dSYuval Mintz 28390b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 28400b55e27dSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 28410b55e27dSYuval Mintz PFVF_STATUS_SUCCESS); 28420b55e27dSYuval Mintz } 28430b55e27dSYuval Mintz 28440b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 28450b55e27dSYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 28460b55e27dSYuval Mintz { 28470b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 28480b55e27dSYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 28490b55e27dSYuval Mintz 28500b55e27dSYuval Mintz /* Disable Interrupts for VF */ 28510b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 28520b55e27dSYuval Mintz 28530b55e27dSYuval Mintz /* Reset Permission table */ 28540b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 28550b55e27dSYuval Mintz 28560b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 28570b55e27dSYuval Mintz length, status); 28580b55e27dSYuval Mintz } 28590b55e27dSYuval Mintz 28600b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 28610b55e27dSYuval Mintz struct qed_ptt *p_ptt, 28620b55e27dSYuval Mintz struct qed_vf_info *p_vf) 28630b55e27dSYuval Mintz { 28640b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 28651fe614d1SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 28661fe614d1SYuval Mintz int rc = 0; 28670b55e27dSYuval Mintz 28680b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 28690b55e27dSYuval Mintz 28701fe614d1SYuval Mintz if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 28711fe614d1SYuval Mintz /* Stopping the VF */ 28721fe614d1SYuval Mintz rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 28731fe614d1SYuval Mintz p_vf->opaque_fid); 28741fe614d1SYuval Mintz 
28751fe614d1SYuval Mintz if (rc) { 28761fe614d1SYuval Mintz DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 28771fe614d1SYuval Mintz rc); 28781fe614d1SYuval Mintz status = PFVF_STATUS_FAILURE; 28791fe614d1SYuval Mintz } 28801fe614d1SYuval Mintz 28811fe614d1SYuval Mintz p_vf->state = VF_STOPPED; 28821fe614d1SYuval Mintz } 28831fe614d1SYuval Mintz 28840b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 28851fe614d1SYuval Mintz length, status); 28860b55e27dSYuval Mintz } 28870b55e27dSYuval Mintz 28880b55e27dSYuval Mintz static int 28890b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 28900b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 28910b55e27dSYuval Mintz { 28920b55e27dSYuval Mintz int cnt; 28930b55e27dSYuval Mintz u32 val; 28940b55e27dSYuval Mintz 28950b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 28960b55e27dSYuval Mintz 28970b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 28980b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 28990b55e27dSYuval Mintz if (!val) 29000b55e27dSYuval Mintz break; 29010b55e27dSYuval Mintz msleep(20); 29020b55e27dSYuval Mintz } 29030b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 29040b55e27dSYuval Mintz 29050b55e27dSYuval Mintz if (cnt == 50) { 29060b55e27dSYuval Mintz DP_ERR(p_hwfn, 29070b55e27dSYuval Mintz "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 29080b55e27dSYuval Mintz p_vf->abs_vf_id, val); 29090b55e27dSYuval Mintz return -EBUSY; 29100b55e27dSYuval Mintz } 29110b55e27dSYuval Mintz 29120b55e27dSYuval Mintz return 0; 29130b55e27dSYuval Mintz } 29140b55e27dSYuval Mintz 29150b55e27dSYuval Mintz static int 29160b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 29170b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 29180b55e27dSYuval Mintz { 29190b55e27dSYuval Mintz u32 cons[MAX_NUM_VOQS], 
distance[MAX_NUM_VOQS]; 29200b55e27dSYuval Mintz int i, cnt; 29210b55e27dSYuval Mintz 29220b55e27dSYuval Mintz /* Read initial consumers & producers */ 29230b55e27dSYuval Mintz for (i = 0; i < MAX_NUM_VOQS; i++) { 29240b55e27dSYuval Mintz u32 prod; 29250b55e27dSYuval Mintz 29260b55e27dSYuval Mintz cons[i] = qed_rd(p_hwfn, p_ptt, 29270b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 29280b55e27dSYuval Mintz i * 0x40); 29290b55e27dSYuval Mintz prod = qed_rd(p_hwfn, p_ptt, 29300b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 29310b55e27dSYuval Mintz i * 0x40); 29320b55e27dSYuval Mintz distance[i] = prod - cons[i]; 29330b55e27dSYuval Mintz } 29340b55e27dSYuval Mintz 29350b55e27dSYuval Mintz /* Wait for consumers to pass the producers */ 29360b55e27dSYuval Mintz i = 0; 29370b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 29380b55e27dSYuval Mintz for (; i < MAX_NUM_VOQS; i++) { 29390b55e27dSYuval Mintz u32 tmp; 29400b55e27dSYuval Mintz 29410b55e27dSYuval Mintz tmp = qed_rd(p_hwfn, p_ptt, 29420b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 29430b55e27dSYuval Mintz i * 0x40); 29440b55e27dSYuval Mintz if (distance[i] > tmp - cons[i]) 29450b55e27dSYuval Mintz break; 29460b55e27dSYuval Mintz } 29470b55e27dSYuval Mintz 29480b55e27dSYuval Mintz if (i == MAX_NUM_VOQS) 29490b55e27dSYuval Mintz break; 29500b55e27dSYuval Mintz 29510b55e27dSYuval Mintz msleep(20); 29520b55e27dSYuval Mintz } 29530b55e27dSYuval Mintz 29540b55e27dSYuval Mintz if (cnt == 50) { 29550b55e27dSYuval Mintz DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 29560b55e27dSYuval Mintz p_vf->abs_vf_id, i); 29570b55e27dSYuval Mintz return -EBUSY; 29580b55e27dSYuval Mintz } 29590b55e27dSYuval Mintz 29600b55e27dSYuval Mintz return 0; 29610b55e27dSYuval Mintz } 29620b55e27dSYuval Mintz 29630b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 29640b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 29650b55e27dSYuval Mintz { 
29660b55e27dSYuval Mintz int rc; 29670b55e27dSYuval Mintz 29680b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 29690b55e27dSYuval Mintz if (rc) 29700b55e27dSYuval Mintz return rc; 29710b55e27dSYuval Mintz 29720b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 29730b55e27dSYuval Mintz if (rc) 29740b55e27dSYuval Mintz return rc; 29750b55e27dSYuval Mintz 29760b55e27dSYuval Mintz return 0; 29770b55e27dSYuval Mintz } 29780b55e27dSYuval Mintz 29790b55e27dSYuval Mintz static int 29800b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 29810b55e27dSYuval Mintz struct qed_ptt *p_ptt, 29820b55e27dSYuval Mintz u16 rel_vf_id, u32 *ack_vfs) 29830b55e27dSYuval Mintz { 29840b55e27dSYuval Mintz struct qed_vf_info *p_vf; 29850b55e27dSYuval Mintz int rc = 0; 29860b55e27dSYuval Mintz 29870b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 29880b55e27dSYuval Mintz if (!p_vf) 29890b55e27dSYuval Mintz return 0; 29900b55e27dSYuval Mintz 29910b55e27dSYuval Mintz if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 29920b55e27dSYuval Mintz (1ULL << (rel_vf_id % 64))) { 29930b55e27dSYuval Mintz u16 vfid = p_vf->abs_vf_id; 29940b55e27dSYuval Mintz 29950b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 29960b55e27dSYuval Mintz "VF[%d] - Handling FLR\n", vfid); 29970b55e27dSYuval Mintz 29980b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 29990b55e27dSYuval Mintz 30000b55e27dSYuval Mintz /* If VF isn't active, no need for anything but SW */ 30010b55e27dSYuval Mintz if (!p_vf->b_init) 30020b55e27dSYuval Mintz goto cleanup; 30030b55e27dSYuval Mintz 30040b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 30050b55e27dSYuval Mintz if (rc) 30060b55e27dSYuval Mintz goto cleanup; 30070b55e27dSYuval Mintz 30080b55e27dSYuval Mintz rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 30090b55e27dSYuval Mintz if (rc) { 30100b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", 
vfid); 30110b55e27dSYuval Mintz return rc; 30120b55e27dSYuval Mintz } 30130b55e27dSYuval Mintz 30147eff82b0SYuval Mintz /* Workaround to make VF-PF channel ready, as FW 30157eff82b0SYuval Mintz * doesn't do that as a part of FLR. 30167eff82b0SYuval Mintz */ 30177eff82b0SYuval Mintz REG_WR(p_hwfn, 30187eff82b0SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 30197eff82b0SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); 30207eff82b0SYuval Mintz 30210b55e27dSYuval Mintz /* VF_STOPPED has to be set only after final cleanup 30220b55e27dSYuval Mintz * but prior to re-enabling the VF. 30230b55e27dSYuval Mintz */ 30240b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 30250b55e27dSYuval Mintz 30260b55e27dSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 30270b55e27dSYuval Mintz if (rc) { 30280b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 30290b55e27dSYuval Mintz vfid); 30300b55e27dSYuval Mintz return rc; 30310b55e27dSYuval Mintz } 30320b55e27dSYuval Mintz cleanup: 30330b55e27dSYuval Mintz /* Mark VF for ack and clean pending state */ 30340b55e27dSYuval Mintz if (p_vf->state == VF_RESET) 30350b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 30361a635e48SYuval Mintz ack_vfs[vfid / 32] |= BIT((vfid % 32)); 30370b55e27dSYuval Mintz p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 30380b55e27dSYuval Mintz ~(1ULL << (rel_vf_id % 64)); 3039fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = false; 30400b55e27dSYuval Mintz } 30410b55e27dSYuval Mintz 30420b55e27dSYuval Mintz return rc; 30430b55e27dSYuval Mintz } 30440b55e27dSYuval Mintz 3045ba56947aSBaoyou Xie static int 3046ba56947aSBaoyou Xie qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 30470b55e27dSYuval Mintz { 30480b55e27dSYuval Mintz u32 ack_vfs[VF_MAX_STATIC / 32]; 30490b55e27dSYuval Mintz int rc = 0; 30500b55e27dSYuval Mintz u16 i; 30510b55e27dSYuval Mintz 30520b55e27dSYuval Mintz memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 30530b55e27dSYuval Mintz 
30540b55e27dSYuval Mintz /* Since BRB <-> PRS interface can't be tested as part of the flr 30550b55e27dSYuval Mintz * polling due to HW limitations, simply sleep a bit. And since 30560b55e27dSYuval Mintz * there's no need to wait per-vf, do it before looping. 30570b55e27dSYuval Mintz */ 30580b55e27dSYuval Mintz msleep(100); 30590b55e27dSYuval Mintz 30600b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 30610b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 30620b55e27dSYuval Mintz 30630b55e27dSYuval Mintz rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 30640b55e27dSYuval Mintz return rc; 30650b55e27dSYuval Mintz } 30660b55e27dSYuval Mintz 30670b55e27dSYuval Mintz int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 30680b55e27dSYuval Mintz { 30690b55e27dSYuval Mintz u16 i, found = 0; 30700b55e27dSYuval Mintz 30710b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 30720b55e27dSYuval Mintz for (i = 0; i < (VF_MAX_STATIC / 32); i++) 30730b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 30740b55e27dSYuval Mintz "[%08x,...,%08x]: %08x\n", 30750b55e27dSYuval Mintz i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 30760b55e27dSYuval Mintz 30770b55e27dSYuval Mintz if (!p_hwfn->cdev->p_iov_info) { 30780b55e27dSYuval Mintz DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 30790b55e27dSYuval Mintz return 0; 30800b55e27dSYuval Mintz } 30810b55e27dSYuval Mintz 30820b55e27dSYuval Mintz /* Mark VFs */ 30830b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 30840b55e27dSYuval Mintz struct qed_vf_info *p_vf; 30850b55e27dSYuval Mintz u8 vfid; 30860b55e27dSYuval Mintz 30870b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 30880b55e27dSYuval Mintz if (!p_vf) 30890b55e27dSYuval Mintz continue; 30900b55e27dSYuval Mintz 30910b55e27dSYuval Mintz vfid = p_vf->abs_vf_id; 30921a635e48SYuval Mintz if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { 
30930b55e27dSYuval Mintz u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 30940b55e27dSYuval Mintz u16 rel_vf_id = p_vf->relative_vf_id; 30950b55e27dSYuval Mintz 30960b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 30970b55e27dSYuval Mintz "VF[%d] [rel %d] got FLR-ed\n", 30980b55e27dSYuval Mintz vfid, rel_vf_id); 30990b55e27dSYuval Mintz 31000b55e27dSYuval Mintz p_vf->state = VF_RESET; 31010b55e27dSYuval Mintz 31020b55e27dSYuval Mintz /* No need to lock here, since pending_flr should 31030b55e27dSYuval Mintz * only change here and before ACKing MFw. Since 31040b55e27dSYuval Mintz * MFW will not trigger an additional attention for 31050b55e27dSYuval Mintz * VF flr until ACKs, we're safe. 31060b55e27dSYuval Mintz */ 31070b55e27dSYuval Mintz p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 31080b55e27dSYuval Mintz found = 1; 31090b55e27dSYuval Mintz } 31100b55e27dSYuval Mintz } 31110b55e27dSYuval Mintz 31120b55e27dSYuval Mintz return found; 31130b55e27dSYuval Mintz } 31140b55e27dSYuval Mintz 311573390ac9SYuval Mintz static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 311673390ac9SYuval Mintz u16 vfid, 311773390ac9SYuval Mintz struct qed_mcp_link_params *p_params, 311873390ac9SYuval Mintz struct qed_mcp_link_state *p_link, 311973390ac9SYuval Mintz struct qed_mcp_link_capabilities *p_caps) 312073390ac9SYuval Mintz { 312173390ac9SYuval Mintz struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 312273390ac9SYuval Mintz vfid, 312373390ac9SYuval Mintz false); 312473390ac9SYuval Mintz struct qed_bulletin_content *p_bulletin; 312573390ac9SYuval Mintz 312673390ac9SYuval Mintz if (!p_vf) 312773390ac9SYuval Mintz return; 312873390ac9SYuval Mintz 312973390ac9SYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 313073390ac9SYuval Mintz 313173390ac9SYuval Mintz if (p_params) 313273390ac9SYuval Mintz __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 313373390ac9SYuval Mintz if (p_link) 313473390ac9SYuval Mintz __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}

/* Process a single pending mailbox request posted by VF @vfid.
 * Copies the request's first TLV, then dispatches on the TLV type.
 * A VF already marked malicious gets a PFVF_STATUS_MALICIOUS reply;
 * an unknown TLV type is answered PFVF_STATUS_NOT_SUPPORTED, but only
 * if its reply address matches the one posted during ACQUIRE.
 */
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	if (!mbx->b_pending_msg) {
		/* Nothing was posted for this VF - spurious invocation */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	/* Consume the pending flag before handling, so a new request can
	 * be flagged while this one is processed.
	 */
	mbx->b_pending_msg = false;

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Known TLV, but the VF was flagged malicious - refuse it */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				     mbx->first_tlv.tl.type,
				     sizeof(struct pfvf_def_resp_tlv),
				     PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}

/* Build a bitmap (one u64 per 64 VFs) of VFs with a pending mailbox
 * message; @events must hold QED_VF_ARRAY_LENGTH u64 entries.
 */
void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
	int i;

	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

	qed_for_each_vf(p_hwfn, i) {
		struct qed_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

/* Translate an absolute VF id into this PF's vfs_array entry, or NULL
 * if the id does not belong to a VF this PF can handle.
 */
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
						       u16 abs_vfid)
{
	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;

	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got indication for VF [abs
0x%08x] that cannot be handled by PF\n", 32767eff82b0SYuval Mintz abs_vfid); 32777eff82b0SYuval Mintz return NULL; 32787eff82b0SYuval Mintz } 32797eff82b0SYuval Mintz 32807eff82b0SYuval Mintz return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; 32817eff82b0SYuval Mintz } 32827eff82b0SYuval Mintz 328337bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 328437bff2b9SYuval Mintz u16 abs_vfid, struct regpair *vf_msg) 328537bff2b9SYuval Mintz { 32867eff82b0SYuval Mintz struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, 328737bff2b9SYuval Mintz abs_vfid); 32887eff82b0SYuval Mintz 32897eff82b0SYuval Mintz if (!p_vf) 329037bff2b9SYuval Mintz return 0; 329137bff2b9SYuval Mintz 329237bff2b9SYuval Mintz /* List the physical address of the request so that handler 329337bff2b9SYuval Mintz * could later on copy the message from it. 329437bff2b9SYuval Mintz */ 329537bff2b9SYuval Mintz p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; 329637bff2b9SYuval Mintz 329737bff2b9SYuval Mintz /* Mark the event and schedule the workqueue */ 3298fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = true; 329937bff2b9SYuval Mintz qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); 330037bff2b9SYuval Mintz 330137bff2b9SYuval Mintz return 0; 330237bff2b9SYuval Mintz } 330337bff2b9SYuval Mintz 33047eff82b0SYuval Mintz static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 33057eff82b0SYuval Mintz struct malicious_vf_eqe_data *p_data) 33067eff82b0SYuval Mintz { 33077eff82b0SYuval Mintz struct qed_vf_info *p_vf; 33087eff82b0SYuval Mintz 33097eff82b0SYuval Mintz p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 33107eff82b0SYuval Mintz 33117eff82b0SYuval Mintz if (!p_vf) 33127eff82b0SYuval Mintz return; 33137eff82b0SYuval Mintz 33147eff82b0SYuval Mintz DP_INFO(p_hwfn, 33157eff82b0SYuval Mintz "VF [%d] - Malicious behavior [%02x]\n", 33167eff82b0SYuval Mintz p_vf->abs_vf_id, p_data->err_id); 33177eff82b0SYuval Mintz 
33187eff82b0SYuval Mintz p_vf->b_malicious = true; 33197eff82b0SYuval Mintz } 33207eff82b0SYuval Mintz 332137bff2b9SYuval Mintz int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 332237bff2b9SYuval Mintz u8 opcode, __le16 echo, union event_ring_data *data) 332337bff2b9SYuval Mintz { 332437bff2b9SYuval Mintz switch (opcode) { 332537bff2b9SYuval Mintz case COMMON_EVENT_VF_PF_CHANNEL: 332637bff2b9SYuval Mintz return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), 332737bff2b9SYuval Mintz &data->vf_pf_channel.msg_addr); 33287eff82b0SYuval Mintz case COMMON_EVENT_MALICIOUS_VF: 33297eff82b0SYuval Mintz qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 33307eff82b0SYuval Mintz return 0; 333137bff2b9SYuval Mintz default: 333237bff2b9SYuval Mintz DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 333337bff2b9SYuval Mintz opcode); 333437bff2b9SYuval Mintz return -EINVAL; 333537bff2b9SYuval Mintz } 333637bff2b9SYuval Mintz } 333737bff2b9SYuval Mintz 333832a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 333932a47e72SYuval Mintz { 334032a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 334132a47e72SYuval Mintz u16 i; 334232a47e72SYuval Mintz 334332a47e72SYuval Mintz if (!p_iov) 334432a47e72SYuval Mintz goto out; 334532a47e72SYuval Mintz 334632a47e72SYuval Mintz for (i = rel_vf_id; i < p_iov->total_vfs; i++) 33477eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) 334832a47e72SYuval Mintz return i; 334932a47e72SYuval Mintz 335032a47e72SYuval Mintz out: 335132a47e72SYuval Mintz return MAX_NUM_VFS; 335232a47e72SYuval Mintz } 335337bff2b9SYuval Mintz 335437bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, 335537bff2b9SYuval Mintz int vfid) 335637bff2b9SYuval Mintz { 335737bff2b9SYuval Mintz struct qed_dmae_params params; 335837bff2b9SYuval Mintz struct qed_vf_info *vf_info; 335937bff2b9SYuval Mintz 336037bff2b9SYuval Mintz vf_info = 
qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 336137bff2b9SYuval Mintz if (!vf_info) 336237bff2b9SYuval Mintz return -EINVAL; 336337bff2b9SYuval Mintz 336437bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 336537bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; 336637bff2b9SYuval Mintz params.src_vfid = vf_info->abs_vf_id; 336737bff2b9SYuval Mintz 336837bff2b9SYuval Mintz if (qed_dmae_host2host(p_hwfn, ptt, 336937bff2b9SYuval Mintz vf_info->vf_mbx.pending_req, 337037bff2b9SYuval Mintz vf_info->vf_mbx.req_phys, 337137bff2b9SYuval Mintz sizeof(union vfpf_tlvs) / 4, ¶ms)) { 337237bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 337337bff2b9SYuval Mintz "Failed to copy message from VF 0x%02x\n", vfid); 337437bff2b9SYuval Mintz 337537bff2b9SYuval Mintz return -EIO; 337637bff2b9SYuval Mintz } 337737bff2b9SYuval Mintz 337837bff2b9SYuval Mintz return 0; 337937bff2b9SYuval Mintz } 338037bff2b9SYuval Mintz 3381eff16960SYuval Mintz static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, 3382eff16960SYuval Mintz u8 *mac, int vfid) 3383eff16960SYuval Mintz { 3384eff16960SYuval Mintz struct qed_vf_info *vf_info; 3385eff16960SYuval Mintz u64 feature; 3386eff16960SYuval Mintz 3387eff16960SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3388eff16960SYuval Mintz if (!vf_info) { 3389eff16960SYuval Mintz DP_NOTICE(p_hwfn->cdev, 3390eff16960SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 3391eff16960SYuval Mintz return; 3392eff16960SYuval Mintz } 3393eff16960SYuval Mintz 33947eff82b0SYuval Mintz if (vf_info->b_malicious) { 33957eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 33967eff82b0SYuval Mintz "Can't set forced MAC to malicious VF [%d]\n", vfid); 33977eff82b0SYuval Mintz return; 33987eff82b0SYuval Mintz } 33997eff82b0SYuval Mintz 3400eff16960SYuval Mintz feature = 1 << MAC_ADDR_FORCED; 3401eff16960SYuval Mintz memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); 
3402eff16960SYuval Mintz 3403eff16960SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 3404eff16960SYuval Mintz /* Forced MAC will disable MAC_ADDR */ 34051a635e48SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); 3406eff16960SYuval Mintz 3407eff16960SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 3408eff16960SYuval Mintz } 3409eff16960SYuval Mintz 3410ba56947aSBaoyou Xie static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, 341108feecd7SYuval Mintz u16 pvid, int vfid) 341208feecd7SYuval Mintz { 341308feecd7SYuval Mintz struct qed_vf_info *vf_info; 341408feecd7SYuval Mintz u64 feature; 341508feecd7SYuval Mintz 341608feecd7SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 341708feecd7SYuval Mintz if (!vf_info) { 341808feecd7SYuval Mintz DP_NOTICE(p_hwfn->cdev, 341908feecd7SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 342008feecd7SYuval Mintz return; 342108feecd7SYuval Mintz } 342208feecd7SYuval Mintz 34237eff82b0SYuval Mintz if (vf_info->b_malicious) { 34247eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 34257eff82b0SYuval Mintz "Can't set forced vlan to malicious VF [%d]\n", vfid); 34267eff82b0SYuval Mintz return; 34277eff82b0SYuval Mintz } 34287eff82b0SYuval Mintz 342908feecd7SYuval Mintz feature = 1 << VLAN_ADDR_FORCED; 343008feecd7SYuval Mintz vf_info->bulletin.p_virt->pvid = pvid; 343108feecd7SYuval Mintz if (pvid) 343208feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 343308feecd7SYuval Mintz else 343408feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 343508feecd7SYuval Mintz 343608feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 343708feecd7SYuval Mintz } 343808feecd7SYuval Mintz 34396ddc7608SYuval Mintz static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 34406ddc7608SYuval Mintz { 34416ddc7608SYuval Mintz struct qed_vf_info *p_vf_info; 

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

/* True iff the VF is in VF_STOPPED state; an unknown vfid also reports
 * "stopped" so callers treat it as safe to release.
 */
static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

/* Return the VF's current spoof-check setting (false for unknown vfid). */
static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

/* Request spoof-check @val for VF @vfid. If the VF has no vport yet the
 * value is latched and applied when the vport starts; otherwise it is
 * configured immediately.
 */
static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

/* Return the bulletin's forced-MAC pointer, or NULL when no forced MAC
 * is published for this VF.
 */
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

/* Return the bulletin's forced VLAN (pvid), or 0 when none is forced. */
static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
Mintz static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 3531733def6aSYuval Mintz struct qed_ptt *p_ptt, int vfid, int val) 3532733def6aSYuval Mintz { 3533733def6aSYuval Mintz struct qed_vf_info *vf; 3534733def6aSYuval Mintz u8 abs_vp_id = 0; 3535733def6aSYuval Mintz int rc; 3536733def6aSYuval Mintz 3537733def6aSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3538733def6aSYuval Mintz if (!vf) 3539733def6aSYuval Mintz return -EINVAL; 3540733def6aSYuval Mintz 3541733def6aSYuval Mintz rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 3542733def6aSYuval Mintz if (rc) 3543733def6aSYuval Mintz return rc; 3544733def6aSYuval Mintz 3545733def6aSYuval Mintz return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); 3546733def6aSYuval Mintz } 3547733def6aSYuval Mintz 3548ba56947aSBaoyou Xie static int 3549ba56947aSBaoyou Xie qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 3550733def6aSYuval Mintz { 3551733def6aSYuval Mintz struct qed_vf_info *vf; 3552733def6aSYuval Mintz u8 vport_id; 3553733def6aSYuval Mintz int i; 3554733def6aSYuval Mintz 3555733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3556733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3557733def6aSYuval Mintz 3558733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 3559733def6aSYuval Mintz DP_NOTICE(p_hwfn, 3560733def6aSYuval Mintz "SR-IOV sanity check failed, can't set min rate\n"); 3561733def6aSYuval Mintz return -EINVAL; 3562733def6aSYuval Mintz } 3563733def6aSYuval Mintz } 3564733def6aSYuval Mintz 3565733def6aSYuval Mintz vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); 3566733def6aSYuval Mintz vport_id = vf->vport_id; 3567733def6aSYuval Mintz 3568733def6aSYuval Mintz return qed_configure_vport_wfq(cdev, vport_id, rate); 3569733def6aSYuval Mintz } 3570733def6aSYuval Mintz 357173390ac9SYuval Mintz static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 357273390ac9SYuval Mintz { 357373390ac9SYuval 
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF (which kind of work is pending)
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	/* Ensure the flag is visible before the work item runs */
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

/* Kick the IOV workqueue of every hwfn in the device. */
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

/* Tear down SR-IOV: flush pending IOV work, mark VFs disabled, disable
 * the PCI SR-IOV capability (when @pci_enabled), then wait for each VF
 * to stop and release its HW resources.
 */
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			/* k < 100 means the VF stopped within ~2s */
			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

/* Assign VF @vfid its queue ids: an equal, contiguous slice starting
 * right after the PF's own QED_PF_L2_QUE queues.
 */
static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}

/* Enable @num VFs: initialize HW access for each VF on every hwfn, then
 * enable the PCI SR-IOV capability. On any failure everything is rolled
 * back via qed_sriov_disable().
 */
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	int i, j, rc;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		/* NOTE(review): num_queues is computed before the !ptt
		 * check below; harmless since ptt is not dereferenced.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
Mintz return rc; 37460b55e27dSYuval Mintz } 37470b55e27dSYuval Mintz 37480b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) 37490b55e27dSYuval Mintz { 37500b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) { 37510b55e27dSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); 37520b55e27dSYuval Mintz return -EOPNOTSUPP; 37530b55e27dSYuval Mintz } 37540b55e27dSYuval Mintz 37550b55e27dSYuval Mintz if (num_vfs_param) 37560b55e27dSYuval Mintz return qed_sriov_enable(cdev, num_vfs_param); 37570b55e27dSYuval Mintz else 37580b55e27dSYuval Mintz return qed_sriov_disable(cdev, true); 37590b55e27dSYuval Mintz } 37600b55e27dSYuval Mintz 3761eff16960SYuval Mintz static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) 3762eff16960SYuval Mintz { 3763eff16960SYuval Mintz int i; 3764eff16960SYuval Mintz 3765eff16960SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 3766eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 3767eff16960SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 3768eff16960SYuval Mintz return -EINVAL; 3769eff16960SYuval Mintz } 3770eff16960SYuval Mintz 37717eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 3772eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 3773eff16960SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 3774eff16960SYuval Mintz return -EINVAL; 3775eff16960SYuval Mintz } 3776eff16960SYuval Mintz 3777eff16960SYuval Mintz for_each_hwfn(cdev, i) { 3778eff16960SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3779eff16960SYuval Mintz struct qed_public_vf_info *vf_info; 3780eff16960SYuval Mintz 3781eff16960SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 3782eff16960SYuval Mintz if (!vf_info) 3783eff16960SYuval Mintz continue; 3784eff16960SYuval Mintz 3785eff16960SYuval Mintz /* Set the forced MAC, and schedule the IOV task */ 3786eff16960SYuval Mintz 
ether_addr_copy(vf_info->forced_mac, mac); 3787eff16960SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 3788eff16960SYuval Mintz } 3789eff16960SYuval Mintz 3790eff16960SYuval Mintz return 0; 3791eff16960SYuval Mintz } 3792eff16960SYuval Mintz 379308feecd7SYuval Mintz static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) 379408feecd7SYuval Mintz { 379508feecd7SYuval Mintz int i; 379608feecd7SYuval Mintz 379708feecd7SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 379808feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 379908feecd7SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 380008feecd7SYuval Mintz return -EINVAL; 380108feecd7SYuval Mintz } 380208feecd7SYuval Mintz 38037eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 380408feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 380508feecd7SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 380608feecd7SYuval Mintz return -EINVAL; 380708feecd7SYuval Mintz } 380808feecd7SYuval Mintz 380908feecd7SYuval Mintz for_each_hwfn(cdev, i) { 381008feecd7SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 381108feecd7SYuval Mintz struct qed_public_vf_info *vf_info; 381208feecd7SYuval Mintz 381308feecd7SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 381408feecd7SYuval Mintz if (!vf_info) 381508feecd7SYuval Mintz continue; 381608feecd7SYuval Mintz 381708feecd7SYuval Mintz /* Set the forced vlan, and schedule the IOV task */ 381808feecd7SYuval Mintz vf_info->forced_vlan = vid; 381908feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 382008feecd7SYuval Mintz } 382108feecd7SYuval Mintz 382208feecd7SYuval Mintz return 0; 382308feecd7SYuval Mintz } 382408feecd7SYuval Mintz 382573390ac9SYuval Mintz static int qed_get_vf_config(struct qed_dev *cdev, 382673390ac9SYuval Mintz int vf_id, struct ifla_vf_info *ivi) 382773390ac9SYuval Mintz { 382873390ac9SYuval 
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* NOTE(review): vf_info is not NULL-checked before use below -
	 * presumably guaranteed by the qed_iov_is_valid_vfid() check
	 * above; verify.
	 */
	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	/* No explicit max-rate configured -> report current link speed */
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

/* Push the PF's link state (adjusted per-VF link-state policy and Tx
 * rate) into every VF's bulletin, then schedule a bulletin update.
 */
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			/* fall through - default case only breaks */
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			/* Cap the advertised speed at the applied Tx rate */
			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

/* ndo_set_vf_link_state path: record the requested link-state policy
 * per hwfn and re-publish link info to the VF.
 */
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't
active\n", vf_id); 3944733def6aSYuval Mintz return -EINVAL; 3945733def6aSYuval Mintz } 3946733def6aSYuval Mintz 3947733def6aSYuval Mintz /* Handle configuration of link state */ 3948733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3949733def6aSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3950733def6aSYuval Mintz struct qed_public_vf_info *vf; 3951733def6aSYuval Mintz 3952733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 3953733def6aSYuval Mintz if (!vf) 3954733def6aSYuval Mintz continue; 3955733def6aSYuval Mintz 3956733def6aSYuval Mintz if (vf->link_state == link_state) 3957733def6aSYuval Mintz continue; 3958733def6aSYuval Mintz 3959733def6aSYuval Mintz vf->link_state = link_state; 3960733def6aSYuval Mintz qed_inform_vf_link_state(&cdev->hwfns[i]); 3961733def6aSYuval Mintz } 3962733def6aSYuval Mintz 3963733def6aSYuval Mintz return 0; 3964733def6aSYuval Mintz } 3965733def6aSYuval Mintz 39666ddc7608SYuval Mintz static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 39676ddc7608SYuval Mintz { 39686ddc7608SYuval Mintz int i, rc = -EINVAL; 39696ddc7608SYuval Mintz 39706ddc7608SYuval Mintz for_each_hwfn(cdev, i) { 39716ddc7608SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 39726ddc7608SYuval Mintz 39736ddc7608SYuval Mintz rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 39746ddc7608SYuval Mintz if (rc) 39756ddc7608SYuval Mintz break; 39766ddc7608SYuval Mintz } 39776ddc7608SYuval Mintz 39786ddc7608SYuval Mintz return rc; 39796ddc7608SYuval Mintz } 39806ddc7608SYuval Mintz 3981733def6aSYuval Mintz static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 3982733def6aSYuval Mintz { 3983733def6aSYuval Mintz int i; 3984733def6aSYuval Mintz 3985733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3986733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3987733def6aSYuval Mintz struct qed_public_vf_info *vf; 3988733def6aSYuval Mintz 3989733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 
3990733def6aSYuval Mintz DP_NOTICE(p_hwfn, 3991733def6aSYuval Mintz "SR-IOV sanity check failed, can't set tx rate\n"); 3992733def6aSYuval Mintz return -EINVAL; 3993733def6aSYuval Mintz } 3994733def6aSYuval Mintz 3995733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 3996733def6aSYuval Mintz 3997733def6aSYuval Mintz vf->tx_rate = rate; 3998733def6aSYuval Mintz 3999733def6aSYuval Mintz qed_inform_vf_link_state(p_hwfn); 4000733def6aSYuval Mintz } 4001733def6aSYuval Mintz 4002733def6aSYuval Mintz return 0; 4003733def6aSYuval Mintz } 4004733def6aSYuval Mintz 4005733def6aSYuval Mintz static int qed_set_vf_rate(struct qed_dev *cdev, 4006733def6aSYuval Mintz int vfid, u32 min_rate, u32 max_rate) 4007733def6aSYuval Mintz { 4008733def6aSYuval Mintz int rc_min = 0, rc_max = 0; 4009733def6aSYuval Mintz 4010733def6aSYuval Mintz if (max_rate) 4011733def6aSYuval Mintz rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 4012733def6aSYuval Mintz 4013733def6aSYuval Mintz if (min_rate) 4014733def6aSYuval Mintz rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 4015733def6aSYuval Mintz 4016733def6aSYuval Mintz if (rc_max | rc_min) 4017733def6aSYuval Mintz return -EINVAL; 4018733def6aSYuval Mintz 4019733def6aSYuval Mintz return 0; 4020733def6aSYuval Mintz } 4021733def6aSYuval Mintz 4022f990c82cSMintz, Yuval static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) 4023f990c82cSMintz, Yuval { 4024f990c82cSMintz, Yuval int i; 4025f990c82cSMintz, Yuval 4026f990c82cSMintz, Yuval for_each_hwfn(cdev, i) { 4027f990c82cSMintz, Yuval struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4028f990c82cSMintz, Yuval struct qed_public_vf_info *vf; 4029f990c82cSMintz, Yuval 4030f990c82cSMintz, Yuval if (!qed_iov_pf_sanity_check(hwfn, vfid)) { 4031f990c82cSMintz, Yuval DP_NOTICE(hwfn, 4032f990c82cSMintz, Yuval "SR-IOV sanity check failed, can't set trust\n"); 4033f990c82cSMintz, Yuval return -EINVAL; 4034f990c82cSMintz, Yuval } 4035f990c82cSMintz, Yuval 
4036f990c82cSMintz, Yuval vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 4037f990c82cSMintz, Yuval 4038f990c82cSMintz, Yuval if (vf->is_trusted_request == trust) 4039f990c82cSMintz, Yuval return 0; 4040f990c82cSMintz, Yuval vf->is_trusted_request = trust; 4041f990c82cSMintz, Yuval 4042f990c82cSMintz, Yuval qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); 4043f990c82cSMintz, Yuval } 4044f990c82cSMintz, Yuval 4045f990c82cSMintz, Yuval return 0; 4046f990c82cSMintz, Yuval } 4047f990c82cSMintz, Yuval 404837bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 404937bff2b9SYuval Mintz { 405037bff2b9SYuval Mintz u64 events[QED_VF_ARRAY_LENGTH]; 405137bff2b9SYuval Mintz struct qed_ptt *ptt; 405237bff2b9SYuval Mintz int i; 405337bff2b9SYuval Mintz 405437bff2b9SYuval Mintz ptt = qed_ptt_acquire(hwfn); 405537bff2b9SYuval Mintz if (!ptt) { 405637bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 405737bff2b9SYuval Mintz "Can't acquire PTT; re-scheduling\n"); 405837bff2b9SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 405937bff2b9SYuval Mintz return; 406037bff2b9SYuval Mintz } 406137bff2b9SYuval Mintz 4062fd3c615aSMintz, Yuval qed_iov_pf_get_pending_events(hwfn, events); 406337bff2b9SYuval Mintz 406437bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 406537bff2b9SYuval Mintz "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 406637bff2b9SYuval Mintz events[0], events[1], events[2]); 406737bff2b9SYuval Mintz 406837bff2b9SYuval Mintz qed_for_each_vf(hwfn, i) { 406937bff2b9SYuval Mintz /* Skip VFs with no pending messages */ 407037bff2b9SYuval Mintz if (!(events[i / 64] & (1ULL << (i % 64)))) 407137bff2b9SYuval Mintz continue; 407237bff2b9SYuval Mintz 407337bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 407437bff2b9SYuval Mintz "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 407537bff2b9SYuval Mintz i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 407637bff2b9SYuval Mintz 407737bff2b9SYuval Mintz /* Copy VF's message to PF's request buffer for that 
VF */ 407837bff2b9SYuval Mintz if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 407937bff2b9SYuval Mintz continue; 408037bff2b9SYuval Mintz 408137bff2b9SYuval Mintz qed_iov_process_mbx_req(hwfn, ptt, i); 408237bff2b9SYuval Mintz } 408337bff2b9SYuval Mintz 408437bff2b9SYuval Mintz qed_ptt_release(hwfn, ptt); 408537bff2b9SYuval Mintz } 408637bff2b9SYuval Mintz 408708feecd7SYuval Mintz static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 408808feecd7SYuval Mintz { 408908feecd7SYuval Mintz int i; 409008feecd7SYuval Mintz 409108feecd7SYuval Mintz qed_for_each_vf(hwfn, i) { 409208feecd7SYuval Mintz struct qed_public_vf_info *info; 409308feecd7SYuval Mintz bool update = false; 4094eff16960SYuval Mintz u8 *mac; 409508feecd7SYuval Mintz 409608feecd7SYuval Mintz info = qed_iov_get_public_vf_info(hwfn, i, true); 409708feecd7SYuval Mintz if (!info) 409808feecd7SYuval Mintz continue; 409908feecd7SYuval Mintz 410008feecd7SYuval Mintz /* Update data on bulletin board */ 4101eff16960SYuval Mintz mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 4102eff16960SYuval Mintz if (is_valid_ether_addr(info->forced_mac) && 4103eff16960SYuval Mintz (!mac || !ether_addr_equal(mac, info->forced_mac))) { 4104eff16960SYuval Mintz DP_VERBOSE(hwfn, 4105eff16960SYuval Mintz QED_MSG_IOV, 4106eff16960SYuval Mintz "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 4107eff16960SYuval Mintz i, 4108eff16960SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4109eff16960SYuval Mintz 4110eff16960SYuval Mintz /* Update bulletin board with forced MAC */ 4111eff16960SYuval Mintz qed_iov_bulletin_set_forced_mac(hwfn, 4112eff16960SYuval Mintz info->forced_mac, i); 4113eff16960SYuval Mintz update = true; 4114eff16960SYuval Mintz } 411508feecd7SYuval Mintz 411608feecd7SYuval Mintz if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 411708feecd7SYuval Mintz info->forced_vlan) { 411808feecd7SYuval Mintz DP_VERBOSE(hwfn, 411908feecd7SYuval Mintz QED_MSG_IOV, 412008feecd7SYuval Mintz "Handling PF setting 
of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 412108feecd7SYuval Mintz info->forced_vlan, 412208feecd7SYuval Mintz i, 412308feecd7SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 412408feecd7SYuval Mintz qed_iov_bulletin_set_forced_vlan(hwfn, 412508feecd7SYuval Mintz info->forced_vlan, i); 412608feecd7SYuval Mintz update = true; 412708feecd7SYuval Mintz } 412808feecd7SYuval Mintz 412908feecd7SYuval Mintz if (update) 413008feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 413108feecd7SYuval Mintz } 413208feecd7SYuval Mintz } 413308feecd7SYuval Mintz 413436558c3dSYuval Mintz static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 413536558c3dSYuval Mintz { 413636558c3dSYuval Mintz struct qed_ptt *ptt; 413736558c3dSYuval Mintz int i; 413836558c3dSYuval Mintz 413936558c3dSYuval Mintz ptt = qed_ptt_acquire(hwfn); 414036558c3dSYuval Mintz if (!ptt) { 414136558c3dSYuval Mintz DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 414236558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 414336558c3dSYuval Mintz return; 414436558c3dSYuval Mintz } 414536558c3dSYuval Mintz 414636558c3dSYuval Mintz qed_for_each_vf(hwfn, i) 414736558c3dSYuval Mintz qed_iov_post_vf_bulletin(hwfn, i, ptt); 414836558c3dSYuval Mintz 414936558c3dSYuval Mintz qed_ptt_release(hwfn, ptt); 415036558c3dSYuval Mintz } 415136558c3dSYuval Mintz 4152f990c82cSMintz, Yuval static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) 4153f990c82cSMintz, Yuval { 4154f990c82cSMintz, Yuval struct qed_sp_vport_update_params params; 4155f990c82cSMintz, Yuval struct qed_filter_accept_flags *flags; 4156f990c82cSMintz, Yuval struct qed_public_vf_info *vf_info; 4157f990c82cSMintz, Yuval struct qed_vf_info *vf; 4158f990c82cSMintz, Yuval u8 mask; 4159f990c82cSMintz, Yuval int i; 4160f990c82cSMintz, Yuval 4161f990c82cSMintz, Yuval mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 4162f990c82cSMintz, Yuval flags = ¶ms.accept_flags; 
4163f990c82cSMintz, Yuval 4164f990c82cSMintz, Yuval qed_for_each_vf(hwfn, i) { 4165f990c82cSMintz, Yuval /* Need to make sure current requested configuration didn't 4166f990c82cSMintz, Yuval * flip so that we'll end up configuring something that's not 4167f990c82cSMintz, Yuval * needed. 4168f990c82cSMintz, Yuval */ 4169f990c82cSMintz, Yuval vf_info = qed_iov_get_public_vf_info(hwfn, i, true); 4170f990c82cSMintz, Yuval if (vf_info->is_trusted_configured == 4171f990c82cSMintz, Yuval vf_info->is_trusted_request) 4172f990c82cSMintz, Yuval continue; 4173f990c82cSMintz, Yuval vf_info->is_trusted_configured = vf_info->is_trusted_request; 4174f990c82cSMintz, Yuval 4175f990c82cSMintz, Yuval /* Validate that the VF has a configured vport */ 4176f990c82cSMintz, Yuval vf = qed_iov_get_vf_info(hwfn, i, true); 4177f990c82cSMintz, Yuval if (!vf->vport_instance) 4178f990c82cSMintz, Yuval continue; 4179f990c82cSMintz, Yuval 4180f990c82cSMintz, Yuval memset(¶ms, 0, sizeof(params)); 4181f990c82cSMintz, Yuval params.opaque_fid = vf->opaque_fid; 4182f990c82cSMintz, Yuval params.vport_id = vf->vport_id; 4183f990c82cSMintz, Yuval 4184f990c82cSMintz, Yuval if (vf_info->rx_accept_mode & mask) { 4185f990c82cSMintz, Yuval flags->update_rx_mode_config = 1; 4186f990c82cSMintz, Yuval flags->rx_accept_filter = vf_info->rx_accept_mode; 4187f990c82cSMintz, Yuval } 4188f990c82cSMintz, Yuval 4189f990c82cSMintz, Yuval if (vf_info->tx_accept_mode & mask) { 4190f990c82cSMintz, Yuval flags->update_tx_mode_config = 1; 4191f990c82cSMintz, Yuval flags->tx_accept_filter = vf_info->tx_accept_mode; 4192f990c82cSMintz, Yuval } 4193f990c82cSMintz, Yuval 4194f990c82cSMintz, Yuval /* Remove if needed; Otherwise this would set the mask */ 4195f990c82cSMintz, Yuval if (!vf_info->is_trusted_configured) { 4196f990c82cSMintz, Yuval flags->rx_accept_filter &= ~mask; 4197f990c82cSMintz, Yuval flags->tx_accept_filter &= ~mask; 4198f990c82cSMintz, Yuval } 4199f990c82cSMintz, Yuval 4200f990c82cSMintz, Yuval if 
(flags->update_rx_mode_config || 4201f990c82cSMintz, Yuval flags->update_tx_mode_config) 4202f990c82cSMintz, Yuval qed_sp_vport_update(hwfn, ¶ms, 4203f990c82cSMintz, Yuval QED_SPQ_MODE_EBLOCK, NULL); 4204f990c82cSMintz, Yuval } 4205f990c82cSMintz, Yuval } 4206f990c82cSMintz, Yuval 4207ba56947aSBaoyou Xie static void qed_iov_pf_task(struct work_struct *work) 4208ba56947aSBaoyou Xie 420937bff2b9SYuval Mintz { 421037bff2b9SYuval Mintz struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 421137bff2b9SYuval Mintz iov_task.work); 42120b55e27dSYuval Mintz int rc; 421337bff2b9SYuval Mintz 421437bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) 421537bff2b9SYuval Mintz return; 421637bff2b9SYuval Mintz 42170b55e27dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 42180b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 42190b55e27dSYuval Mintz 42200b55e27dSYuval Mintz if (!ptt) { 42210b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 42220b55e27dSYuval Mintz return; 42230b55e27dSYuval Mintz } 42240b55e27dSYuval Mintz 42250b55e27dSYuval Mintz rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 42260b55e27dSYuval Mintz if (rc) 42270b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 42280b55e27dSYuval Mintz 42290b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 42300b55e27dSYuval Mintz } 42310b55e27dSYuval Mintz 423237bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 423337bff2b9SYuval Mintz qed_handle_vf_msg(hwfn); 423408feecd7SYuval Mintz 423508feecd7SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 423608feecd7SYuval Mintz &hwfn->iov_task_flags)) 423708feecd7SYuval Mintz qed_handle_pf_set_vf_unicast(hwfn); 423808feecd7SYuval Mintz 423936558c3dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 424036558c3dSYuval Mintz &hwfn->iov_task_flags)) 424136558c3dSYuval Mintz 
qed_handle_bulletin_post(hwfn); 4242f990c82cSMintz, Yuval 4243f990c82cSMintz, Yuval if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) 4244f990c82cSMintz, Yuval qed_iov_handle_trust_change(hwfn); 424537bff2b9SYuval Mintz } 424637bff2b9SYuval Mintz 424737bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 424837bff2b9SYuval Mintz { 424937bff2b9SYuval Mintz int i; 425037bff2b9SYuval Mintz 425137bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 425237bff2b9SYuval Mintz if (!cdev->hwfns[i].iov_wq) 425337bff2b9SYuval Mintz continue; 425437bff2b9SYuval Mintz 425537bff2b9SYuval Mintz if (schedule_first) { 425637bff2b9SYuval Mintz qed_schedule_iov(&cdev->hwfns[i], 425737bff2b9SYuval Mintz QED_IOV_WQ_STOP_WQ_FLAG); 425837bff2b9SYuval Mintz cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 425937bff2b9SYuval Mintz } 426037bff2b9SYuval Mintz 426137bff2b9SYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 426237bff2b9SYuval Mintz destroy_workqueue(cdev->hwfns[i].iov_wq); 426337bff2b9SYuval Mintz } 426437bff2b9SYuval Mintz } 426537bff2b9SYuval Mintz 426637bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev) 426737bff2b9SYuval Mintz { 426837bff2b9SYuval Mintz char name[NAME_SIZE]; 426937bff2b9SYuval Mintz int i; 427037bff2b9SYuval Mintz 427137bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 427237bff2b9SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 427337bff2b9SYuval Mintz 427436558c3dSYuval Mintz /* PFs needs a dedicated workqueue only if they support IOV. 427536558c3dSYuval Mintz * VFs always require one. 
427636558c3dSYuval Mintz */ 427736558c3dSYuval Mintz if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 427837bff2b9SYuval Mintz continue; 427937bff2b9SYuval Mintz 428037bff2b9SYuval Mintz snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 428137bff2b9SYuval Mintz cdev->pdev->bus->number, 428237bff2b9SYuval Mintz PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 428337bff2b9SYuval Mintz 428437bff2b9SYuval Mintz p_hwfn->iov_wq = create_singlethread_workqueue(name); 428537bff2b9SYuval Mintz if (!p_hwfn->iov_wq) { 428637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 428737bff2b9SYuval Mintz return -ENOMEM; 428837bff2b9SYuval Mintz } 428937bff2b9SYuval Mintz 429036558c3dSYuval Mintz if (IS_PF(cdev)) 429137bff2b9SYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 429236558c3dSYuval Mintz else 429336558c3dSYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 429437bff2b9SYuval Mintz } 429537bff2b9SYuval Mintz 429637bff2b9SYuval Mintz return 0; 429737bff2b9SYuval Mintz } 42980b55e27dSYuval Mintz 42990b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = { 43000b55e27dSYuval Mintz .configure = &qed_sriov_configure, 4301eff16960SYuval Mintz .set_mac = &qed_sriov_pf_set_mac, 430208feecd7SYuval Mintz .set_vlan = &qed_sriov_pf_set_vlan, 430373390ac9SYuval Mintz .get_config = &qed_get_vf_config, 4304733def6aSYuval Mintz .set_link_state = &qed_set_vf_link_state, 43056ddc7608SYuval Mintz .set_spoof = &qed_spoof_configure, 4306733def6aSYuval Mintz .set_rate = &qed_set_vf_rate, 4307f990c82cSMintz, Yuval .set_trust = &qed_set_vf_trust, 43080b55e27dSYuval Mintz }; 4309